__init__.py
|
#!/usr/bin/python
import base64
from binascii import hexlify
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from distutils.spawn import find_executable
from kvirt import common
from kvirt.common import error, pprint, warning
from kvirt.defaults import UBUNTUS, METADATA_FIELDS
from math import ceil
from pyVmomi import vim, vmodl
from pyVim import connect
import json
import os
import re
import requests
import random
from ssl import _create_unverified_context, get_server_certificate
import tarfile
from tempfile import TemporaryDirectory
from threading import Thread
import time
import pyVmomi
import webbrowser
from zipfile import ZipFile
def waitForMe(t):
while t.info.state not in [vim.TaskInfo.State.success, vim.TaskInfo.State.error]:
time.sleep(1)
if t.info.state == vim.TaskInfo.State.error:
error(t.info.description)
error(t.info.error)
os._exit(1)
def collectproperties(si, view, objtype, pathset=None, includemors=False):
collector = si.content.propertyCollector
# Create object specification to define the starting point of
# inventory navigation
objspec = pyVmomi.vmodl.query.PropertyCollector.ObjectSpec()
objspec.obj = view
objspec.skip = True
# Create a traversal specification to identify the path for collection
traversalspec = pyVmomi.vmodl.query.PropertyCollector.TraversalSpec()
traversalspec.name = 'traverseEntities'
traversalspec.path = 'view'
traversalspec.skip = False
traversalspec.type = view.__class__
objspec.selectSet = [traversalspec]
# Identify the properties to be retrieved
propertyspec = pyVmomi.vmodl.query.PropertyCollector.PropertySpec()
propertyspec.type = objtype
if not pathset:
propertyspec.all = True
propertyspec.pathSet = pathset
# Add the object and property specification to the
# property filter specification
filterspec = pyVmomi.vmodl.query.PropertyCollector.FilterSpec()
filterspec.objectSet = [objspec]
filterspec.propSet = [propertyspec]
# Retrieve properties
props = collector.RetrieveContents([filterspec])
data = []
for obj in props:
properties = {}
for prop in obj.propSet:
properties[prop.name] = prop.val
if includemors:
properties['obj'] = obj.obj
data.append(properties)
return data
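# Illustrative sketch (not part of the provider): the comments above outline the
# property-collector flow; this hypothetical helper applies the same pattern to
# fetch the names of all datastores from an established ServiceInstance 'si'.
def list_datastore_names(si):
    view = si.content.viewManager.CreateContainerView(si.content.rootFolder, [vim.Datastore], True)
    return [d['name'] for d in collectproperties(si, view=view, objtype=vim.Datastore, pathset=['name'])]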
def find(si, folder, vimtype, name):
o = si.content.viewManager.CreateContainerView(folder, [vimtype], True)
view = o.view
o.Destroy()
element = None
for e in view:
if e.name == name:
element = e
break
return element
def findvm(si, folder, name):
view = si.content.viewManager.CreateContainerView(folder, [vim.VirtualMachine], True)
vmlist = collectproperties(si, view=view, objtype=vim.VirtualMachine, pathset=['name'], includemors=True)
vm = list(filter(lambda v: v['name'] == name, vmlist))
if len(vm) >= 1:
return vm[-1]['obj']
else:
return None
def convert(octets, GB=True):
# return str(float(octets) / 1024 / 1024 / 1024) + "GB"
result = str(ceil(float(octets) / 1024 / 1024 / 1024))
if GB:
result += "GB"
return result
def dssize(ds):
di = ds.summary
return convert(di.capacity), convert(di.freeSpace)
def makecuspec(name, nets=[], gateway=None, dns=None, domain=None):
customspec = vim.vm.customization.Specification()
ident = vim.vm.customization.LinuxPrep()
ident.hostName = vim.vm.customization.FixedName()
ident.hostName.name = name
globalip = vim.vm.customization.GlobalIPSettings()
if domain:
ident.domain = domain
customspec.identity = ident
if dns is not None or domain is not None:
if dns is not None:
globalip.dnsServerList = [dns]
# if dns2:
# globalip.dnsServerList.append(dns2)
if domain is not None:
globalip.dnsSuffixList = domain
customspec.globalIPSettings = globalip
adaptermaps = []
for index, net in enumerate(nets):
if isinstance(net, str) or (len(net) == 1 and 'name' in net):
if index == 0:
continue
# nicname = "eth%d" % index
ip = None
netmask = None
# noconf = None
# vips = []
elif isinstance(net, dict):
# nicname = net.get('nic', "eth%d" % index)
ip = net.get('ip')
netmask = next((e for e in [net.get('mask'), net.get('netmask')] if e is not None), None)
# noconf = net.get('noconf')
# vips = net.get('vips')
if ip is not None and netmask is not None and gateway is not None and domain is not None:
guestmap = vim.vm.customization.AdapterMapping()
guestmap.adapter = vim.vm.customization.IPSettings()
guestmap.adapter.ip = vim.vm.customization.FixedIp()
guestmap.adapter.ip.ipAddress = ip
guestmap.adapter.subnetMask = netmask
guestmap.adapter.gateway = gateway
guestmap.adapter.dnsDomain = domain
adaptermaps.append(guestmap)
customspec.nicSettingMap = adaptermaps
return customspec
def createnicspec(nicname, netname, nictype=None):
nicspec = vim.vm.device.VirtualDeviceSpec()
nicspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
if nictype == 'pcnet32':
nic = vim.vm.device.VirtualPCNet32()
elif nictype == 'e1000':
nic = vim.vm.device.VirtualE1000()
elif nictype == 'e1000e':
nic = vim.vm.device.VirtualE1000e()
else:
nic = vim.vm.device.VirtualVmxnet3()
desc = vim.Description()
desc.label = nicname
nicbacking = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
desc.summary = netname
nicbacking.deviceName = netname
nic.backing = nicbacking
# nic.key = 0
nic.deviceInfo = desc
nic.addressType = 'generated'
nicspec.device = nic
return nicspec
def createdvsnicspec(nicname, netname, switchuuid, portgroupkey, nictype=None):
nicspec = vim.vm.device.VirtualDeviceSpec()
nicspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
if nictype == 'pcnet32':
nic = vim.vm.device.VirtualPCNet32()
elif nictype == 'e1000':
nic = vim.vm.device.VirtualE1000()
elif nictype == 'e1000e':
nic = vim.vm.device.VirtualE1000e()
else:
nic = vim.vm.device.VirtualVmxnet3()
dnicbacking = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
dvconnection = vim.dvs.DistributedVirtualSwitchPortConnection()
dvconnection.switchUuid = switchuuid
dvconnection.portgroupKey = portgroupkey
dnicbacking.port = dvconnection
nic.backing = dnicbacking
nicspec.device = nic
return nicspec
def createscsispec():
ckey = 1000
# SCSISPEC
scsispec = vim.vm.device.VirtualDeviceSpec()
scsispec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
# scsictrl = vim.vm.device.VirtualLsiLogicController()
scsictrl = vim.vm.device.ParaVirtualSCSIController()
scsictrl.key = ckey
scsictrl.busNumber = 0
scsictrl.sharedBus = vim.vm.device.VirtualSCSIController.Sharing.noSharing
scsispec.device = scsictrl
return scsispec
def creatediskspec(number, disksize, ds, diskmode, thin=False):
ckey = 1000
diskspec = vim.vm.device.VirtualDeviceSpec()
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
diskspec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
vd = vim.vm.device.VirtualDisk()
vd.capacityInKB = disksize
diskspec.device = vd
vd.unitNumber = number
vd.controllerKey = ckey
diskfilebacking = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
filename = "[" + ds.name + "]"
diskfilebacking.fileName = filename
diskfilebacking.diskMode = diskmode
diskfilebacking.thinProvisioned = True if thin else False
vd.backing = diskfilebacking
return diskspec
def createcdspec():
# http://books.google.es/books?id=SdsnGmhF0QEC&pg=PA145&lpg=PA145&dq=VirtualCdrom%2Bspec&source=bl&ots=s8O2mw437-&sig=JpEo-AqmDV42b3fxpTcCt4xknEA&hl=es&sa=X&ei=KgGfT_DqApOy8QOl07X6Dg&redir_esc=y#v=onepage&q=VirtualCdrom%2Bspec&f=false
cdspec = vim.vm.device.VirtualDeviceSpec()
cdspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
cd = vim.vm.device.VirtualCdrom()
cdbacking = vim.vm.device.VirtualCdrom.AtapiBackingInfo()
cd.backing = cdbacking
cd.controllerKey = 201
cd.unitNumber = 0
cd.key = -1
cdspec.device = cd
return cdspec
def createisospec(iso=None):
cdspec = vim.vm.device.VirtualDeviceSpec()
cdspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
connect = vim.vm.device.VirtualDevice.ConnectInfo()
connect.startConnected = True
connect.allowGuestControl = True
connect.connected = False
cd = vim.vm.device.VirtualCdrom()
cd.connectable = connect
cdbacking = vim.vm.device.VirtualCdrom.IsoBackingInfo()
if iso is not None:
cdbacking.fileName = iso
cd.backing = cdbacking
cd.controllerKey = 201
cd.unitNumber = 0
cd.key = -1
cdspec.device = cd
return cdspec
def createclonespec(pool):
clonespec = vim.vm.CloneSpec()
relocatespec = vim.vm.RelocateSpec()
relocatespec.pool = pool
clonespec.location = relocatespec
clonespec.powerOn = False
clonespec.template = False
return clonespec
def create_filter_spec(pc, vms):
objSpecs = []
for vm in vms:
objSpec = vmodl.query.PropertyCollector.ObjectSpec(obj=vm)
objSpecs.append(objSpec)
filterSpec = vmodl.query.PropertyCollector.FilterSpec()
filterSpec.objectSet = objSpecs
propSet = vmodl.query.PropertyCollector.PropertySpec(all=False)
propSet.type = vim.VirtualMachine
propSet.pathSet = ['config.extraConfig.plan']
filterSpec.propSet = [propSet]
return filterSpec
def filter_results(results):
vms = []
for o in results.objects:
if o.propSet[0].val is not None:
vms.append(o.obj)
return vms
def changecd(si, vm, iso):
virtual_cdrom_device = None
for dev in vm.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualCdrom):
virtual_cdrom_device = dev
cdromspec = vim.vm.device.VirtualDeviceSpec()
cdromspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
cdromspec.device = vim.vm.device.VirtualCdrom()
cdromspec.device.controllerKey = virtual_cdrom_device.controllerKey
cdromspec.device.key = virtual_cdrom_device.key
cdromspec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
cdromspec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo()
cdromspec.device.backing.fileName = iso
cdromspec.device.connectable.connected = True
cdromspec.device.connectable.startConnected = True
cdromspec.device.connectable.allowGuestControl = True
dev_changes = []
dev_changes.append(cdromspec)
spec = vim.vm.ConfigSpec()
spec.deviceChange = dev_changes
task = vm.ReconfigVM_Task(spec=spec)
return task
raise RuntimeError("No cdrom found")
def createfolder(si, parentfolder, folder):
if find(si, parentfolder, vim.Folder, folder) is None:
parentfolder.CreateFolder(folder)
return None
def deletefolder(si, parentfolder, folder):
folder = find(si, parentfolder, vim.Folder, folder)
if folder is not None:
folder.Destroy()
def deletedirectory(si, dc, path):
d = si.content.fileManager.DeleteFile(path, dc)
waitForMe(d)
def keep_lease_alive(lease):
while True:
time.sleep(5)
try:
lease.HttpNfcLeaseProgress(50)
if lease.state == vim.HttpNfcLease.State.done:
return
except:
return
class Ksphere:
def __init__(self, host, user, password, datacenter, cluster, debug=False, isofolder=None,
filtervms=False, filteruser=False, filtertag=None):
# 4-1-CONNECT
si = connect.SmartConnect(host=host, port=443, user=user, pwd=password, sslContext=_create_unverified_context())
self.conn = si
self.si = si
self.vcip = host
self.url = "https://%s:%s@%s/sdk" % (user, password, host)
self.user = user
self.password = password
self.rootFolder = si.content.rootFolder
self.dc = find(si, self.rootFolder, vim.Datacenter, datacenter)
self.macaddr = []
self.clu = cluster
self.isofolder = isofolder
self.filtervms = filtervms
self.filteruser = filteruser
self.filtertag = filtertag
self.debug = debug
self.networks = []
view = si.content.viewManager.CreateContainerView(self.rootFolder, [vim.Network], True)
netlist = collectproperties(si, view=view, objtype=vim.Network, pathset=['name'], includemors=True)
for o in netlist:
self.networks.append(o['obj'].name)
portgs = {}
o = si.content.viewManager.CreateContainerView(self.rootFolder, [vim.DistributedVirtualSwitch], True)
dvnetworks = o.view
o.Destroy()
for dvnetw in dvnetworks:
uuid = dvnetw.uuid
for portg in dvnetw.portgroup:
portgs[portg.name] = [uuid, portg.key]
self.portgs = portgs
return
def close(self):
self.si.content.sessionManager.Logout()
def exists(self, name):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
return True if vm is not None else False
def net_exists(self, name):
print("not implemented")
return
def create(self, name, virttype=None, profile='kvirt', flavor=None, plan='kvirt', cpumodel='host-model',
cpuflags=[], cpupinning=[], numcpus=2, memory=512, guestid='centos7_64Guest', pool='default', image=None,
disks=[{'size': 10}], disksize=10, diskthin=True, diskinterface='virtio', nets=['default'], iso=None,
vnc=False, cloudinit=True, reserveip=False, reservedns=False, reservehost=False, start=True, keys=None,
cmds=[], ips=None, netmasks=None, gateway=None, nested=True, dns=None, domain=None, tunnel=False,
files=[], enableroot=True, overrides={}, tags=[], storemetadata=False, sharedfolders=[],
kernel=None, initrd=None, cmdline=None, placement=[], autostart=False, cpuhotplug=False,
memoryhotplug=False, numamode=None, numa=[], pcidevices=[], tpm=False, rng=False, metadata={},
securitygroups=[]):
dc = self.dc
vmFolder = dc.vmFolder
diskmode = 'persistent'
default_diskinterface = diskinterface
default_diskthin = diskthin
default_disksize = disksize
default_pool = pool
memory = int(memory)
numcpus = int(numcpus)
si = self.si
rootFolder = self.rootFolder
cluster = overrides.get('cluster')
if cluster is not None:
createfolder(si, dc.vmFolder, cluster)
vmfolder = find(si, dc.vmFolder, vim.Folder, cluster)
elif plan != 'kvirt':
createfolder(si, dc.vmFolder, plan)
vmfolder = find(si, dc.vmFolder, vim.Folder, plan)
else:
vmfolder = dc.vmFolder
si = self.si
clu = find(si, rootFolder, vim.ComputeResource, self.clu)
resourcepool = clu.resourcePool
if image is not None:
rootFolder = self.rootFolder
imageobj = findvm(si, rootFolder, image)
if imageobj is None:
return {'result': 'failure', 'reason': "Image %s not found" % image}
clonespec = createclonespec(resourcepool)
confspec = vim.vm.ConfigSpec()
confspec.annotation = name
confspec.memoryMB = memory
confspec.numCPUs = numcpus
extraconfig = []
for entry in [field for field in metadata if field in METADATA_FIELDS]:
opt = vim.option.OptionValue()
opt.key = entry
opt.value = metadata[entry]
extraconfig.append(opt)
clonespec.config = confspec
clonespec.powerOn = False
cloudinitiso = None
if cloudinit:
if image is not None and common.needs_ignition(image):
version = common.ignition_version(image)
ignitiondata = common.ignition(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns,
domain=domain, reserveip=reserveip, files=files,
enableroot=enableroot, overrides=overrides, version=version,
plan=plan, image=image)
ignitionopt = vim.option.OptionValue()
ignitionopt.key = 'guestinfo.ignition.config.data'
ignitionopt.value = base64.b64encode(ignitiondata.encode()).decode()
encodingopt = vim.option.OptionValue()
encodingopt.key = 'guestinfo.ignition.config.data.encoding'
encodingopt.value = 'base64'
extraconfig.extend([ignitionopt, encodingopt])
else:
gcmds = []
if image is not None and 'cos' not in image and 'fedora-coreos' not in image:
lower = image.lower()
if lower.startswith('fedora') or lower.startswith('rhel') or lower.startswith('centos'):
gcmds.append('yum -y install open-vm-tools')
elif lower.startswith('debian') or [x for x in UBUNTUS if x in lower] or 'ubuntu' in lower:
gcmds.append('apt-get update')
gcmds.append('apt-get -f install open-vm-tools')
gcmds.append('systemctl enable --now vmtoolsd')
index = 0
if image is not None and image.startswith('rhel'):
subindex = [i for i, value in enumerate(cmds) if value.startswith('subscription-manager')]
if subindex:
index = subindex.pop() + 1
cmds = cmds[:index] + gcmds + cmds[index:]
# customspec = makecuspec(name, nets=nets, gateway=gateway, dns=dns, domain=domain)
# clonespec.customization = customspec
isofolder = self.isofolder if self.isofolder is not None else "[%s]/%s" % (default_pool, name)
cloudinitiso = "%s/%s.ISO" % (isofolder, name)
userdata, meta, netdata = common.cloudinit(name=name, keys=keys, cmds=cmds, nets=nets,
gateway=gateway, dns=dns, domain=domain,
reserveip=reserveip, files=files,
enableroot=enableroot, overrides=overrides,
storemetadata=storemetadata, machine='vsphere',
image=image)
confspec.extraConfig = extraconfig
t = imageobj.CloneVM_Task(folder=vmfolder, name=name, spec=clonespec)
waitForMe(t)
if cloudinitiso is not None:
with TemporaryDirectory() as tmpdir:
common.make_iso(name, tmpdir, userdata, meta, netdata)
cloudinitisofile = "%s/%s.ISO" % (tmpdir, name)
if self.isofolder is not None:
isofolder = self.isofolder.split('/')
isopool = re.sub(r"[\[\]]", '', isofolder[0])
isofolder = isofolder[1]
else:
isopool = default_pool
isofolder = None
self._uploadimage(isopool, cloudinitisofile, name, isofolder=isofolder)
vm = findvm(si, vmFolder, name)
c = changecd(self.si, vm, cloudinitiso)
waitForMe(c)
datastores = {}
confspec = vim.vm.ConfigSpec()
confspec.name = name
confspec.annotation = name
confspec.memoryMB = memory
confspec.numCPUs = numcpus
confspec.extraConfig = []
for entry in [field for field in metadata if field in METADATA_FIELDS]:
opt = vim.option.OptionValue()
opt.key = entry
opt.value = metadata[entry]
confspec.extraConfig.append(opt)
if nested:
confspec.nestedHVEnabled = True
confspec.guestId = guestid
vmfi = vim.vm.FileInfo()
filename = "[" + default_pool + "]"
vmfi.vmPathName = filename
confspec.files = vmfi
if vnc:
vncport = random.randint(5900, 7000)
opt1 = vim.option.OptionValue()
opt1.key = 'RemoteDisplay.vnc.port'
opt1.value = vncport
opt2 = vim.option.OptionValue()
opt2.key = 'RemoteDisplay.vnc.enabled'
opt2.value = "TRUE"
confspec.extraConfig = [opt1, opt2]
if image is None:
t = vmfolder.CreateVM_Task(confspec, resourcepool)
waitForMe(t)
vm = find(si, dc.vmFolder, vim.VirtualMachine, name)
currentdevices = vm.config.hardware.device
currentdisks = [d for d in currentdevices if isinstance(d, vim.vm.device.VirtualDisk)]
currentnics = [d for d in currentdevices if isinstance(d, vim.vm.device.VirtualEthernetCard)]
confspec = vim.vm.ConfigSpec()
devconfspec = []
for index, disk in enumerate(disks):
if disk is None:
disksize = default_disksize
diskthin = default_diskthin
diskinterface = default_diskinterface
diskpool = default_pool
elif isinstance(disk, int):
disksize = disk
diskthin = default_diskthin
diskinterface = default_diskinterface
diskpool = default_pool
elif isinstance(disk, str) and disk.isdigit():
disksize = int(disk)
diskthin = default_diskthin
diskinterface = default_diskinterface
diskpool = default_pool
elif isinstance(disk, dict):
disksize = disk.get('size', default_disksize)
diskthin = disk.get('thin', default_diskthin)
diskinterface = disk.get('interface', default_diskinterface)
diskpool = disk.get('pool', default_pool)
if index < len(currentdisks) and image is not None:
currentdisk = currentdisks[index]
currentsize = convert(1000 * currentdisk.capacityInKB, GB=False)
if int(currentsize) < disksize:
pprint("Waiting for image disk %s to be resized" % index)
currentdisk.capacityInKB = disksize * 1048576
diskspec = vim.vm.device.VirtualDeviceSpec(device=currentdisk, operation="edit")
devconfspec.append(diskspec)
continue
disksize = disksize * 1048576
if diskpool not in datastores:
datastore = find(si, rootFolder, vim.Datastore, diskpool)
if not datastore:
return {'result': 'failure', 'reason': "Pool %s not found" % diskpool}
else:
datastores[diskpool] = datastore
if index == 0:
scsispec = createscsispec()
devconfspec.append(scsispec)
diskspec = creatediskspec(index, disksize, datastore, diskmode, diskthin)
devconfspec.append(diskspec)
# NICSPEC
for index, net in enumerate(nets):
netname = net['name'] if isinstance(net, dict) else net
if netname == 'default':
netname = 'VM Network'
if index < len(currentnics):
currentnic = currentnics[index]
try:
currentnetwork = currentnic.backing.deviceName
except:
currentswitchuuid = currentnic.backing.port.switchUuid
currentportgroupkey = currentnic.backing.port.portgroupKey
for dvsnet in self.portgs:
if self.portgs[dvsnet][0] == currentswitchuuid and\
self.portgs[dvsnet][1] == currentportgroupkey:
currentnetwork = dvsnet
if currentnetwork != netname:
if netname in self.portgs:
switchuuid = self.portgs[netname][0]
portgroupkey = self.portgs[netname][1]
currentnic.backing.port.switchUuid = switchuuid
currentnic.backing.port.portgroupKey = portgroupkey
nicspec = vim.vm.device.VirtualDeviceSpec(device=currentnic, operation="edit")
devconfspec.append(nicspec)
elif netname in self.networks:
currentnic.backing.deviceName = netname
nicspec = vim.vm.device.VirtualDeviceSpec(device=currentnic, operation="edit")
devconfspec.append(nicspec)
else:
return {'result': 'failure', 'reason': "Invalid network %s" % netname}
continue
nicname = 'Network Adapter %d' % (index + 1)
nictype = net['type'] if isinstance(net, dict) and 'type' in net else None
if netname in self.portgs:
switchuuid = self.portgs[netname][0]
portgroupkey = self.portgs[netname][1]
nicspec = createdvsnicspec(nicname, netname, switchuuid, portgroupkey, nictype=nictype)
elif netname in self.networks:
nicspec = createnicspec(nicname, netname, nictype=nictype)
else:
return {'result': 'failure', 'reason': "Invalid network %s" % netname}
devconfspec.append(nicspec)
if iso:
if '/' not in iso:
matchingisos = [i for i in self._getisos() if i.endswith(iso)]
if matchingisos:
iso = matchingisos[0]
else:
return {'result': 'failure', 'reason': "Iso %s not found" % iso}
cdspec = createisospec(iso)
devconfspec.append(cdspec)
# bootoptions = vim.option.OptionValue(key='bios.bootDeviceClasses',value='allow:hd,cd,fd,net')
# confspec.bootOptions = vim.vm.BootOptions(bootOrder=[vim.vm.BootOptions.BootableCdromDevice()])
confspec.deviceChange = devconfspec
t = vm.Reconfigure(confspec)
waitForMe(t)
if start:
t = vm.PowerOnVM_Task(None)
waitForMe(t)
return {'result': 'success'}
def start(self, name):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
if vm.runtime.powerState == "poweredOff":
t = vm.PowerOnVM_Task(None)
waitForMe(t)
return {'result': 'success'}
def stop(self, name):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
if vm.runtime.powerState == "poweredOn":
t = vm.PowerOffVM_Task()
waitForMe(t)
return {'result': 'success'}
def status(self, name):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
return vm.runtime.powerState if vm is not None else ''
def delete(self, name, snapshots=False):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
plan, image, kube = 'kvirt', None, None
vmpath = vm.summary.config.vmPathName.replace('/%s.vmx' % name, '')
for entry in vm.config.extraConfig:
if entry.key == 'image':
image = entry.value
if entry.key == 'plan':
plan = entry.value
if entry.key == 'kube':
kube = entry.value
if vm.runtime.powerState == "poweredOn":
t = vm.PowerOffVM_Task()
waitForMe(t)
t = vm.Destroy_Task()
waitForMe(t)
if image is not None and 'coreos' not in image and 'rhcos' not in image and\
'fcos' not in image and vmpath.endswith(name):
isopath = "%s/%s.ISO" % (self.isofolder, name) if self.isofolder is not None else vmpath
deletedirectory(si, dc, isopath)
if kube is not None:
clusterfolder = find(si, vmFolder, vim.Folder, kube)
if clusterfolder is not None and len(clusterfolder.childEntity) == 0:
clusterfolder.Destroy()
elif plan != 'kvirt':
planfolder = find(si, vmFolder, vim.Folder, plan)
if planfolder is not None and len(planfolder.childEntity) == 0:
planfolder.Destroy()
return {'result': 'success'}
def console(self, name, tunnel=False, web=False):
si = self.si
dc = self.dc
vcip = self.vcip
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
print("VM %s not found" % name)
return
elif vm.runtime.powerState == "poweredOff":
print("VM down")
return
extraconfig = vm.config.extraConfig
vncfound = False
for extra in extraconfig:
key, value = extra.key, extra.value
if 'vnc' in key and 'port' in key:
vncfound = True
vncport = value
break
else:
continue
if vncfound:
host = vm.runtime.host.name
url = "vnc://%s:%s" % (host, vncport)
consolecommand = "remote-viewer %s &" % (url)
if web:
return url
if self.debug or os.path.exists("/i_am_a_container"):
print(consolecommand)
if not os.path.exists("/i_am_a_container"):
os.popen(consolecommand)
else:
content = si.RetrieveContent()
sgid = content.about.instanceUuid
cert = get_server_certificate((self.vcip, 443))
cert_deserialize = x509.load_pem_x509_certificate(cert.encode(), default_backend())
finger_print = hexlify(cert_deserialize.fingerprint(hashes.SHA1())).decode('utf-8')
sha1 = ":".join([finger_print[i: i + 2] for i in range(0, len(finger_print), 2)])
vcenter_data = content.setting
vcenter_settings = vcenter_data.setting
for item in vcenter_settings:
key = getattr(item, 'key')
if key == 'VirtualCenter.FQDN':
fqdn = getattr(item, 'value')
sessionmanager = si.content.sessionManager
session = sessionmanager.AcquireCloneTicket()
vmid = vm._moId
vmurl = "https://%s/ui/webconsole.html?" % vcip
vmurl += "vmId=%s&vmName=%s&serverGuid=%s&host=%s&sessionTicket=%s&thumbprint=%s" % (vmid, name, sgid, fqdn,
session, sha1)
if web:
return vmurl
if self.debug or os.path.exists("/i_am_a_container"):
msg = "Open the following url:\n%s" % vmurl if os.path.exists("/i_am_a_container") else vmurl
pprint(msg)
else:
pprint("Opening url %s" % vmurl)
webbrowser.open(vmurl, new=2, autoraise=True)
def info(self, name, output='plain', fields=[], values=False, vm=None, debug=False):
translation = {'poweredOff': 'down', 'poweredOn': 'up', 'suspended': 'suspended'}
yamlinfo = {}
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
if vm is None:
vm = findvm(si, vmFolder, name)
if vm is None:
error("VM %s not found" % name)
return {}
summary = vm.summary
yamlinfo['name'] = name
yamlinfo['id'] = summary.config.instanceUuid
yamlinfo['cpus'] = vm.config.hardware.numCPU
yamlinfo['memory'] = vm.config.hardware.memoryMB
yamlinfo['status'] = translation[vm.runtime.powerState]
yamlinfo['nets'] = []
yamlinfo['disks'] = []
devices = vm.config.hardware.device
mainmac = None
for number, dev in enumerate(devices):
if "addressType" in dir(dev):
try:
network = dev.backing.deviceName
except:
switchuuid = dev.backing.port.switchUuid
portgroupkey = dev.backing.port.portgroupKey
for dvsnet in self.portgs:
if self.portgs[dvsnet][0] == switchuuid and self.portgs[dvsnet][1] == portgroupkey:
network = dvsnet
device = dev.deviceInfo.label
devicename = type(dev).__name__.replace('vim.vm.device.Virtual', '').lower()
networktype = devicename
mac = dev.macAddress
if mainmac is None:
mainmac = mac
net = {'device': device, 'mac': mac, 'net': network, 'type': networktype}
yamlinfo['nets'].append(net)
if type(dev).__name__ == 'vim.vm.device.VirtualDisk':
device = "disk%s" % dev.unitNumber
disksize = convert(1000 * dev.capacityInKB, GB=False)
diskformat = dev.backing.diskMode
drivertype = 'thin' if dev.backing.thinProvisioned else 'thick'
path = dev.backing.datastore.name
disk = {'device': device, 'size': int(disksize), 'format': diskformat, 'type': drivertype, 'path': path}
yamlinfo['disks'].append(disk)
if vm.runtime.powerState == "poweredOn":
yamlinfo['host'] = vm.runtime.host.name
for nic in vm.guest.net:
currentmac = nic.macAddress
currentips = nic.ipAddress
if currentmac == mainmac and currentips:
yamlinfo['ip'] = currentips[0]
for entry in vm.config.extraConfig:
if entry.key in METADATA_FIELDS:
yamlinfo[entry.key] = entry.value
if entry.key == 'image':
yamlinfo['user'] = common.get_user(entry.value)
if debug:
yamlinfo['debug'] = vm.config.extraConfig
return yamlinfo
def list(self):
rootFolder = self.rootFolder
si = self.si
vms = []
view = si.content.viewManager.CreateContainerView(rootFolder, [vim.VirtualMachine], True)
vmlist = collectproperties(si, view=view, objtype=vim.VirtualMachine, pathset=['name'], includemors=True)
for o in vmlist:
vm = o['obj']
if vm.summary.runtime.connectionState != 'orphaned' and not vm.config.template:
if self.filtervms and 'plan' not in [x.key for x in vm.config.extraConfig]:
continue
vms.append(self.info(o['name'], vm=vm))
return sorted(vms, key=lambda x: x['name'])
def list_pools(self):
pools = []
rootFolder = self.rootFolder
si = self.si
# dc = self.dc
clu = find(si, rootFolder, vim.ComputeResource, self.clu)
for dts in clu.datastore:
pools.append(dts.name)
# datastorename = dts.name
# total = dssize(dts)[0].replace('GB', '')
# available = dssize(dts)[1].replace('GB', '')
# results[datastorename] = [float(total), float(available), dc.name]
return pools
def beststorage(self):
rootFolder = self.rootFolder
si = self.si
clu = find(si, rootFolder, vim.ComputeResource, self.clu)
bestds = ''
bestsize = 0
for dts in clu.datastore:
datastorename = dts.name
available = float(dssize(dts)[1].replace('GB', ''))
if available > bestsize:
bestsize = available
bestds = datastorename
return bestds
def _getisos(self):
rootFolder = self.rootFolder
si = self.si
clu = find(si, rootFolder, vim.ComputeResource, self.clu)
isos = []
results = {}
searchspec = vim.host.DatastoreBrowser.SearchSpec()
filequery = [vim.host.DatastoreBrowser.IsoImageQuery(), vim.host.DatastoreBrowser.FolderQuery()]
filequeryflags = vim.host.DatastoreBrowser.FileInfo.Details()
filequeryflags.fileSize = True
filequeryflags.modification = False
filequeryflags.fileOwner = False
filequeryflags.fileType = False
searchspec.query = filequery
searchspec.details = filequeryflags
searchspec.sortFoldersFirst = True
searchspec.searchCaseInsensitive = True
for dts in clu.datastore:
datastorename = dts.name
datastorepath = "[" + datastorename + "]"
browser = dts.browser
t = browser.SearchDatastore_Task(datastorepath, searchspec)
waitForMe(t)
result = t.info.result
fileinfo = result.file
for element in fileinfo:
folderpath = element.path
if not folderpath.endswith('iso') and 'ISO' in folderpath.upper():
t = browser.SearchDatastoreSubFolders_Task("%s%s" % (datastorepath, folderpath), searchspec)
waitForMe(t)
results = t.info.result
for r in results:
fileinfo = r.file
for isofile in fileinfo:
path = isofile.path
if path.endswith('.iso'):
isos.append("%s/%s/%s" % (datastorepath, folderpath, path))
return isos
def volumes(self, iso=False):
if iso:
return self._getisos()
si = self.si
rootFolder = self.rootFolder
o = si.content.viewManager.CreateContainerView(rootFolder, [vim.VirtualMachine], True)
vmlist = o.view
o.Destroy()
return [v.name for v
in vmlist if v.config.template and v.summary is not
None and v.summary.runtime.connectionState != 'orphaned']
def update_metadata(self, name, metatype, metavalue, append=False):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
configspec = vim.vm.ConfigSpec()
opt = vim.option.OptionValue()
opt.key = metatype
opt.value = metavalue
configspec.extraConfig = [opt]
t = vm.ReconfigVM_Task(configspec)
waitForMe(t)
def update_memory(self, name, memory):
print("not implemented")
return
def update_cpus(self, name, numcpus):
print("not implemented")
return
def update_start(self, name, start=True):
print("not implemented")
return
def update_information(self, name, information):
self.update_metadata(name, 'information', information)
return
def update_iso(self, name, iso):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
isos = [i for i in self._getisos() if i.endswith(iso)]
if not isos:
error("Iso %s not found.Leaving..." % iso)
return {'result': 'failure', 'reason': "Iso %s not found" % iso}
else:
iso = isos[0]
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
c = changecd(self.si, vm, iso)
waitForMe(c)
return {'result': 'success'}
def dnsinfo(self, name):
return None, None
def _uploadimage(self, pool, origin, directory, isofolder=None):
si = self.si
rootFolder = self.rootFolder
datastore = find(si, rootFolder, vim.Datastore, pool)
if not datastore:
return {'result': 'failure', 'reason': "Pool %s not found" % pool}
destination = os.path.basename(origin)
if isofolder is not None:
directory = isofolder
url = "https://%s:443/folder/%s/%s?dcPath=%s&dsName=%s" % (self.vcip, directory, destination, self.dc.name,
pool)
client_cookie = si._stub.cookie
cookie_name = client_cookie.split("=", 1)[0]
cookie_value = client_cookie.split("=", 1)[1].split(";", 1)[0]
cookie_path = client_cookie.split("=", 1)[1].split(";", 1)[1].split(";", 1)[0].lstrip()
cookie_text = " " + cookie_value + "; $" + cookie_path
cookie = {cookie_name: cookie_text}
headers = {'Content-Type': 'application/octet-stream'}
with open(origin, "rb") as f:
if hasattr(requests.packages.urllib3, 'disable_warnings'):
requests.packages.urllib3.disable_warnings()
try:
r = requests.put(url, data=f, headers=headers, cookies=cookie, verify=False)
except:
url = url.replace('/folder', '')
r = requests.put(url, data=f, headers=headers, cookies=cookie, verify=False)
if r.status_code not in [200, 201]:
error("Got status %s with reason: %s" % (r.status_code, r.reason))
def get_pool_path(self, pool):
return pool
def add_disk(self, name, size=1, pool=None, thin=True, image=None, shareable=False, existing=None,
interface='virtio', novm=False, overrides={}):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
spec = vim.vm.ConfigSpec()
unit_number = 0
for dev in vm.config.hardware.device:
if hasattr(dev.backing, 'fileName'):
unit_number = int(dev.unitNumber) + 1
if unit_number == 7:
unit_number = 8
if isinstance(dev, vim.vm.device.VirtualSCSIController):
controller = dev
new_disk_kb = int(size) * 1024 * 1024
disk_spec = vim.vm.device.VirtualDeviceSpec()
disk_spec.fileOperation = "create"
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
disk_spec.device = vim.vm.device.VirtualDisk()
disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
disk_spec.device.backing.thinProvisioned = thin
disk_spec.device.backing.diskMode = 'persistent'
disk_spec.device.unitNumber = unit_number
disk_spec.device.capacityInKB = new_disk_kb
disk_spec.device.controllerKey = controller.key
dev_changes = [disk_spec]
spec.deviceChange = dev_changes
t = vm.ReconfigVM_Task(spec=spec)
waitForMe(t)
return {'result': 'success'}
def delete_disk(self, name=None, diskname=None, pool=None, novm=False):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
for dev in vm.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualDisk) and dev.deviceInfo.label == diskname:
devspec = vim.vm.device.VirtualDeviceSpec()
devspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
devspec.device = dev
spec = vim.vm.ConfigSpec()
spec.deviceChange = [devspec]
t = vm.ReconfigVM_Task(spec=spec)
waitForMe(t)
return {'result': 'success'}
return {'result': 'failure', 'reason': "Disk %s not found in %s" % (diskname, name)}
def add_nic(self, name, network):
if network == 'default':
network = 'VM Network'
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
spec = vim.vm.ConfigSpec()
nicnumber = len([dev for dev in vm.config.hardware.device if "addressType" in dir(dev)])
nicname = 'Network adapter %d' % (nicnumber + 1)
nicspec = createnicspec(nicname, network)
nic_changes = [nicspec]
spec.deviceChange = nic_changes
t = vm.ReconfigVM_Task(spec=spec)
waitForMe(t)
return {'result': 'success'}
def delete_nic(self, name, interface):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
for dev in vm.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualEthernetCard) and dev.deviceInfo.label == interface:
devspec = vim.vm.device.VirtualDeviceSpec()
devspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
devspec.device = dev
spec = vim.vm.ConfigSpec()
spec.deviceChange = [devspec]
t = vm.ReconfigVM_Task(spec=spec)
waitForMe(t)
return {'result': 'success'}
return {'result': 'failure', 'reason': "Nic %s not found in %s" % (interface, name)}
def list_networks(self):
si = self.si
rootFolder = si.content.rootFolder
networks = {}
view = si.content.viewManager.CreateContainerView(rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
dvslist = collectproperties(si, view=view, objtype=vim.dvs.DistributedVirtualPortgroup, pathset=['name'],
includemors=True)
view = si.content.viewManager.CreateContainerView(rootFolder, [vim.Network], True)
netlist = collectproperties(si, view=view, objtype=vim.Network, pathset=['name'], includemors=True)
for o in netlist:
network = o['obj']
cidr, dhcp, domainname = '', '', ''
mode = 'accessible' if network.summary.accessible else 'notaccessible'
networks[network.name] = {'cidr': cidr, 'dhcp': dhcp, 'domain': domainname, 'type': 'routed', 'mode': mode}
for o in dvslist:
network = o['obj']
cidr, dhcp, domainname, mode = '', '', '', ''
networks[network.name] = {'cidr': cidr, 'dhcp': dhcp, 'domain': domainname, 'type': 'routed', 'mode': mode}
return networks
def create_network(self, name, cidr=None, dhcp=True, nat=True, domain=None, plan='kvirt', overrides={}):
si = self.si
cluster = find(si, self.rootFolder, vim.ComputeResource, self.clu)
networkFolder = self.dc.networkFolder
rootFolder = self.rootFolder
net = find(si, rootFolder, vim.Network, name)
if net is not None:
return {'result': 'failure', 'reason': "Network %s already there" % name}
o = si.content.viewManager.CreateContainerView(rootFolder, [vim.DistributedVirtualSwitch], True)
dvnetworks = o.view
o.Destroy()
for dvnetw in dvnetworks:
for portg in dvnetw.portgroup:
if portg.name == name:
return {'result': 'failure', 'reason': "Network %s already there" % name}
if overrides.get('distributed', False):
pnic_specs = []
dvs_host_configs = []
uplink_port_names = []
dvs_create_spec = vim.DistributedVirtualSwitch.CreateSpec()
dvs_config_spec = vim.DistributedVirtualSwitch.ConfigSpec()
dvs_config_spec.name = name
dvs_config_spec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
for x in range(len(cluster.host)):
uplink_port_names.append("dvUplink%d" % x)
for host in cluster.host:
dvs_config_spec.uplinkPortPolicy.uplinkPortName = uplink_port_names
dvs_config_spec.maxPorts = 2000
pnic_spec = vim.dvs.HostMember.PnicSpec()
pnic_spec.pnicDevice = 'vmnic1'
pnic_specs.append(pnic_spec)
dvs_host_config = vim.dvs.HostMember.ConfigSpec()
dvs_host_config.operation = vim.ConfigSpecOperation.add
dvs_host_config.host = host
dvs_host_configs.append(dvs_host_config)
dvs_host_config.backing = vim.dvs.HostMember.PnicBacking()
dvs_host_config.backing.pnicSpec = pnic_specs
dvs_config_spec.host = dvs_host_configs
dvs_create_spec.configSpec = dvs_config_spec
dvs_create_spec.productInfo = vim.dvs.ProductSpec(version='5.1.0')
networkFolder.CreateDistributedVirtualSwitch(dvs_create_spec)
else:
return {'result': 'failure', 'reason': "Not implemented yet for non dvs networks"}
return {'result': 'success'}
def delete_network(self, name=None, cidr=None):
si = self.si
rootFolder = self.rootFolder
try:
net = find(si, rootFolder, vim.dvs.DistributedVirtualPortgroup, name)
net.Destroy()
except:
try:
net = find(si, rootFolder, vim.Network, name)
net.Destroy()
except:
return {'result': 'failure', 'reason': "Network %s not found" % name}
return {'result': 'success'}
def vm_ports(self, name):
return []
def add_image(self, url, pool, short=None, cmd=None, name=None, size=None):
si = self.si
rootFolder = self.rootFolder
clu = find(si, rootFolder, vim.ComputeResource, self.clu)
resourcepool = clu.resourcePool
vmFolder = self.dc.vmFolder
manager = si.content.ovfManager
shortimage = os.path.basename(url).split('?')[0]
if not shortimage.endswith('ova') and not shortimage.endswith('zip') and find_executable('qemu-img') is None:
msg = "qemu-img is required for conversion"
error(msg)
return {'result': 'failure', 'reason': msg}
if name is None:
name = shortimage.replace('.ova', '').replace('.x86_64', '')
if shortimage in self.volumes():
pprint("Template %s already there" % shortimage)
return {'result': 'success'}
if not find(si, rootFolder, vim.Datastore, pool):
return {'result': 'failure', 'reason': "Pool %s not found" % pool}
if not os.path.exists('/tmp/%s' % shortimage):
pprint("Downloading locally %s" % shortimage)
downloadcmd = "curl -Lo /tmp/%s -f '%s'" % (shortimage, url)
code = os.system(downloadcmd)
if code != 0:
return {'result': 'failure', 'reason': "Unable to download indicated image"}
else:
pprint("Using found /tmp/%s" % shortimage)
vmdk_path = None
ovf_path = None
if url.endswith('zip'):
with ZipFile("/tmp/%s" % shortimage) as zipf:
for _fil in zipf.namelist():
if _fil.endswith('vmdk'):
vmdk_path = '/tmp/%s' % _fil
elif _fil.endswith('ovf'):
ovf_path = '/tmp/%s' % _fil
if vmdk_path is None or ovf_path is None:
return {'result': 'failure', 'reason': "Incorrect ova file"}
zipf.extractall('/tmp')
elif url.endswith('ova'):
with tarfile.open("/tmp/%s" % shortimage) as tar:
for _fil in [x.name for x in tar.getmembers()]:
if _fil.endswith('vmdk'):
vmdk_path = '/tmp/%s' % _fil
elif _fil.endswith('ovf'):
ovf_path = '/tmp/%s' % _fil
if vmdk_path is None or ovf_path is None:
return {'result': 'failure', 'reason': "Incorrect ova file"}
tar.extractall('/tmp')
else:
extension = os.path.splitext(shortimage)[1].replace('.', '')
vmdk_path = "/tmp/%s" % shortimage.replace(extension, 'vmdk')
if not os.path.exists(vmdk_path):
pprint("Converting qcow2 file to vmdk")
os.popen("qemu-img convert -O vmdk -o subformat=streamOptimized /tmp/%s %s" % (shortimage, vmdk_path))
ovf_path = "/tmp/%s" % shortimage.replace(extension, 'ovf')
commondir = os.path.dirname(common.pprint.__code__.co_filename)
time.sleep(5)
vmdk_info = json.loads(os.popen("qemu-img info %s --output json" % vmdk_path).read())
virtual_size = vmdk_info['virtual-size']
actual_size = vmdk_info['actual-size']
ovfcontent = open("%s/vm.ovf.j2" % commondir).read().format(name=shortimage, virtual_size=virtual_size,
actual_size=actual_size)
with open(ovf_path, 'w') as f:
f.write(ovfcontent)
ovfd = open(ovf_path).read()
ovfd = re.sub('<Name>.*</Name>', '<Name>%s</Name>' % name, ovfd)
datastore = find(si, rootFolder, vim.Datastore, pool)
network = find(si, rootFolder, vim.Network, 'VM Network')
networkmapping = vim.OvfManager.NetworkMapping.Array()
nm = vim.OvfManager.NetworkMapping(name="VM Network", network=network)
networkmapping.append(nm)
spec_params = vim.OvfManager.CreateImportSpecParams(diskProvisioning="thin", networkMapping=networkmapping)
import_spec = manager.CreateImportSpec(ovfd, resourcepool, datastore, spec_params)
lease = resourcepool.ImportVApp(import_spec.importSpec, vmFolder)
while True:
if lease.state == vim.HttpNfcLease.State.ready:
pprint("Uploading vmdk")
warning("If hitting any issues when uploading image, please upload manually")
host = self._getfirsthost()
url = lease.info.deviceUrl[0].url.replace('*', host.name)
keepalive_thread = Thread(target=keep_lease_alive, args=(lease,))
keepalive_thread.start()
upload_cmd = (
"curl -sS -X POST --insecure -T %s -H 'Content-Type: \
application/x-vnd.vmware-streamVmdk' %s" % (vmdk_path, url))
os.system(upload_cmd)
# lease.Complete()
lease.HttpNfcLeaseComplete()
keepalive_thread.join()
# self.export(name)
# os.remove('/tmp/%s' % shortimage)
# os.remove(ovf_path)
# os.remove(vmdk_path)
return {'result': 'success'}
elif lease.state == vim.HttpNfcLease.State.error:
error("Lease error: %s" % lease.error)
os._exit(1)
def _getfirsthost(self):
si = self.si
rootFolder = self.rootFolder
o = si.content.viewManager.CreateContainerView(rootFolder, [vim.HostSystem], True)
view = o.view
o.Destroy()
host = view[0] if view else None
return host
def report(self):
si = self.si
about = si.content.about
print("Host: %s" % self.vcip)
print("Datacenter: %s" % self.dc.name)
print("Version: %s" % about.version)
print("Api Version: %s" % about.apiVersion)
print("Datacenter: %s" % self.dc.name)
rootFolder = self.rootFolder
o = si.content.viewManager.CreateContainerView(rootFolder, [vim.HostSystem], True)
view = o.view
o.Destroy()
for h in view:
print("Host: %s" % h.name)
o = si.content.viewManager.CreateContainerView(rootFolder, [vim.ComputeResource], True)
view = o.view
o.Destroy()
for clu in view:
print("Cluster: %s" % clu.name)
for dts in clu.datastore:
print("Pool: %s" % dts.name)
def delete_image(self, image, pool=None):
si = self.si
vmFolder = self.dc.vmFolder
vm = findvm(si, vmFolder, image)
if vm is None or not vm.config.template:
return {'result': 'failure', 'reason': 'Image %s not found' % image}
else:
t = vm.Destroy_Task()
waitForMe(t)
return {'result': 'success'}
def export(self, name, image=None):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
if vm.runtime.powerState == "poweredOn":
t = vm.PowerOffVM_Task()
waitForMe(t)
vm.MarkAsTemplate()
if image is not None:
vm.Rename(image)
return {'result': 'success'}
def list_dns(self, domain):
return []
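# Hypothetical usage sketch (not part of the provider): the vCenter host, credentials,
# datacenter, cluster, pool and VM names below are placeholders for illustration only.
if __name__ == '__main__':
    k = Ksphere('vcenter.example.com', 'administrator@vsphere.local', 'password',
                'MyDatacenter', 'MyCluster')
    if not k.exists('examplevm'):
        k.create('examplevm', memory=1024, numcpus=2, pool='datastore1',
                 disks=[{'size': 10}], nets=['VM Network'], start=True)
    print(k.info('examplevm'))
    k.close()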
|
watcher.py
|
import dataclasses
import multiprocessing
from multiprocessing import Queue
import os
from pathlib import Path
import queue
import sys
import time
import signal
from typing import *
from crosshair.auditwall import engage_auditwall
from crosshair.auditwall import opened_auditwall
from crosshair.core_and_libs import analyze_module
from crosshair.core_and_libs import run_checkables
from crosshair.core_and_libs import AnalysisMessage
from crosshair.core_and_libs import MessageType
from crosshair.fnutil import walk_paths
from crosshair.options import AnalysisOptionSet, AnalysisOptions
from crosshair.options import DEFAULT_OPTIONS
from crosshair.fnutil import NotFound
from crosshair.util import debug
from crosshair.util import load_file
from crosshair.util import set_debug
from crosshair.util import CrosshairInternal
from crosshair.util import ErrorDuringImport
# Use "spawn" in stead of fork() because we've already imported the code we're watching:
multiproc_spawn = multiprocessing.get_context("spawn")
def mtime(path: Path) -> Optional[float]:
try:
return path.stat().st_mtime
except FileNotFoundError:
return None
WorkItemInput = Tuple[Path, AnalysisOptionSet, float] # (file, opts, deadline)
WorkItemOutput = Tuple[Path, Counter[str], List[AnalysisMessage]]
def import_error_msg(err: ErrorDuringImport) -> AnalysisMessage:
cause = err.__cause__ if err.__cause__ else err
tb = cause.__traceback__
if tb:
filename, line = tb.tb_frame.f_code.co_filename, tb.tb_lineno
else:
filename, line = "<unknown>", 0
return AnalysisMessage(MessageType.IMPORT_ERR, str(cause), filename, line, 0, "")
def pool_worker_process_item(
item: WorkItemInput,
) -> Tuple[Counter[str], List[AnalysisMessage]]:
filename, options, deadline = item
stats: Counter[str] = Counter()
options.stats = stats
try:
module = load_file(str(filename))
except NotFound as e:
debug(f'Not analyzing "{filename}" because sub-module import failed: {e}')
return (stats, [])
except ErrorDuringImport as e:
debug(f'Not analyzing "{filename}" because import failed: {e}')
return (stats, [import_error_msg(e)])
messages = run_checkables(analyze_module(module, options))
return (stats, messages)
def pool_worker_main(item: WorkItemInput, output: "Queue[WorkItemOutput]") -> None:
filename = item[0]
try:
# TODO figure out a more reliable way to suppress this. Redirect output?
# Ignore ctrl-c in workers to reduce noisy tracebacks (the parent will kill us):
signal.signal(signal.SIGINT, signal.SIG_IGN)
if hasattr(os, "nice"): # analysis should run at a low priority
os.nice(10)
set_debug(False)
engage_auditwall()
(stats, messages) = pool_worker_process_item(item)
output.put((filename, stats, messages))
except BaseException as e:
raise CrosshairInternal("Worker failed while analyzing " + str(filename)) from e
class Pool:
_workers: List[Tuple[multiprocessing.Process, WorkItemInput]]
_work: List[WorkItemInput]
_results: "Queue[WorkItemOutput]"
_max_processes: int
def __init__(self, max_processes: int) -> None:
self._workers = []
self._work = []
self._results = multiproc_spawn.Queue()
self._max_processes = max_processes
def _spawn_workers(self):
work_list = self._work
workers = self._workers
while work_list and len(self._workers) < self._max_processes:
work_item = work_list.pop()
with opened_auditwall():
process = multiproc_spawn.Process(
target=pool_worker_main, args=(work_item, self._results)
)
workers.append((process, work_item))
process.start()
def _prune_workers(self, curtime):
for worker, item in self._workers:
(_, _, deadline) = item
if worker.is_alive() and curtime > deadline:
debug("Killing worker over deadline", worker)
with opened_auditwall():
worker.terminate()
time.sleep(0.5)
if worker.is_alive():
worker.kill()
worker.join()
self._workers = [(w, i) for w, i in self._workers if w.is_alive()]
def terminate(self):
self._prune_workers(float("+inf"))
self._work = []
def garden_workers(self):
self._prune_workers(time.time())
self._spawn_workers()
def is_working(self):
return self._workers or self._work
def submit(self, item: WorkItemInput) -> None:
self._work.append(item)
def has_result(self):
return not self._results.empty()
def get_result(self, timeout: float) -> Optional[WorkItemOutput]:
try:
return self._results.get(timeout=timeout)
except queue.Empty:
return None
def worker_initializer():
"""Ignore CTRL+C in the worker process."""
signal.signal(signal.SIGINT, signal.SIG_IGN)
class Watcher:
_paths: Set[Path]
_pool: Pool
_modtimes: Dict[Path, float]
_options: AnalysisOptionSet
_next_file_check: float = 0.0
_change_flag: bool = False
def __init__(
self, files: Iterable[Path], options: AnalysisOptionSet = AnalysisOptionSet()
):
self._paths = set(files)
self._pool = self.startpool()
self._modtimes = {}
self._options = options
try:
# just to force an exit if we can't find a path:
list(walk_paths(self._paths))
except FileNotFoundError as exc:
print(f'Watch path "{exc.args[0]}" does not exist.', file=sys.stderr)
sys.exit(2)
def startpool(self) -> Pool:
return Pool(multiprocessing.cpu_count() - 1)
def run_iteration(
self, max_condition_timeout=0.5
) -> Iterator[Tuple[Counter[str], List[AnalysisMessage]]]:
debug(f"starting pass with a condition timeout of {max_condition_timeout}")
debug("Files:", self._modtimes.keys())
pool = self._pool
for filename in self._modtimes.keys():
worker_timeout = max(10.0, max_condition_timeout * 100.0)
iter_options = AnalysisOptionSet(
per_condition_timeout=max_condition_timeout,
per_path_timeout=max_condition_timeout / 4,
)
options = self._options.overlay(iter_options)
pool.submit((filename, options, time.time() + worker_timeout))
pool.garden_workers()
while pool.is_working():
result = pool.get_result(timeout=1.0)
if result is not None:
(_, counters, messages) = result
yield (counters, messages)
if pool.has_result():
continue
if time.time() >= self._next_file_check:
self._next_file_check = time.time() + 1.0
if self.check_changed():
self._change_flag = True
debug("Aborting iteration on change detection")
pool.terminate()
yield (Counter(), []) # to break the parent from waiting
self._pool = self.startpool()
return
pool.garden_workers()
debug("Worker pool tasks complete")
def check_changed(self) -> bool:
unchecked_modtimes = self._modtimes.copy()
changed = False
for curfile in walk_paths(self._paths):
cur_mtime = mtime(curfile)
if cur_mtime is None:
# Unlikely; race condition on an interleaved file delete
continue
if cur_mtime == unchecked_modtimes.pop(curfile, None):
continue
changed = True
self._modtimes[curfile] = cur_mtime
if unchecked_modtimes:
# Files known but not found; something was deleted
changed = True
for delfile in unchecked_modtimes.keys():
del self._modtimes[delfile]
return changed
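# Minimal usage sketch (not part of the watcher module): prime the modification-time
# cache, then run a single analysis pass over the current directory and print messages.
if __name__ == "__main__":
    watcher = Watcher([Path(".")])
    watcher.check_changed()  # populate _modtimes so run_iteration has files to submit
    for counters, messages in watcher.run_iteration(max_condition_timeout=0.5):
        for message in messages:
            print(f"{message.filename}:{message.line}: {message.message}")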
|
pubsub.py
|
import os, threading
class PublishSubscribeBroker:
"""
Diese Klasse implementiert einen einfachen lokalen Message Broker
zur Umsetzung des Publish/Subscribe (oder auch Observer) Patterns.
Beliebige Threads können üb er die publish()-Methode beliebige
Nachrichten an beliebige Topics senden, wobei ein Topic hierbei
einfach durch seinen Namen identifiziert wird und eine Nachricht
aus den Positionsparametern (*args) und Namensparametern (**kwargs)
der publish()-Methode besteht.
Empfänger kann jede Methode sein, welche die *args und **kwargs
der gesendeten Nachrichten verarbeiten kann. Hierfür können mit
die Empfängermethoden mit subscribe() und unsubscribe() den Topics
zugeordnet werden. Die Empfängermethoden werden dabei stets in
einem eigenen Thread ausgeführt, um sie somit von den Senderthreads
zu entkoppeln.
"""
def __init__(self, threads=os.cpu_count()):
"""
Constructor.
@param threads: size of the thread pool (default = os.cpu_count())
"""
self._topics = {}
def subscribe(self, topic, subscriber):
"""
Attach a receiver method to a topic.
@param topic: name of the topic
@param subscriber: receiver method
"""
if topic not in self._topics:
self._topics[topic] = []
self._topics[topic].append(subscriber)
def unsubscribe(self, topic, subscriber):
"""
Detach a receiver method from a topic.
@param topic: name of the topic
@param subscriber: receiver method
"""
if topic in self._topics:
self._topics[topic].remove(subscriber)
def publish(self, topic, *args, **kwargs):
"""
Send a message.
@param topic: name of the topic
@param *args: arbitrary positional arguments, following Python conventions
@param **kwargs: arbitrary keyword arguments, following Python conventions
"""
if topic not in self._topics:
return
for subscriber in self._topics[topic]:
thread = threading.Thread(target=subscriber, args=args, kwargs=kwargs)
thread.start()
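# Minimal usage sketch (not part of the class): subscribe an example handler to a topic,
# publish one message, and give the subscriber thread a moment to run before exiting.
if __name__ == "__main__":
    import time

    def on_temperature(value, unit="C"):
        print("temperature update: %s %s" % (value, unit))

    broker = PublishSubscribeBroker()
    broker.subscribe("sensors/temperature", on_temperature)
    broker.publish("sensors/temperature", 21.5, unit="C")
    time.sleep(0.1)  # let the subscriber thread finish its print
    broker.unsubscribe("sensors/temperature", on_temperature)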
|
background.py
|
"""Helpers functions to run log-running tasks."""
import threading
from web import utils
from web import webapi as web
from web.http import changequery
from web.webapi import seeother
def background(func):
"""A function decorator to run a long-running function as a background thread."""
def internal(*a, **kw):
web.data() # cache it
tmpctx = web._context[threading.currentThread()]
web._context[threading.currentThread()] = utils.storage(web.ctx.copy())
def newfunc():
web._context[threading.currentThread()] = tmpctx
func(*a, **kw)
myctx = web._context[threading.currentThread()]
for k in myctx.keys():
if k not in ['status', 'headers', 'output']:
try: del myctx[k]
except KeyError: pass
t = threading.Thread(target=newfunc)
background.threaddb[id(t)] = t
t.start()
web.ctx.headers = []
return seeother(changequery(_t=id(t)))
return internal
background.threaddb = {}
def backgrounder(func):
def internal(*a, **kw):
i = web.input(_method='get')
if '_t' in i:
try:
t = background.threaddb[int(i._t)]
except KeyError:
return web.notfound()
web._context[threading.currentThread()] = web._context[t]
return
else:
return func(*a, **kw)
return internal
|
test_multithread.py
|
# -*- coding: utf-8 -*-
import threading
import time
def print_workload(node, tasks):
print "node id=%d, tasks=%d" % (node, tasks)
def process(node, tasks):
print_workload(node, tasks)
while tasks > 0:
tasks -= 1
print_workload(node, tasks)
time.sleep(0.2)
for i in range(1, 6):
t = threading.Thread(target=process, args=(i, i*5))
t.start()
print "thread processing..."
t.join()
print "threading done"
|
test_new_kvstore.py
|
import os
import time
import numpy as np
import socket
from scipy import sparse as spsp
import dgl
import backend as F
import unittest, pytest
from dgl.graph_index import create_graph_index
import multiprocessing as mp
from numpy.testing import assert_array_equal
if os.name != 'nt':
import fcntl
import struct
def get_local_usable_addr():
"""Get local usable IP and port
Returns
-------
str
IP address, e.g., '192.168.8.12:50051'
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
sock.connect(('10.255.255.255', 1))
ip_addr = sock.getsockname()[0]
except ValueError:
ip_addr = '127.0.0.1'
finally:
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
sock.listen(1)
port = sock.getsockname()[1]
sock.close()
return ip_addr + ' ' + str(port)
# Create a one-part graph
node_map = F.tensor([0,0,0,0,0,0], F.int64)
edge_map = F.tensor([0,0,0,0,0,0,0], F.int64)
global_nid = F.tensor([0,1,2,3,4,5], F.int64)
global_eid = F.tensor([0,1,2,3,4,5,6], F.int64)
g = dgl.DGLGraph()
g.add_nodes(6)
g.add_edges(0, 1) # 0
g.add_edges(0, 2) # 1
g.add_edges(0, 3) # 2
g.add_edges(2, 3) # 3
g.add_edges(1, 1) # 4
g.add_edges(0, 4) # 5
g.add_edges(2, 5) # 6
g.ndata[dgl.NID] = global_nid
g.edata[dgl.EID] = global_eid
gpb = dgl.distributed.GraphPartitionBook(part_id=0,
num_parts=1,
node_map=node_map,
edge_map=edge_map,
part_graph=g)
node_policy = dgl.distributed.PartitionPolicy(policy_str='node',
partition_book=gpb)
edge_policy = dgl.distributed.PartitionPolicy(policy_str='edge',
partition_book=gpb)
data_0 = F.tensor([[1.,1.],[1.,1.],[1.,1.],[1.,1.],[1.,1.],[1.,1.]], F.float32)
data_0_1 = F.tensor([1.,2.,3.,4.,5.,6.], F.float32)
data_0_2 = F.tensor([1,2,3,4,5,6], F.int32)
data_0_3 = F.tensor([1,2,3,4,5,6], F.int64)
data_1 = F.tensor([[2.,2.],[2.,2.],[2.,2.],[2.,2.],[2.,2.],[2.,2.],[2.,2.]], F.float32)
data_2 = F.tensor([[0.,0.],[0.,0.],[0.,0.],[0.,0.],[0.,0.],[0.,0.]], F.float32)
def init_zero_func(shape, dtype):
return F.zeros(shape, dtype, F.cpu())
def udf_push(target, name, id_tensor, data_tensor):
target[name][id_tensor] = data_tensor * data_tensor
def add_push(target, name, id_tensor, data_tensor):
target[name][id_tensor] += data_tensor
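# The handlers above are registered later via kvclient.register_push_handler():
# `udf_push` overwrites the selected rows with the element-wise square of the
# incoming tensor, while `add_push` accumulates pushes, which is what
# test_kv_store relies on when it compares against data_tensor * num_clients.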
@unittest.skipIf(os.name == 'nt' or os.getenv('DGLBACKEND') == 'tensorflow', reason='Do not support windows and TF yet')
def test_partition_policy():
assert node_policy.policy_str == 'node'
assert edge_policy.policy_str == 'edge'
assert node_policy.part_id == 0
assert edge_policy.part_id == 0
local_nid = node_policy.to_local(F.tensor([0,1,2,3,4,5]))
local_eid = edge_policy.to_local(F.tensor([0,1,2,3,4,5,6]))
assert_array_equal(F.asnumpy(local_nid), F.asnumpy(F.tensor([0,1,2,3,4,5], F.int64)))
assert_array_equal(F.asnumpy(local_eid), F.asnumpy(F.tensor([0,1,2,3,4,5,6], F.int64)))
nid_partid = node_policy.to_partid(F.tensor([0,1,2,3,4,5], F.int64))
eid_partid = edge_policy.to_partid(F.tensor([0,1,2,3,4,5,6], F.int64))
assert_array_equal(F.asnumpy(nid_partid), F.asnumpy(F.tensor([0,0,0,0,0,0], F.int64)))
assert_array_equal(F.asnumpy(eid_partid), F.asnumpy(F.tensor([0,0,0,0,0,0,0], F.int64)))
assert node_policy.get_data_size() == len(node_map)
assert edge_policy.get_data_size() == len(edge_map)
def start_server(server_id, num_clients):
# Init kvserver
print("Sleep 5 seconds to test client re-connect.")
time.sleep(5)
kvserver = dgl.distributed.KVServer(server_id=server_id,
ip_config='kv_ip_config.txt',
num_clients=num_clients)
kvserver.add_part_policy(node_policy)
kvserver.add_part_policy(edge_policy)
if kvserver.is_backup_server():
kvserver.init_data('data_0', 'node')
kvserver.init_data('data_0_1', 'node')
kvserver.init_data('data_0_2', 'node')
kvserver.init_data('data_0_3', 'node')
else:
kvserver.init_data('data_0', 'node', data_0)
kvserver.init_data('data_0_1', 'node', data_0_1)
kvserver.init_data('data_0_2', 'node', data_0_2)
kvserver.init_data('data_0_3', 'node', data_0_3)
# start server
server_state = dgl.distributed.ServerState(kv_store=kvserver, local_g=None, partition_book=None)
dgl.distributed.start_server(server_id=server_id,
ip_config='kv_ip_config.txt',
num_clients=num_clients,
server_state=server_state)
def start_server_mul_role(server_id, num_clients):
# Init kvserver
kvserver = dgl.distributed.KVServer(server_id=server_id,
ip_config='kv_ip_mul_config.txt',
num_clients=num_clients)
kvserver.add_part_policy(node_policy)
if kvserver.is_backup_server():
kvserver.init_data('data_0', 'node')
else:
kvserver.init_data('data_0', 'node', data_0)
# start server
server_state = dgl.distributed.ServerState(kv_store=kvserver, local_g=None, partition_book=None)
dgl.distributed.start_server(server_id=server_id,
ip_config='kv_ip_mul_config.txt',
num_clients=num_clients,
server_state=server_state)
def start_client(num_clients):
# Note: connect to server first !
dgl.distributed.connect_to_server(ip_config='kv_ip_config.txt')
# Init kvclient
kvclient = dgl.distributed.KVClient(ip_config='kv_ip_config.txt')
kvclient.map_shared_data(partition_book=gpb)
assert dgl.distributed.get_num_client() == num_clients
kvclient.init_data(name='data_1',
shape=F.shape(data_1),
dtype=F.dtype(data_1),
part_policy=edge_policy,
init_func=init_zero_func)
kvclient.init_data(name='data_2',
shape=F.shape(data_2),
dtype=F.dtype(data_2),
part_policy=node_policy,
init_func=init_zero_func)
# Test data_name_list
name_list = kvclient.data_name_list()
print(name_list)
assert 'data_0' in name_list
assert 'data_0_1' in name_list
assert 'data_0_2' in name_list
assert 'data_0_3' in name_list
assert 'data_1' in name_list
assert 'data_2' in name_list
# Test get_meta_data
meta = kvclient.get_data_meta('data_0')
dtype, shape, policy = meta
assert dtype == F.dtype(data_0)
assert shape == F.shape(data_0)
assert policy.policy_str == 'node'
meta = kvclient.get_data_meta('data_0_1')
dtype, shape, policy = meta
assert dtype == F.dtype(data_0_1)
assert shape == F.shape(data_0_1)
assert policy.policy_str == 'node'
meta = kvclient.get_data_meta('data_0_2')
dtype, shape, policy = meta
assert dtype == F.dtype(data_0_2)
assert shape == F.shape(data_0_2)
assert policy.policy_str == 'node'
meta = kvclient.get_data_meta('data_0_3')
dtype, shape, policy = meta
assert dtype == F.dtype(data_0_3)
assert shape == F.shape(data_0_3)
assert policy.policy_str == 'node'
meta = kvclient.get_data_meta('data_1')
dtype, shape, policy = meta
assert dtype == F.dtype(data_1)
assert shape == F.shape(data_1)
assert policy.policy_str == 'edge'
meta = kvclient.get_data_meta('data_2')
dtype, shape, policy = meta
assert dtype == F.dtype(data_2)
assert shape == F.shape(data_2)
assert policy.policy_str == 'node'
# Test push and pull
id_tensor = F.tensor([0,2,4], F.int64)
data_tensor = F.tensor([[6.,6.],[6.,6.],[6.,6.]], F.float32)
kvclient.push(name='data_0',
id_tensor=id_tensor,
data_tensor=data_tensor)
kvclient.push(name='data_1',
id_tensor=id_tensor,
data_tensor=data_tensor)
kvclient.push(name='data_2',
id_tensor=id_tensor,
data_tensor=data_tensor)
res = kvclient.pull(name='data_0', id_tensor=id_tensor)
assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor))
res = kvclient.pull(name='data_1', id_tensor=id_tensor)
assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor))
res = kvclient.pull(name='data_2', id_tensor=id_tensor)
assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor))
# Register new push handler
kvclient.register_push_handler('data_0', udf_push)
kvclient.register_push_handler('data_1', udf_push)
kvclient.register_push_handler('data_2', udf_push)
# Test push and pull
kvclient.push(name='data_0',
id_tensor=id_tensor,
data_tensor=data_tensor)
kvclient.push(name='data_1',
id_tensor=id_tensor,
data_tensor=data_tensor)
kvclient.push(name='data_2',
id_tensor=id_tensor,
data_tensor=data_tensor)
kvclient.barrier()
data_tensor = data_tensor * data_tensor
res = kvclient.pull(name='data_0', id_tensor=id_tensor)
assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor))
res = kvclient.pull(name='data_1', id_tensor=id_tensor)
assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor))
res = kvclient.pull(name='data_2', id_tensor=id_tensor)
assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor))
# Test delete data
kvclient.delete_data('data_0')
kvclient.delete_data('data_1')
kvclient.delete_data('data_2')
# Register new push handler
kvclient.init_data(name='data_3',
shape=F.shape(data_2),
dtype=F.dtype(data_2),
part_policy=node_policy,
init_func=init_zero_func)
kvclient.register_push_handler('data_3', add_push)
data_tensor = F.tensor([[6.,6.],[6.,6.],[6.,6.]], F.float32)
kvclient.barrier()
time.sleep(kvclient.client_id + 1)
print("add...")
kvclient.push(name='data_3',
id_tensor=id_tensor,
data_tensor=data_tensor)
kvclient.barrier()
res = kvclient.pull(name='data_3', id_tensor=id_tensor)
data_tensor = data_tensor * num_clients
assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor))
def start_client_mul_role(i, num_clients):
# Note: connect to server first !
dgl.distributed.connect_to_server(ip_config='kv_ip_mul_config.txt')
# Init kvclient
if i % 2 == 0:
kvclient = dgl.distributed.KVClient(ip_config='kv_ip_mul_config.txt', role='trainer')
else:
kvclient = dgl.distributed.KVClient(ip_config='kv_ip_mul_config.txt', role='sampler')
if i == 2: # block one trainer
time.sleep(5)
kvclient.barrier()
print("i: %d role: %s" % (i, kvclient.role))
@unittest.skipIf(os.name == 'nt' or os.getenv('DGLBACKEND') == 'tensorflow', reason='Do not support windows and TF yet')
def test_kv_store():
ip_config = open("kv_ip_config.txt", "w")
num_servers = 2
num_clients = 2
ip_addr = get_local_usable_addr()
ip_config.write('{} {}\n'.format(ip_addr, num_servers))
ip_config.close()
ctx = mp.get_context('spawn')
pserver_list = []
pclient_list = []
for i in range(num_servers):
pserver = ctx.Process(target=start_server, args=(i, num_clients))
pserver.start()
pserver_list.append(pserver)
for i in range(num_clients):
pclient = ctx.Process(target=start_client, args=(num_clients,))
pclient.start()
pclient_list.append(pclient)
for i in range(num_clients):
pclient_list[i].join()
for i in range(num_servers):
pserver_list[i].join()
@unittest.skipIf(os.name == 'nt' or os.getenv('DGLBACKEND') == 'tensorflow', reason='Do not support windows and TF yet')
def test_kv_multi_role():
ip_config = open("kv_ip_mul_config.txt", "w")
num_servers = 2
num_clients = 10
ip_addr = get_local_usable_addr()
ip_config.write('{} {}\n'.format(ip_addr, num_servers))
ip_config.close()
ctx = mp.get_context('spawn')
pserver_list = []
pclient_list = []
for i in range(num_servers):
pserver = ctx.Process(target=start_server_mul_role, args=(i, num_clients))
pserver.start()
pserver_list.append(pserver)
for i in range(num_clients):
pclient = ctx.Process(target=start_client_mul_role, args=(i, num_clients))
pclient.start()
pclient_list.append(pclient)
for i in range(num_clients):
pclient_list[i].join()
for i in range(num_servers):
pserver_list[i].join()
if __name__ == '__main__':
test_partition_policy()
test_kv_store()
test_kv_multi_role()
|
plugin_event_multiplexer.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an interface for working with multiple event files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import six
from six.moves import queue, xrange # pylint: disable=redefined-builtin
from tensorboard.backend.event_processing import directory_watcher
from tensorboard.backend.event_processing import plugin_event_accumulator as event_accumulator # pylint: disable=line-too-long
from tensorboard.backend.event_processing import io_wrapper
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()
class EventMultiplexer(object):
"""An `EventMultiplexer` manages access to multiple `EventAccumulator`s.
Each `EventAccumulator` is associated with a `run`, which is a self-contained
TensorFlow execution. The `EventMultiplexer` provides methods for extracting
information about events from multiple `run`s.
Example usage for loading specific runs from files:
```python
x = EventMultiplexer({'run1': 'path/to/run1', 'run2': 'path/to/run2'})
x.Reload()
```
Example usage for loading a directory where each subdirectory is a run
```python
(eg:) /parent/directory/path/
/parent/directory/path/run1/
/parent/directory/path/run1/events.out.tfevents.1001
/parent/directory/path/run1/events.out.tfevents.1002
/parent/directory/path/run2/
/parent/directory/path/run2/events.out.tfevents.9232
/parent/directory/path/run3/
/parent/directory/path/run3/events.out.tfevents.9232
x = EventMultiplexer().AddRunsFromDirectory('/parent/directory/path')
(which is equivalent to:)
x = EventMultiplexer({'run1': '/parent/directory/path/run1', 'run2':...}
```
If you would like to watch `/parent/directory/path`, wait for it to be created
(if necessary) and then periodically pick up new runs, use
`AutoloadingMultiplexer`
@@Tensors
"""
def __init__(self,
run_path_map=None,
size_guidance=None,
tensor_size_guidance=None,
purge_orphaned_data=True,
max_reload_threads=None,
event_file_active_filter=None):
"""Constructor for the `EventMultiplexer`.
Args:
run_path_map: Dict `{run: path}` which specifies the
name of a run, and the path to find the associated events. If it is
None, then the EventMultiplexer initializes without any runs.
size_guidance: A dictionary mapping from `tagType` to the number of items
to store for each tag of that type. See
`event_accumulator.EventAccumulator` for details.
tensor_size_guidance: A dictionary mapping from `plugin_name` to
the number of items to store for each tag of that type. See
`event_accumulator.EventAccumulator` for details.
purge_orphaned_data: Whether to discard any events that were "orphaned" by
a TensorFlow restart.
max_reload_threads: The max number of threads that TensorBoard can use
to reload runs. Each thread reloads one run at a time. If not provided,
reloads runs serially (one after another).
event_file_active_filter: Optional predicate for determining whether an
event file latest load timestamp should be considered active. If passed,
this will enable multifile directory loading.
"""
logger.info('Event Multiplexer initializing.')
self._accumulators_mutex = threading.Lock()
self._accumulators = {}
self._paths = {}
self._reload_called = False
self._size_guidance = (size_guidance or
event_accumulator.DEFAULT_SIZE_GUIDANCE)
self._tensor_size_guidance = tensor_size_guidance
self.purge_orphaned_data = purge_orphaned_data
self._max_reload_threads = max_reload_threads or 1
self._event_file_active_filter = event_file_active_filter
if run_path_map is not None:
logger.info('Event Multiplexer doing initialization load for %s',
run_path_map)
for (run, path) in six.iteritems(run_path_map):
self.AddRun(path, run)
logger.info('Event Multiplexer done initializing')
def AddRun(self, path, name=None):
"""Add a run to the multiplexer.
If the name is not specified, it is the same as the path.
If a run by that name exists, and we are already watching the right path,
do nothing. If we are watching a different path, replace the event
accumulator.
If `Reload` has been called, it will `Reload` the newly created
accumulators.
Args:
path: Path to the event files (or event directory) for given run.
name: Name of the run to add. If not provided, is set to path.
Returns:
The `EventMultiplexer`.
"""
name = name or path
accumulator = None
with self._accumulators_mutex:
if name not in self._accumulators or self._paths[name] != path:
if name in self._paths and self._paths[name] != path:
# TODO(@decentralion) - Make it impossible to overwrite an old path
# with a new path (just give the new path a distinct name)
logger.warn('Conflict for name %s: old path %s, new path %s',
name, self._paths[name], path)
logger.info('Constructing EventAccumulator for %s', path)
accumulator = event_accumulator.EventAccumulator(
path,
size_guidance=self._size_guidance,
tensor_size_guidance=self._tensor_size_guidance,
purge_orphaned_data=self.purge_orphaned_data,
event_file_active_filter=self._event_file_active_filter)
self._accumulators[name] = accumulator
self._paths[name] = path
if accumulator:
if self._reload_called:
accumulator.Reload()
return self
def AddRunsFromDirectory(self, path, name=None):
"""Load runs from a directory; recursively walks subdirectories.
If path doesn't exist, no-op. This ensures that it is safe to call
`AddRunsFromDirectory` multiple times, even before the directory is made.
If path is a directory, load event files in the directory (if any exist) and
recursively call AddRunsFromDirectory on any subdirectories. This means you
can call AddRunsFromDirectory at the root of a tree of event logs and
TensorBoard will load them all.
If the `EventMultiplexer` is already loaded this will cause
the newly created accumulators to `Reload()`.
Args:
path: A string path to a directory to load runs from.
name: Optionally, what name to apply to the runs. If name is provided
and the directory contains run subdirectories, the name of each subrun
is the concatenation of the parent name and the subdirectory name. If
name is provided and the directory contains event files, then a run
is added called "name" and with the events from the path.
Raises:
ValueError: If the path exists and isn't a directory.
Returns:
The `EventMultiplexer`.
"""
logger.info('Starting AddRunsFromDirectory: %s', path)
for subdir in io_wrapper.GetLogdirSubdirectories(path):
logger.info('Adding run from directory %s', subdir)
rpath = os.path.relpath(subdir, path)
subname = os.path.join(name, rpath) if name else rpath
self.AddRun(subdir, name=subname)
logger.info('Done with AddRunsFromDirectory: %s', path)
return self
def Reload(self):
"""Call `Reload` on every `EventAccumulator`."""
logger.info('Beginning EventMultiplexer.Reload()')
self._reload_called = True
# Build a list so we're safe even if the list of accumulators is modified
# while we're reloading.
with self._accumulators_mutex:
items = list(self._accumulators.items())
items_queue = queue.Queue()
for item in items:
items_queue.put(item)
# Methods of built-in python containers are thread-safe so long as the GIL
# for the thread exists, but we might as well be careful.
names_to_delete = set()
names_to_delete_mutex = threading.Lock()
def Worker():
"""Keeps reloading accumulators til none are left."""
while True:
try:
name, accumulator = items_queue.get(block=False)
except queue.Empty:
# No more runs to reload.
break
try:
accumulator.Reload()
except (OSError, IOError) as e:
logger.error('Unable to reload accumulator %r: %s', name, e)
except directory_watcher.DirectoryDeletedError:
with names_to_delete_mutex:
names_to_delete.add(name)
finally:
items_queue.task_done()
if self._max_reload_threads > 1:
num_threads = min(
self._max_reload_threads, len(items))
logger.info('Starting %d threads to reload runs', num_threads)
for i in xrange(num_threads):
thread = threading.Thread(target=Worker, name='Reloader %d' % i)
thread.daemon = True
thread.start()
items_queue.join()
else:
logger.info(
'Reloading runs serially (one after another) on the main '
'thread.')
Worker()
with self._accumulators_mutex:
for name in names_to_delete:
logger.warn('Deleting accumulator %r', name)
del self._accumulators[name]
logger.info('Finished with EventMultiplexer.Reload()')
return self
def PluginAssets(self, plugin_name):
"""Get index of runs and assets for a given plugin.
Args:
plugin_name: Name of the plugin we are checking for.
Returns:
A dictionary that maps from run_name to a list of plugin
assets for that run.
"""
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run: accum.PluginAssets(plugin_name) for run, accum in items}
def RetrievePluginAsset(self, run, plugin_name, asset_name):
"""Return the contents for a specific plugin asset from a run.
Args:
run: The string name of the run.
plugin_name: The string name of a plugin.
asset_name: The string name of an asset.
Returns:
The string contents of the plugin asset.
Raises:
KeyError: If the asset is not available.
"""
accumulator = self.GetAccumulator(run)
return accumulator.RetrievePluginAsset(plugin_name, asset_name)
def FirstEventTimestamp(self, run):
"""Return the timestamp of the first event of the given run.
This may perform I/O if no events have been loaded yet for the run.
Args:
run: A string name of the run for which the timestamp is retrieved.
Returns:
The wall_time of the first event of the run, which will typically be
seconds since the epoch.
Raises:
KeyError: If the run is not found.
ValueError: If the run has no events loaded and there are no events on
disk to load.
"""
accumulator = self.GetAccumulator(run)
return accumulator.FirstEventTimestamp()
def Graph(self, run):
"""Retrieve the graph associated with the provided run.
Args:
run: A string name of a run to load the graph for.
Raises:
KeyError: If the run is not found.
ValueError: If the run does not have an associated graph.
Returns:
The `GraphDef` protobuf data structure.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Graph()
def MetaGraph(self, run):
"""Retrieve the metagraph associated with the provided run.
Args:
run: A string name of a run to load the graph for.
Raises:
KeyError: If the run is not found.
ValueError: If the run does not have an associated graph.
Returns:
The `MetaGraphDef` protobuf data structure.
"""
accumulator = self.GetAccumulator(run)
return accumulator.MetaGraph()
def RunMetadata(self, run, tag):
"""Get the session.run() metadata associated with a TensorFlow run and tag.
Args:
run: A string name of a TensorFlow run.
tag: A string name of the tag associated with a particular session.run().
Raises:
KeyError: If the run is not found, or the tag is not available for the
given run.
Returns:
The metadata in the form of `RunMetadata` protobuf data structure.
"""
accumulator = self.GetAccumulator(run)
return accumulator.RunMetadata(tag)
def Tensors(self, run, tag):
"""Retrieve the tensor events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
An array of `event_accumulator.TensorEvent`s.
"""
accumulator = self.GetAccumulator(run)
return accumulator.Tensors(tag)
def PluginRunToTagToContent(self, plugin_name):
"""Returns a 2-layer dictionary of the form {run: {tag: content}}.
The `content` referred above is the content field of the PluginData proto
for the specified plugin within a Summary.Value proto.
Args:
plugin_name: The name of the plugin for which to fetch content.
Returns:
A dictionary of the form {run: {tag: content}}.
"""
mapping = {}
for run in self.Runs():
try:
tag_to_content = self.GetAccumulator(run).PluginTagToContent(
plugin_name)
except KeyError:
# This run lacks content for the plugin. Try the next run.
continue
mapping[run] = tag_to_content
return mapping
def SummaryMetadata(self, run, tag):
"""Return the summary metadata for the given tag on the given run.
Args:
run: A string name of the run for which summary metadata is to be
retrieved.
tag: A string name of the tag whose summary metadata is to be
retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
Returns:
A `SummaryMetadata` protobuf.
"""
accumulator = self.GetAccumulator(run)
return accumulator.SummaryMetadata(tag)
def Runs(self):
"""Return all the run names in the `EventMultiplexer`.
Returns:
```
{runName: { scalarValues: [tagA, tagB, tagC],
graph: true, meta_graph: true}}
```
"""
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run_name: accumulator.Tags() for run_name, accumulator in items}
def RunPaths(self):
"""Returns a dict mapping run names to event file paths."""
return self._paths
def GetAccumulator(self, run):
"""Returns EventAccumulator for a given run.
Args:
run: String name of run.
Returns:
An EventAccumulator object.
Raises:
KeyError: If run does not exist.
"""
with self._accumulators_mutex:
return self._accumulators[run]
|
thread.py
|
from concurrent.futures import ThreadPoolExecutor
import threading
pool = ThreadPoolExecutor(max_workers=10)
def run_in_thread(*actions):
threading.Thread(target=lambda x: run_actions(*x), daemon=True, args=(actions,)).start()
# pool.submit(run_actions, *actions)
def run_actions(*lambda_args):
raise NotImplementedError
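# NOTE: the unconditional raise above leaves the import and loop below
# unreachable; they appear to be intentionally disabled rather than forgotten.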
from recognition.actions import perform
for lambda_arg in lambda_args:
perform.perform_io(lambda_arg())
|
main.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import threading
import numpy as np
import signal
import random
import math
import os
import time
from environment.environment import Environment
from model.model import UnrealModel
from train.trainer import Trainer
from train.rmsprop_applier import RMSPropApplier
from constants import *
def log_uniform(lo, hi, rate):
log_lo = math.log(lo)
log_hi = math.log(hi)
v = log_lo * (1-rate) + log_hi * rate
return math.exp(v)
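# log_uniform interpolates geometrically between lo and hi, e.g.
# log_uniform(1e-4, 1e-2, 0.5) == exp((log(1e-4) + log(1e-2)) / 2) == 1e-3.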
device = "/cpu:0"
if USE_GPU:
device = "/gpu:0"
initial_learning_rate = log_uniform(INITIAL_ALPHA_LOW,
INITIAL_ALPHA_HIGH,
INITIAL_ALPHA_LOG_RATE)
global_t = 0
stop_requested = False
terminate_requested = False
action_size = Environment.get_action_size()
global_network = UnrealModel(action_size, -1, device)
trainers = []
learning_rate_input = tf.placeholder("float")
grad_applier = RMSPropApplier(learning_rate = learning_rate_input,
decay = RMSP_ALPHA,
momentum = 0.0,
epsilon = RMSP_EPSILON,
clip_norm = GRAD_NORM_CLIP,
device = device)
for i in range(PARALLEL_SIZE):
trainer = Trainer(i,
global_network,
initial_learning_rate,
learning_rate_input,
grad_applier,
MAX_TIME_STEP,
device = device)
trainers.append(trainer)
# prepare session
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False,
allow_soft_placement=True))
init = tf.global_variables_initializer()
sess.run(init)
# summary for tensorboard
score_input = tf.placeholder(tf.int32)
tf.summary.scalar("score", score_input)
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(LOG_FILE, sess.graph)
# init or load checkpoint with saver
saver = tf.train.Saver()
checkpoint = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
if checkpoint and checkpoint.model_checkpoint_path:
saver.restore(sess, checkpoint.model_checkpoint_path)
print("checkpoint loaded:", checkpoint.model_checkpoint_path)
tokens = checkpoint.model_checkpoint_path.split("-")
# set global step
global_t = int(tokens[1])
print(">>> global step set: ", global_t)
# set wall time
wall_t_fname = CHECKPOINT_DIR + '/' + 'wall_t.' + str(global_t)
with open(wall_t_fname, 'r') as f:
wall_t = float(f.read())
next_save_steps = (global_t + SAVE_INTERVAL_STEP) // SAVE_INTERVAL_STEP * SAVE_INTERVAL_STEP
else:
print("Could not find old checkpoint")
# set wall time
wall_t = 0.0
next_save_steps = SAVE_INTERVAL_STEP
def save(current_global_step):
""" Save checkpoint.
Called from thread-0.
"""
global next_save_steps
global train_threads
global trainers
global saver
global stop_requested
stop_requested = True
# Wait for all other threads to stop
for (i, t) in enumerate(train_threads):
if i != 0:
t.join()
# Save
if not os.path.exists(CHECKPOINT_DIR):
os.mkdir(CHECKPOINT_DIR)
# Write wall time
wall_t = time.time() - start_time
wall_t_fname = CHECKPOINT_DIR + '/' + 'wall_t.' + str(global_t)
with open(wall_t_fname, 'w') as f:
f.write(str(wall_t))
print('Start saving.')
saver.save(sess, CHECKPOINT_DIR + '/' + 'checkpoint', global_step = global_t)
print('End saving.')
stop_requested = False
next_save_steps += SAVE_INTERVAL_STEP
# Restart other threads
for i in range(PARALLEL_SIZE):
if i != 0:
thread = threading.Thread(target=train_function, args=(i,))
train_threads[i] = thread
thread.start()
def train_function(parallel_index):
""" Train each environment. """
global global_t
trainer = trainers[parallel_index]
# set start_time
start_time = time.time() - wall_t
trainer.set_start_time(start_time)
while True:
if stop_requested:
break
if terminate_requested:
break
if global_t > MAX_TIME_STEP:
break
if parallel_index == 0 and global_t > next_save_steps:
# Save checkpoint
save(global_t)
diff_global_t = trainer.process(sess, global_t, summary_writer,
summary_op, score_input)
global_t += diff_global_t
def signal_handler(signal, frame):
global terminate_requested
print('You pressed Ctrl+C!')
terminate_requested = True
train_threads = []
for i in range(PARALLEL_SIZE):
train_threads.append(threading.Thread(target=train_function, args=(i,)))
signal.signal(signal.SIGINT, signal_handler)
# set start time
start_time = time.time() - wall_t
for t in train_threads:
t.start()
print('Press Ctrl+C to stop')
signal.pause()
|
youtube.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
Copyright (C) 2016-2018 plugin.video.youtube
SPDX-License-Identifier: GPL-2.0-only
See LICENSES/GPL-2.0-only for more information.
"""
import copy
import json
import re
import threading
import traceback
import requests
from .login_client import LoginClient
from ..helper.video_info import VideoInfo
from ..helper.utils import get_shelf_index_by_title
from ...kodion import constants
from ...kodion import Context
from ...kodion.utils import datetime_parser
_context = Context(plugin_id='plugin.video.youtube')
class YouTube(LoginClient):
def __init__(self, config=None, language='en-US', region='US', items_per_page=50, access_token='', access_token_tv=''):
if config is None:
config = {}
LoginClient.__init__(self, config=config, language=language, region=region, access_token=access_token,
access_token_tv=access_token_tv)
self._max_results = items_per_page
def get_max_results(self):
return self._max_results
def get_language(self):
return self._language
def get_region(self):
return self._region
@staticmethod
def calculate_next_page_token(page, max_result):
page -= 1
low = 'AEIMQUYcgkosw048'
high = 'ABCDEFGHIJKLMNOP'
len_low = len(low)
len_high = len(high)
position = page * max_result
overflow_token = 'Q'
if position >= 128:
overflow_token_iteration = position // 128
overflow_token = '%sE' % high[overflow_token_iteration]
low_iteration = position % len_low
# at this position the iteration starts with 'I' again (after 'P')
if position >= 256:
multiplier = (position // 128) - 1
position -= 128 * multiplier
high_iteration = (position // len_low) % len_high
return 'C%s%s%sAA' % (high[high_iteration], low[low_iteration], overflow_token)
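# Worked example (derived from the arithmetic above): page=2, max_result=50
# gives position=50, high[3]='D', low[2]='I', overflow token 'Q' -> 'CDIQAA'.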
def update_watch_history(self, video_id, url):
headers = {'Host': 'www.youtube.com',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.36 Safari/537.36',
'Accept': 'image/webp,*/*;q=0.8',
'DNT': '1',
'Referer': 'https://www.youtube.com/tv',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'}
params = {'noflv': '1',
'html5': '1',
'video_id': video_id,
'referrer': '',
'eurl': 'https://www.youtube.com/tv#/watch?v=%s' % video_id,
'skl': 'false',
'ns': 'yt',
'el': 'leanback',
'ps': 'leanback'}
if self._access_token:
params['access_token'] = self._access_token
try:
_ = requests.get(url, params=params, headers=headers, verify=self._verify, allow_redirects=True)
except Exception:
_context.log_error('Failed to update watch history |%s|' % traceback.format_exc())
def get_video_streams(self, context, video_id=None, player_config=None, cookies=None):
video_info = VideoInfo(context, access_token=self._access_token, language=self._language)
video_streams = video_info.load_stream_infos(video_id, player_config, cookies)
# update title
for video_stream in video_streams:
title = '%s (%s)' % (context.get_ui().bold(video_stream['title']), video_stream['container'])
if 'audio' in video_stream and 'video' in video_stream:
if video_stream['audio']['bitrate'] > 0 and video_stream['video']['encoding'] and \
video_stream['audio']['encoding']:
title = '%s (%s; %s / %s@%d)' % (context.get_ui().bold(video_stream['title']),
video_stream['container'],
video_stream['video']['encoding'],
video_stream['audio']['encoding'],
video_stream['audio']['bitrate'])
elif video_stream['video']['encoding'] and video_stream['audio']['encoding']:
title = '%s (%s; %s / %s)' % (context.get_ui().bold(video_stream['title']),
video_stream['container'],
video_stream['video']['encoding'],
video_stream['audio']['encoding'])
elif 'audio' in video_stream and 'video' not in video_stream:
if video_stream['audio']['encoding'] and video_stream['audio']['bitrate'] > 0:
title = '%s (%s; %s@%d)' % (context.get_ui().bold(video_stream['title']),
video_stream['container'],
video_stream['audio']['encoding'],
video_stream['audio']['bitrate'])
elif 'audio' in video_stream or 'video' in video_stream:
encoding = video_stream.get('audio', dict()).get('encoding')
if not encoding:
encoding = video_stream.get('video', dict()).get('encoding')
if encoding:
title = '%s (%s; %s)' % (context.get_ui().bold(video_stream['title']),
video_stream['container'],
encoding)
video_stream['title'] = title
return video_streams
def remove_playlist(self, playlist_id):
params = {'id': playlist_id,
'mine': 'true'}
return self.perform_v3_request(method='DELETE', path='playlists', params=params)
def get_supported_languages(self, language=None):
_language = language
if not _language:
_language = self._language
_language = _language.replace('-', '_')
params = {'part': 'snippet',
'hl': _language}
return self.perform_v3_request(method='GET', path='i18nLanguages', params=params)
def get_supported_regions(self, language=None):
_language = language
if not _language:
_language = self._language
_language = _language.replace('-', '_')
params = {'part': 'snippet',
'hl': _language}
return self.perform_v3_request(method='GET', path='i18nRegions', params=params)
def rename_playlist(self, playlist_id, new_title, privacy_status='private'):
params = {'part': 'snippet,id,status'}
post_data = {'kind': 'youtube#playlist',
'id': playlist_id,
'snippet': {'title': new_title},
'status': {'privacyStatus': privacy_status}}
return self.perform_v3_request(method='PUT', path='playlists', params=params, post_data=post_data)
def create_playlist(self, title, privacy_status='private'):
params = {'part': 'snippet,status'}
post_data = {'kind': 'youtube#playlist',
'snippet': {'title': title},
'status': {'privacyStatus': privacy_status}}
return self.perform_v3_request(method='POST', path='playlists', params=params, post_data=post_data)
def get_video_rating(self, video_id):
if isinstance(video_id, list):
video_id = ','.join(video_id)
params = {'id': video_id}
return self.perform_v3_request(method='GET', path='videos/getRating', params=params)
def rate_video(self, video_id, rating='like'):
"""
Rate a video
:param video_id: id of the video
:param rating: [like|dislike|none]
:return:
"""
params = {'id': video_id,
'rating': rating}
return self.perform_v3_request(method='POST', path='videos/rate', params=params)
def add_video_to_playlist(self, playlist_id, video_id):
params = {'part': 'snippet',
'mine': 'true'}
post_data = {'kind': 'youtube#playlistItem',
'snippet': {'playlistId': playlist_id,
'resourceId': {'kind': 'youtube#video',
'videoId': video_id}}}
return self.perform_v3_request(method='POST', path='playlistItems', params=params, post_data=post_data)
# noinspection PyUnusedLocal
def remove_video_from_playlist(self, playlist_id, playlist_item_id):
params = {'id': playlist_item_id}
return self.perform_v3_request(method='DELETE', path='playlistItems', params=params)
def unsubscribe(self, subscription_id):
params = {'id': subscription_id}
return self.perform_v3_request(method='DELETE', path='subscriptions', params=params)
def subscribe(self, channel_id):
params = {'part': 'snippet'}
post_data = {'kind': 'youtube#subscription',
'snippet': {'resourceId': {'kind': 'youtube#channel',
'channelId': channel_id}}}
return self.perform_v3_request(method='POST', path='subscriptions', params=params, post_data=post_data)
def get_subscription(self, channel_id, order='alphabetical', page_token=''):
"""
:param channel_id: [channel-id|'mine']
:param order: ['alphabetical'|'relevance'|'unread']
:param page_token:
:return:
"""
params = {'part': 'snippet',
'maxResults': str(self._max_results),
'order': order}
if channel_id == 'mine':
params['mine'] = 'true'
else:
params['channelId'] = channel_id
if page_token:
params['pageToken'] = page_token
return self.perform_v3_request(method='GET', path='subscriptions', params=params)
def get_guide_category(self, guide_category_id, page_token=''):
params = {'part': 'snippet,contentDetails,brandingSettings',
'maxResults': str(self._max_results),
'categoryId': guide_category_id,
'regionCode': self._region,
'hl': self._language}
if page_token:
params['pageToken'] = page_token
return self.perform_v3_request(method='GET', path='channels', params=params)
def get_guide_categories(self, page_token=''):
params = {'part': 'snippet',
'maxResults': str(self._max_results),
'regionCode': self._region,
'hl': self._language}
if page_token:
params['pageToken'] = page_token
return self.perform_v3_request(method='GET', path='guideCategories', params=params)
def get_popular_videos(self, page_token=''):
params = {'part': 'snippet,status',
'maxResults': str(self._max_results),
'regionCode': self._region,
'hl': self._language,
'chart': 'mostPopular'}
if page_token:
params['pageToken'] = page_token
return self.perform_v3_request(method='GET', path='videos', params=params)
def get_video_category(self, video_category_id, page_token=''):
params = {'part': 'snippet,contentDetails,status',
'maxResults': str(self._max_results),
'videoCategoryId': video_category_id,
'chart': 'mostPopular',
'regionCode': self._region,
'hl': self._language}
if page_token:
params['pageToken'] = page_token
return self.perform_v3_request(method='GET', path='videos', params=params)
def get_video_categories(self, page_token=''):
params = {'part': 'snippet',
'maxResults': str(self._max_results),
'regionCode': self._region,
'hl': self._language}
if page_token:
params['pageToken'] = page_token
return self.perform_v3_request(method='GET', path='videoCategories', params=params)
def _get_recommendations_for_home(self):
# YouTube has deprecated this API, so use history and related items to form
# a recommended set. We cache aggressively because searches incur a high
# quota cost of 100 on the YouTube API.
# Note this is a first stab attempt and can be refined a lot more.
cache = _context.get_data_cache()
# Do we have a cached result?
cache_home_key = 'get-activities-home'
cached = cache.get_item(cache.ONE_HOUR * 4, cache_home_key)
if cache_home_key in cached and cached[cache_home_key].get('items'):
return cached[cache_home_key]
# Fetch existing list of items, if any
items = []
cache_items_key = 'get-activities-home-items'
cached = cache.get_item(cache.ONE_WEEK * 2, cache_items_key)
if cache_items_key in cached:
items = cached[cache_items_key]
# Fetch history and recommended items. Use threads for faster execution.
def helper(video_id, responses):
_context.log_debug(
'Method get_activities: doing expensive API fetch for related '
'items for video %s' % video_id
)
di = self.get_related_videos(video_id, max_results=10)
if 'items' in di:
# Record for which video we fetched the items
for item in di['items']:
item['plugin_fetched_for'] = video_id
responses.extend(di['items'])
history = self.get_watch_history()
result = {
'kind': 'youtube#activityListResponse',
'items': []
}
if not history.get('items'):
return result
threads = []
candidates = []
already_fetched_for_video_ids = [item['plugin_fetched_for'] for item in items]
history_items = [item for item in history['items']
if re.match(r'(?P<video_id>[\w-]{11})', item['id'])]
# TODO:
# It would be nice to make this 8 user configurable
for item in history_items[:8]:
video_id = item['id']
if video_id not in already_fetched_for_video_ids:
thread = threading.Thread(target=helper, args=(video_id, candidates))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
# Prepend new candidates to items
seen = [item['id']['videoId'] for item in items]
for candidate in candidates:
vid = candidate['id']['videoId']
if vid not in seen:
seen.append(vid)
candidate['plugin_created_date'] = datetime_parser.now().strftime('%Y-%m-%dT%H:%M:%SZ')
items.insert(0, candidate)
# Truncate items to keep it manageable, and cache
items = items[:500]
cache.set(cache_items_key, json.dumps(items))
# Build the result set
items.sort(
key=lambda a: datetime_parser.parse(a['plugin_created_date']),
reverse=True
)
sorted_items = []
counter = 0
channel_counts = {}
while items:
counter += 1
# Hard stop on iteration. Good enough for our purposes.
if counter >= 1000:
break
# Reset channel counts on a new page
if counter % 50 == 0:
channel_counts = {}
# Ensure a single channel isn't hogging the page
item = items.pop()
channel_id = item['snippet']['channelId']
channel_counts.setdefault(channel_id, 0)
if channel_counts[channel_id] <= 3:
# Use the item
channel_counts[channel_id] = channel_counts[channel_id] + 1
item["page_number"] = counter // 50
sorted_items.append(item)
else:
# Move the item to the end of the list
items.append(item)
# Finally sort items per page by date for a better distribution
now = datetime_parser.now()
sorted_items.sort(
key=lambda a: (
a['page_number'],
datetime_parser.total_seconds(
now - datetime_parser.parse(a['snippet']['publishedAt'])
)
),
)
# Finalize result
result['items'] = sorted_items
"""
# TODO:
# Enable pagination
result['pageInfo'] = {
'resultsPerPage': 50,
'totalResults': len(sorted_items)
}
"""
# Update cache
cache.set(cache_home_key, json.dumps(result))
# If there are no sorted_items we fall back to default API behaviour
return result
def get_activities(self, channel_id, page_token=''):
params = {'part': 'snippet,contentDetails',
'maxResults': str(self._max_results),
'regionCode': self._region,
'hl': self._language}
if channel_id == 'home':
recommended = self._get_recommendations_for_home()
if 'items' in recommended and recommended.get('items'):
return recommended
if channel_id == 'home':
params['home'] = 'true'
elif channel_id == 'mine':
params['mine'] = 'true'
else:
params['channelId'] = channel_id
if page_token:
params['pageToken'] = page_token
return self.perform_v3_request(method='GET', path='activities', params=params)
def get_channel_sections(self, channel_id):
params = {'part': 'snippet,contentDetails',
'regionCode': self._region,
'hl': self._language}
if channel_id == 'mine':
params['mine'] = 'true'
else:
params['channelId'] = channel_id
return self.perform_v3_request(method='GET', path='channelSections', params=params)
def get_playlists_of_channel(self, channel_id, page_token=''):
params = {'part': 'snippet',
'maxResults': str(self._max_results)}
if channel_id != 'mine':
params['channelId'] = channel_id
else:
params['mine'] = 'true'
if page_token:
params['pageToken'] = page_token
return self.perform_v3_request(method='GET', path='playlists', params=params)
def get_playlist_item_id_of_video_id(self, playlist_id, video_id, page_token=''):
old_max_results = self._max_results
self._max_results = 50
json_data = self.get_playlist_items(playlist_id=playlist_id, page_token=page_token)
self._max_results = old_max_results
items = json_data.get('items', [])
for item in items:
playlist_item_id = item['id']
playlist_video_id = item.get('snippet', {}).get('resourceId', {}).get('videoId', '')
if playlist_video_id and playlist_video_id == video_id:
return playlist_item_id
next_page_token = json_data.get('nextPageToken', '')
if next_page_token:
return self.get_playlist_item_id_of_video_id(playlist_id=playlist_id, video_id=video_id,
page_token=next_page_token)
return None
def get_playlist_items(self, playlist_id, page_token='', max_results=None):
# prepare params
max_results = str(self._max_results) if max_results is None else str(max_results)
params = {'part': 'snippet',
'maxResults': max_results,
'playlistId': playlist_id}
if page_token:
params['pageToken'] = page_token
return self.perform_v3_request(method='GET', path='playlistItems', params=params)
def get_channel_by_username(self, username):
"""
Returns a collection of zero or more channel resources that match the request criteria.
:param username: retrieve channel_id for username
:return:
"""
params = {'part': 'id'}
if username == 'mine':
params.update({'mine': 'true'})
else:
params.update({'forUsername': username})
return self.perform_v3_request(method='GET', path='channels', params=params)
def get_channels(self, channel_id):
"""
Returns a collection of zero or more channel resources that match the request criteria.
:param channel_id: list or comma-separated list of the YouTube channel ID(s)
:return:
"""
if isinstance(channel_id, list):
channel_id = ','.join(channel_id)
params = {'part': 'snippet,contentDetails,brandingSettings'}
if channel_id != 'mine':
params['id'] = channel_id
else:
params['mine'] = 'true'
return self.perform_v3_request(method='GET', path='channels', params=params)
def get_disliked_videos(self, page_token=''):
# prepare page token
if not page_token:
page_token = ''
# prepare params
params = {'part': 'snippet,status',
'myRating': 'dislike',
'maxResults': str(self._max_results)}
if page_token:
params['pageToken'] = page_token
return self.perform_v3_request(method='GET', path='videos', params=params)
def get_videos(self, video_id, live_details=False):
"""
Returns a list of videos that match the API request parameters
:param video_id: list of video ids
:param live_details: also retrieve liveStreamingDetails
:return:
"""
if isinstance(video_id, list):
video_id = ','.join(video_id)
parts = ['snippet,contentDetails,status']
if live_details:
parts.append(',liveStreamingDetails')
params = {'part': ''.join(parts),
'id': video_id}
return self.perform_v3_request(method='GET', path='videos', params=params)
def get_playlists(self, playlist_id):
if isinstance(playlist_id, list):
playlist_id = ','.join(playlist_id)
params = {'part': 'snippet,contentDetails',
'id': playlist_id}
return self.perform_v3_request(method='GET', path='playlists', params=params)
def get_live_events(self, event_type='live', order='relevance', page_token='', location=False):
"""
:param event_type: one of: 'live', 'completed', 'upcoming'
:param order: one of: 'date', 'rating', 'relevance', 'title', 'videoCount', 'viewCount'
:param page_token:
:param location: bool, use geolocation
:return:
"""
# prepare page token
if not page_token:
page_token = ''
# prepare params
params = {'part': 'snippet',
'type': 'video',
'order': order,
'eventType': event_type,
'regionCode': self._region,
'hl': self._language,
'relevanceLanguage': self._language,
'maxResults': str(self._max_results)}
if location:
location = _context.get_settings().get_location()
if location:
params['location'] = location
params['locationRadius'] = _context.get_settings().get_location_radius()
if page_token:
params['pageToken'] = page_token
return self.perform_v3_request(method='GET', path='search', params=params)
def get_related_videos(self, video_id, page_token='', max_results=0):
# prepare page token
if not page_token:
page_token = ''
max_results = self._max_results if max_results <= 0 else max_results
# prepare params
params = {'relatedToVideoId': video_id,
'part': 'snippet',
'type': 'video',
'regionCode': self._region,
'hl': self._language,
'maxResults': str(max_results)}
if page_token:
params['pageToken'] = page_token
return self.perform_v3_request(method='GET', path='search', params=params)
def get_parent_comments(self, video_id, page_token='', max_results=0):
max_results = self._max_results if max_results <= 0 else max_results
# prepare params
params = {'part': 'snippet',
'videoId': video_id,
'order': 'relevance',
'textFormat': 'plainText',
'maxResults': str(max_results)}
if page_token:
params['pageToken'] = page_token
return self.perform_v3_request(method='GET', path='commentThreads', params=params, no_login=True)
def get_child_comments(self, parent_id, page_token='', max_results=0):
max_results = self._max_results if max_results <= 0 else max_results
# prepare params
params = {'part': 'snippet',
'parentId': parent_id,
'textFormat': 'plainText',
'maxResults': str(max_results)}
if page_token:
params['pageToken'] = page_token
return self.perform_v3_request(method='GET', path='comments', params=params, no_login=True)
def get_channel_videos(self, channel_id, page_token=''):
"""
Returns a collection of video search results for the specified channel_id
"""
params = {'part': 'snippet',
'hl': self._language,
'maxResults': str(self._max_results),
'type': 'video',
'safeSearch': 'none',
'order': 'date'}
if channel_id == 'mine':
params['forMine'] = 'true'
else:
params['channelId'] = channel_id
if page_token:
params['pageToken'] = page_token
return self.perform_v3_request(method='GET', path='search', params=params)
def search(self, q, search_type=None, event_type='', channel_id='',
order='relevance', safe_search='moderate', page_token='', location=False):
"""
Returns a collection of search results that match the query parameters specified in the API request. By default,
a search result set identifies matching video, channel, and playlist resources, but you can also configure
queries to only retrieve a specific type of resource.
:param q:
:param search_type: acceptable values are: 'video' | 'channel' | 'playlist'
:param event_type: 'live', 'completed', 'upcoming'
:param channel_id: limit search to channel id
:param order: one of: 'date', 'rating', 'relevance', 'title', 'videoCount', 'viewCount'
:param safe_search: one of: 'moderate', 'none', 'strict'
:param page_token: can be ''
:param location: bool, use geolocation
:return:
"""
if search_type is None:
search_type = ['video', 'channel', 'playlist']
# prepare search type
if not search_type:
search_type = ''
if isinstance(search_type, list):
search_type = ','.join(search_type)
# prepare page token
if not page_token:
page_token = ''
# prepare params
params = {'q': q,
'part': 'snippet',
'regionCode': self._region,
'hl': self._language,
'relevanceLanguage': self._language,
'maxResults': str(self._max_results)}
if event_type and event_type in ['live', 'upcoming', 'completed']:
params['eventType'] = event_type
if search_type:
params['type'] = search_type
if channel_id:
params['channelId'] = channel_id
if order:
params['order'] = order
if safe_search:
params['safeSearch'] = safe_search
if page_token:
params['pageToken'] = page_token
video_only_params = ['eventType', 'videoCaption', 'videoCategoryId', 'videoDefinition',
'videoDimension', 'videoDuration', 'videoEmbeddable', 'videoLicense',
'videoSyndicated', 'videoType', 'relatedToVideoId', 'forMine']
for key in video_only_params:
if params.get(key) is not None:
params['type'] = 'video'
break
if params['type'] == 'video' and location:
location = _context.get_settings().get_location()
if location:
params['location'] = location
params['locationRadius'] = _context.get_settings().get_location_radius()
return self.perform_v3_request(method='GET', path='search', params=params)
def get_my_subscriptions(self, page_token=None, offset=0):
if not page_token:
page_token = ''
result = {'items': [],
'next_page_token': page_token,
'offset': offset}
def _perform(_page_token, _offset, _result):
_post_data = {
'context': {
'client': {
'clientName': 'TVHTML5',
'clientVersion': '5.20150304',
'theme': 'CLASSIC',
'acceptRegion': '%s' % self._region,
'acceptLanguage': '%s' % self._language.replace('_', '-')
},
'user': {
'enableSafetyMode': False
}
},
'browseId': 'FEsubscriptions'
}
if _page_token:
_post_data['continuation'] = _page_token
_json_data = self.perform_v1_tv_request(method='POST', path='browse', post_data=_post_data)
_data = _json_data.get('contents', {}).get('sectionListRenderer', {}).get('contents', [{}])[0].get(
'shelfRenderer', {}).get('content', {}).get('horizontalListRenderer', {})
if not _data:
_data = _json_data.get('continuationContents', {}).get('horizontalListContinuation', {})
_items = _data.get('items', [])
if not _result:
_result = {'items': []}
_new_offset = self._max_results - len(_result['items']) + _offset
if _offset > 0:
_items = _items[_offset:]
_result['offset'] = _new_offset
for _item in _items:
_item = _item.get('gridVideoRenderer', {})
if _item:
_video_item = {'id': _item['videoId'],
'title': _item.get('title', {}).get('runs', [{}])[0].get('text', ''),
'channel': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('text', '')}
_result['items'].append(_video_item)
_continuations = _data.get('continuations', [{}])[0].get('nextContinuationData', {}).get('continuation', '')
if _continuations and len(_result['items']) <= self._max_results:
_result['next_page_token'] = _continuations
if len(_result['items']) < self._max_results:
_result = _perform(_page_token=_continuations, _offset=0, _result=_result)
# trim result
if len(_result['items']) > self._max_results:
_items = _result['items']
_items = _items[:self._max_results]
_result['items'] = _items
_result['continue'] = True
if 'offset' in _result and _result['offset'] >= 100:
_result['offset'] -= 100
if len(_result['items']) < self._max_results:
if 'continue' in _result:
del _result['continue']
if 'next_page_token' in _result:
del _result['next_page_token']
if 'offset' in _result:
del _result['offset']
return _result
return _perform(_page_token=page_token, _offset=offset, _result=result)
def get_purchases(self, page_token, offset):
if not page_token:
page_token = ''
shelf_title = 'Purchases'
result = {'items': [],
'next_page_token': page_token,
'offset': offset}
def _perform(_page_token, _offset, _result, _shelf_index=None):
_post_data = {
'context': {
'client': {
'clientName': 'TVHTML5',
'clientVersion': '5.20150304',
'theme': 'CLASSIC',
'acceptRegion': '%s' % self._region,
'acceptLanguage': '%s' % self._language.replace('_', '-')
},
'user': {
'enableSafetyMode': False
}
}
}
if _page_token:
_post_data['continuation'] = _page_token
else:
_post_data['browseId'] = 'FEmy_youtube'
_json_data = self.perform_v1_tv_request(method='POST', path='browse', post_data=_post_data)
_data = {}
if 'continuationContents' in _json_data:
_data = _json_data.get('continuationContents', {}).get('horizontalListContinuation', {})
elif 'contents' in _json_data:
_contents = _json_data.get('contents', {}).get('sectionListRenderer', {}).get('contents', [{}])
if _shelf_index is None:
_shelf_index = get_shelf_index_by_title(_context, _json_data, shelf_title)
if _shelf_index is not None:
_data = _contents[_shelf_index].get('shelfRenderer', {}).get('content', {}).get('horizontalListRenderer', {})
_items = _data.get('items', [])
if not _result:
_result = {'items': []}
_new_offset = self._max_results - len(_result['items']) + _offset
if _offset > 0:
_items = _items[_offset:]
_result['offset'] = _new_offset
for _listItem in _items:
_item = _listItem.get('gridVideoRenderer', {})
if _item:
_video_item = {'id': _item['videoId'],
'title': _item.get('title', {}).get('runs', [{}])[0].get('text', ''),
'channel': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('text', '')}
_result['items'].append(_video_item)
_item = _listItem.get('gridPlaylistRenderer', {})
if _item:
play_next_page_token = ''
while True:
json_playlist_data = self.get_playlist_items(_item['playlistId'], page_token=play_next_page_token)
_playListItems = json_playlist_data.get('items', {})
for _playListItem in _playListItems:
_playListItem = _playListItem.get('snippet', {})
if _playListItem:
_video_item = {'id': _playListItem.get('resourceId', {}).get('videoId', ''),
'title': _playListItem['title'],
'channel': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('text', '')}
_result['items'].append(_video_item)
play_next_page_token = json_playlist_data.get('nextPageToken', '')
if not play_next_page_token or _context.abort_requested():
break
_continuations = _data.get('continuations', [{}])[0].get('nextContinuationData', {}).get('continuation', '')
if _continuations and len(_result['items']) <= self._max_results:
_result['next_page_token'] = _continuations
if len(_result['items']) < self._max_results:
_result = _perform(_page_token=_continuations, _offset=0, _result=_result, _shelf_index=_shelf_index)
# trim result
if len(_result['items']) > self._max_results:
_items = _result['items']
_items = _items[:self._max_results]
_result['items'] = _items
_result['continue'] = True
if len(_result['items']) < self._max_results:
if 'continue' in _result:
del _result['continue']
if 'next_page_token' in _result:
del _result['next_page_token']
if 'offset' in _result:
del _result['offset']
return _result
shelf_index = None
if self._language != 'en' and not self._language.startswith('en-') and not page_token:
# shelf index is a moving target, make a request in english first to find the correct index by title
_en_post_data = {
'context': {
'client': {
'clientName': 'TVHTML5',
'clientVersion': '5.20150304',
'theme': 'CLASSIC',
'acceptRegion': 'US',
'acceptLanguage': 'en-US'
},
'user': {
'enableSafetyMode': False
}
},
'browseId': 'FEmy_youtube'
}
json_data = self.perform_v1_tv_request(method='POST', path='browse', post_data=_en_post_data)
shelf_index = get_shelf_index_by_title(_context, json_data, shelf_title)
result = _perform(_page_token=page_token, _offset=offset, _result=result, _shelf_index=shelf_index)
return result
def get_saved_playlists(self, page_token, offset):
if not page_token:
page_token = ''
shelf_title = 'Saved Playlists'
result = {'items': [],
'next_page_token': page_token,
'offset': offset}
def _perform(_page_token, _offset, _result, _shelf_index=None):
_post_data = {
'context': {
'client': {
'clientName': 'TVHTML5',
'clientVersion': '5.20150304',
'theme': 'CLASSIC',
'acceptRegion': '%s' % self._region,
'acceptLanguage': '%s' % self._language.replace('_', '-')
},
'user': {
'enableSafetyMode': False
}
}
}
if _page_token:
_post_data['continuation'] = _page_token
else:
_post_data['browseId'] = 'FEmy_youtube'
_json_data = self.perform_v1_tv_request(method='POST', path='browse', post_data=_post_data)
_data = {}
if 'continuationContents' in _json_data:
_data = _json_data.get('continuationContents', {}).get('horizontalListContinuation', {})
elif 'contents' in _json_data:
_contents = _json_data.get('contents', {}).get('sectionListRenderer', {}).get('contents', [{}])
if _shelf_index is None:
_shelf_index = get_shelf_index_by_title(_context, _json_data, shelf_title)
if _shelf_index is not None:
_data = _contents[_shelf_index].get('shelfRenderer', {}).get('content', {}).get('horizontalListRenderer', {})
_items = _data.get('items', [])
if not _result:
_result = {'items': []}
_new_offset = self._max_results - len(_result['items']) + _offset
if _offset > 0:
_items = _items[_offset:]
_result['offset'] = _new_offset
for _item in _items:
_item = _item.get('gridPlaylistRenderer', {})
if _item:
_video_item = {'id': _item['playlistId'],
'title': _item.get('title', {}).get('runs', [{}])[0].get('text', ''),
'channel': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('text', ''),
'channel_id': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('navigationEndpoint', {}).get('browseEndpoint', {}).get('browseId', ''),
'thumbnails': {'default': {'url': ''}, 'medium': {'url': ''}, 'high': {'url': ''}}}
_thumbs = _item.get('thumbnail', {}).get('thumbnails', [{}])
for _thumb in _thumbs:
_thumb_url = _thumb.get('url', '')
if _thumb_url.startswith('//'):
_thumb_url = ''.join(['https:', _thumb_url])
if _thumb_url.endswith('/default.jpg'):
_video_item['thumbnails']['default']['url'] = _thumb_url
elif _thumb_url.endswith('/mqdefault.jpg'):
_video_item['thumbnails']['medium']['url'] = _thumb_url
elif _thumb_url.endswith('/hqdefault.jpg'):
_video_item['thumbnails']['high']['url'] = _thumb_url
_result['items'].append(_video_item)
_continuations = _data.get('continuations', [{}])[0].get('nextContinuationData', {}).get('continuation', '')
if _continuations and len(_result['items']) <= self._max_results:
_result['next_page_token'] = _continuations
if len(_result['items']) < self._max_results:
_result = _perform(_page_token=_continuations, _offset=0, _result=_result, _shelf_index=_shelf_index)
# trim result
if len(_result['items']) > self._max_results:
_items = _result['items']
_items = _items[:self._max_results]
_result['items'] = _items
_result['continue'] = True
if len(_result['items']) < self._max_results:
if 'continue' in _result:
del _result['continue']
if 'next_page_token' in _result:
del _result['next_page_token']
if 'offset' in _result:
del _result['offset']
return _result
shelf_index = None
if self._language != 'en' and not self._language.startswith('en-') and not page_token:
            # shelf index is a moving target, so make a request in English first to find the correct index by title
_en_post_data = {
'context': {
'client': {
'clientName': 'TVHTML5',
'clientVersion': '5.20150304',
'theme': 'CLASSIC',
'acceptRegion': 'US',
'acceptLanguage': 'en-US'
},
'user': {
'enableSafetyMode': False
}
},
'browseId': 'FEmy_youtube'
}
json_data = self.perform_v1_tv_request(method='POST', path='browse', post_data=_en_post_data)
shelf_index = get_shelf_index_by_title(_context, json_data, shelf_title)
result = _perform(_page_token=page_token, _offset=offset, _result=result, _shelf_index=shelf_index)
return result
def clear_watch_history(self):
_post_data = {
'context': {
'client': {
'clientName': 'TVHTML5',
'clientVersion': '5.20150304',
'theme': 'CLASSIC',
'acceptRegion': '%s' % self._region,
'acceptLanguage': '%s' % self._language.replace('_', '-')
},
'user': {
'enableSafetyMode': False
}
}
}
_json_data = self.perform_v1_tv_request(method='POST', path='history/clear_watch_history', post_data=_post_data)
return _json_data
def get_watch_history(self, page_token=None, offset=0):
if not page_token:
page_token = ''
result = {'items': [],
'next_page_token': page_token,
'offset': offset}
def _perform(_page_token, _offset, _result):
_post_data = {
'context': {
'client': {
'clientName': 'TVHTML5',
'clientVersion': '5.20150304',
'theme': 'CLASSIC',
'acceptRegion': '%s' % self._region,
'acceptLanguage': '%s' % self._language.replace('_', '-')
},
'user': {
'enableSafetyMode': False
}
},
'browseId': 'FEhistory'
}
if _page_token:
_post_data['continuation'] = _page_token
_json_data = self.perform_v1_tv_request(method='POST', path='browse', post_data=_post_data)
_data = _json_data.get('contents', {}).get('sectionListRenderer', {}).get('contents', [{}])[0].get(
'shelfRenderer', {}).get('content', {}).get('horizontalListRenderer', {})
if not _data:
_data = _json_data.get('continuationContents', {}).get('horizontalListContinuation', {})
_items = _data.get('items', [])
if not _result:
_result = {'items': []}
_new_offset = self._max_results - len(_result['items']) + _offset
if _offset > 0:
_items = _items[_offset:]
_result['offset'] = _new_offset
for _item in _items:
_item = _item.get('gridVideoRenderer', {})
if _item:
_video_item = {'id': _item['videoId'],
'title': _item.get('title', {}).get('runs', [{}])[0].get('text', ''),
'channel': _item.get('shortBylineText', {}).get('runs', [{}])[0].get('text', '')}
_result['items'].append(_video_item)
_continuations = _data.get('continuations', [{}])[0].get('nextContinuationData', {}).get('continuation', '')
if _continuations and len(_result['items']) <= self._max_results:
_result['next_page_token'] = _continuations
if len(_result['items']) < self._max_results:
_result = _perform(_page_token=_continuations, _offset=0, _result=_result)
# trim result
if len(_result['items']) > self._max_results:
_items = _result['items']
_items = _items[:self._max_results]
_result['items'] = _items
_result['continue'] = True
if len(_result['items']) < self._max_results:
if 'continue' in _result:
del _result['continue']
if 'next_page_token' in _result:
del _result['next_page_token']
if 'offset' in _result:
del _result['offset']
return _result
return _perform(_page_token=page_token, _offset=offset, _result=result)
def get_watch_later_id(self):
watch_later_id = ''
def _get_items(_continuation=None):
post_data = {
'context': {
'client': {
'clientName': 'TVHTML5',
'clientVersion': '5.20150304',
'theme': 'CLASSIC',
'acceptRegion': 'US',
'acceptLanguage': 'en-US'
},
'user': {
'enableSafetyMode': False
}
},
'browseId': 'default'
}
if _continuation:
post_data['continuation'] = _continuation
return self.perform_v1_tv_request(method='POST', path='browse', post_data=post_data)
current_page = 1
        pages = 30  # 28 seems to be the page limit; add a couple of pages of padding, the loop breaks when there is no next page data
progress_dialog = _context.get_ui().create_progress_dialog(_context.get_name(),
_context.localize(constants.localize.COMMON_PLEASE_WAIT),
background=True)
progress_dialog.set_total(pages)
progress_dialog.update(steps=1, text=_context.localize(constants.localize.WATCH_LATER_RETRIEVAL_PAGE) % str(current_page))
try:
json_data = _get_items()
while current_page < pages:
contents = json_data.get('contents', json_data.get('continuationContents', {}))
section = contents.get('sectionListRenderer', contents.get('sectionListContinuation', {}))
contents = section.get('contents', [{}])
for shelf in contents:
renderer = shelf.get('shelfRenderer', {})
endpoint = renderer.get('endpoint', {})
browse_endpoint = endpoint.get('browseEndpoint', {})
browse_id = browse_endpoint.get('browseId', '')
title = renderer.get('title', {})
title_runs = title.get('runs', [{}])[0]
title_text = title_runs.get('text', '')
if (title_text.lower() == 'watch later') and (browse_id.startswith('VLPL') or browse_id.startswith('PL')):
watch_later_id = browse_id.lstrip('VL')
break
if watch_later_id:
break
continuations = section.get('continuations', [{}])[0]
next_continuation_data = continuations.get('nextContinuationData', {})
continuation = next_continuation_data.get('continuation', '')
if continuation:
current_page += 1
progress_dialog.update(steps=1, text=_context.localize(constants.localize.WATCH_LATER_RETRIEVAL_PAGE) % str(current_page))
json_data = _get_items(continuation)
continue
else:
break
finally:
progress_dialog.close()
return watch_later_id
def perform_v3_request(self, method='GET', headers=None, path=None, post_data=None, params=None,
allow_redirects=True, no_login=False):
yt_config = self._config
if not yt_config.get('key'):
return {
'error':
{
'errors': [{'reason': 'accessNotConfigured'}],
'message': 'No API keys provided'
}
}
# params
if not params:
params = {}
_params = {'key': yt_config['key']}
_params.update(params)
# headers
if not headers:
headers = {}
_headers = {'Host': 'www.googleapis.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.36 Safari/537.36',
'Accept-Encoding': 'gzip, deflate'}
# a config can decide if a token is allowed
if self._access_token and yt_config.get('token-allowed', True) and not no_login:
_headers['Authorization'] = 'Bearer %s' % self._access_token
_headers.update(headers)
# url
_url = 'https://www.googleapis.com/youtube/v3/%s' % path.strip('/')
result = None
log_params = copy.deepcopy(params)
if 'location' in log_params:
log_params['location'] = 'xx.xxxx,xx.xxxx'
_context.log_debug('[data] v3 request: |{0}| path: |{1}| params: |{2}| post_data: |{3}|'.format(method, path, log_params, post_data))
if method == 'GET':
result = requests.get(_url, params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects)
elif method == 'POST':
_headers['content-type'] = 'application/json'
result = requests.post(_url, json=post_data, params=_params, headers=_headers, verify=self._verify,
allow_redirects=allow_redirects)
elif method == 'PUT':
_headers['content-type'] = 'application/json'
result = requests.put(_url, json=post_data, params=_params, headers=_headers, verify=self._verify,
allow_redirects=allow_redirects)
elif method == 'DELETE':
result = requests.delete(_url, params=_params, headers=_headers, verify=self._verify,
allow_redirects=allow_redirects)
if result is None:
return {}
        if result.headers.get('content-type', '').startswith('application/json'):
            return result.json()
        # non-JSON responses fall back to an empty dict so callers can safely use .get()
        return {}
def perform_v1_tv_request(self, method='GET', headers=None, path=None, post_data=None, params=None,
allow_redirects=True):
# params
if not params:
params = {}
_params = {'key': self._config_tv['key']}
_params.update(params)
# headers
if not headers:
headers = {}
_headers = {'Host': 'www.googleapis.com',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36',
'Origin': 'https://www.youtube.com',
'Accept': '*/*',
'DNT': '1',
'Referer': 'https://www.youtube.com/tv',
'Accept-Encoding': 'gzip',
'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'}
if self._access_token_tv:
_headers['Authorization'] = 'Bearer %s' % self._access_token_tv
_headers.update(headers)
# url
_url = 'https://www.googleapis.com/youtubei/v1/%s' % path.strip('/')
result = None
_context.log_debug('[i] v1 request: |{0}| path: |{1}| params: |{2}| post_data: |{3}|'.format(method, path, params, post_data))
if method == 'GET':
result = requests.get(_url, params=_params, headers=_headers, verify=self._verify, allow_redirects=allow_redirects)
elif method == 'POST':
_headers['content-type'] = 'application/json'
result = requests.post(_url, json=post_data, params=_params, headers=_headers, verify=self._verify,
allow_redirects=allow_redirects)
elif method == 'PUT':
_headers['content-type'] = 'application/json'
result = requests.put(_url, json=post_data, params=_params, headers=_headers, verify=self._verify,
allow_redirects=allow_redirects)
elif method == 'DELETE':
result = requests.delete(_url, params=_params, headers=_headers, verify=self._verify,
allow_redirects=allow_redirects)
if result is None:
return {}
        if result.headers.get('content-type', '').startswith('application/json'):
            return result.json()
        # non-JSON responses fall back to an empty dict so callers can safely use .get()
        return {}
|
i3-cycle-focus.py
|
#!/usr/bin/env python3
import os
import socket
import selectors
import threading
from argparse import ArgumentParser
import i3ipc
SOCKET_FILE = '/tmp/i3-cycle-focus'
MAX_WIN_HISTORY = 16
UPDATE_DELAY = 2.0
class FocusWatcher:
def __init__(self):
self.i3 = i3ipc.Connection()
self.i3.on('window::focus', self.on_window_focus)
self.listening_socket = socket.socket(socket.AF_UNIX,
socket.SOCK_STREAM)
if os.path.exists(SOCKET_FILE):
os.remove(SOCKET_FILE)
self.listening_socket.bind(SOCKET_FILE)
self.listening_socket.listen(1)
self.window_list = []
self.window_list_lock = threading.RLock()
self.focus_timer = None
self.window_index = 1
def update_windowlist(self, window_id):
with self.window_list_lock:
if window_id in self.window_list:
self.window_list.remove(window_id)
self.window_list.insert(0, window_id)
if len(self.window_list) > MAX_WIN_HISTORY:
del self.window_list[MAX_WIN_HISTORY:]
self.window_index = 1
def get_valid_windows(self):
tree = self.i3.get_tree()
if args.active_workspace:
return set(w.id for w in tree.find_focused().workspace().leaves())
elif args.visible_workspaces:
ws_list = []
w_set = set()
for item in self.i3.get_outputs():
ws_list.append(item["current_workspace"])
for ws in tree.workspaces():
if str(ws.num) in ws_list:
for w in ws.leaves():
w_set.add(w.id)
return w_set
else:
return set(w.id for w in tree.leaves())
def on_window_focus(self, i3conn, event):
if args.ignore_float and (event.container.props.floating == "user_on" or
event.container.props.floating == "auto_on"):
return
if UPDATE_DELAY != 0.0:
if self.focus_timer is not None:
self.focus_timer.cancel()
self.focus_timer = threading.Timer(UPDATE_DELAY,
self.update_windowlist,
[event.container.props.id])
self.focus_timer.start()
else:
self.update_windowlist(event.container.props.id)
def launch_i3(self):
self.i3.main()
def launch_server(self):
selector = selectors.DefaultSelector()
def accept(sock):
conn, addr = sock.accept()
selector.register(conn, selectors.EVENT_READ, read)
def read(conn):
data = conn.recv(1024)
if data == b'switch':
with self.window_list_lock:
windows = self.get_valid_windows()
for window_id in self.window_list[self.window_index:]:
if window_id not in windows:
self.window_list.remove(window_id)
else:
if self.window_index < (len(self.window_list) - 1):
self.window_index += 1
else:
self.window_index = 0
self.i3.command('[con_id=%s] focus' % window_id)
break
elif not data:
selector.unregister(conn)
conn.close()
selector.register(self.listening_socket, selectors.EVENT_READ, accept)
while True:
for key, event in selector.select():
callback = key.data
callback(key.fileobj)
def run(self):
t_i3 = threading.Thread(target=self.launch_i3)
t_server = threading.Thread(target=self.launch_server)
for t in (t_i3, t_server):
t.start()
if __name__ == '__main__':
parser = ArgumentParser(prog='i3-cycle-focus.py',
description="""
Cycle backwards through the history of focused windows (aka Alt-Tab).
This script should be launched from ~/.xsession or ~/.xinitrc.
Use the `--history` option to set the maximum number of windows to be
stored in the focus history (Default 16 windows).
Use the `--delay` option to set the delay between focusing the
selected window and updating the focus history (Default 2.0 seconds).
Use a value of 0.0 seconds to toggle focus only between the current
and the previously focused window. Use the `--ignore-floating` option
to exclude all floating windows when cycling and updating the focus
history. Use the `--visible-workspaces` option to include windows on
visible workspaces only when cycling the focus history. Use the
`--active-workspace` option to include windows on the active workspace
only when cycling the focus history.
To trigger focus switching, execute the script from a keybinding with
the `--switch` option.""")
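    # Illustrative setup (paths and keybinding below are assumptions, adjust as needed):
    #   ~/.xsession:        /path/to/i3-cycle-focus.py --history 16 --delay 1.0 &
    #   i3 config binding:  bindsym $mod+Tab exec --no-startup-id /path/to/i3-cycle-focus.py --switch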
parser.add_argument('--history', dest='history',
help='Maximum number of windows in the focus history',
type=int)
parser.add_argument('--delay', dest='delay',
help='Delay before updating focus history',
type=float)
parser.add_argument('--ignore-floating', dest='ignore_float',
action='store_true', help='Ignore floating windows '
'when cycling and updating the focus history')
parser.add_argument('--visible-workspaces', dest='visible_workspaces',
action='store_true', help='Include windows on visible '
'workspaces only when cycling the focus history')
parser.add_argument('--active-workspace', dest='active_workspace',
action='store_true', help='Include windows on the '
'active workspace only when cycling the focus history')
parser.add_argument('--switch', dest='switch', action='store_true',
help='Switch to the previous window', default=False)
args = parser.parse_args()
if args.history:
MAX_WIN_HISTORY = args.history
    if args.delay is not None:
        UPDATE_DELAY = args.delay
if not args.switch:
focus_watcher = FocusWatcher()
focus_watcher.run()
else:
client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client_socket.connect(SOCKET_FILE)
client_socket.send(b'switch')
client_socket.close()
|
test_remote.py
|
import logging
import os
import socket
import time
from multiprocessing import Queue
from threading import Thread
import env
import pytest
import plumbum
from plumbum import (
NOHUP,
CommandNotFound,
ProcessExecutionError,
ProcessTimedOut,
RemotePath,
SshMachine,
local,
)
from plumbum._testtools import skip_on_windows, skip_without_chown
from plumbum.machines.session import HostPublicKeyUnknown, IncorrectLogin
try:
import paramiko
except ImportError:
paramiko = None
else:
from plumbum.machines.paramiko_machine import ParamikoMachine
pytestmark = pytest.mark.ssh
def strassert(one, two):
assert str(one) == str(two)
# TEST_HOST = "192.168.1.143"
TEST_HOST = "127.0.0.1"
if TEST_HOST not in ("::1", "127.0.0.1", "localhost"):
plumbum.local.env.path.append("c:\\Program Files\\Git\\bin")
@pytest.fixture(scope="session")
def sshpass():
try:
return plumbum.local["sshpass"]
except CommandNotFound:
pytest.skip("Test requires sshpass")
@skip_on_windows
def test_connection():
SshMachine(TEST_HOST)
def test_incorrect_login(sshpass):
with pytest.raises(IncorrectLogin):
SshMachine(
TEST_HOST,
password="swordfish",
ssh_opts=[
"-o",
"PubkeyAuthentication=no",
"-o",
"PreferredAuthentications=password",
],
)
@pytest.mark.xfail(env.LINUX, reason="TODO: no idea why this fails on linux")
def test_hostpubkey_unknown(sshpass):
with pytest.raises(HostPublicKeyUnknown):
SshMachine(
TEST_HOST,
password="swordfish",
ssh_opts=["-o", "UserKnownHostsFile=/dev/null", "-o", "UpdateHostKeys=no"],
)
@skip_on_windows
class TestRemotePath:
def _connect(self):
return SshMachine(TEST_HOST)
def test_name(self):
name = RemotePath(self._connect(), "/some/long/path/to/file.txt").name
assert isinstance(name, str)
assert "file.txt" == str(name)
def test_dirname(self):
name = RemotePath(self._connect(), "/some/long/path/to/file.txt").dirname
assert isinstance(name, RemotePath)
assert "/some/long/path/to" == str(name)
def test_uri(self):
p1 = RemotePath(self._connect(), "/some/long/path/to/file.txt")
assert "ftp://" == p1.as_uri("ftp")[:6]
assert "ssh://" == p1.as_uri("ssh")[:6]
assert "/some/long/path/to/file.txt" == p1.as_uri()[-27:]
def test_stem(self):
p = RemotePath(self._connect(), "/some/long/path/to/file.txt")
assert p.stem == "file"
p = RemotePath(self._connect(), "/some/long/path/")
assert p.stem == "path"
def test_suffix(self):
p1 = RemotePath(self._connect(), "/some/long/path/to/file.txt")
p2 = RemotePath(self._connect(), "file.tar.gz")
assert p1.suffix == ".txt"
assert p1.suffixes == [".txt"]
assert p2.suffix == ".gz"
assert p2.suffixes == [".tar", ".gz"]
strassert(
p1.with_suffix(".tar.gz"),
RemotePath(self._connect(), "/some/long/path/to/file.tar.gz"),
)
strassert(
p2.with_suffix(".other"), RemotePath(self._connect(), "file.tar.other")
)
strassert(
p2.with_suffix(".other", 2), RemotePath(self._connect(), "file.other")
)
strassert(
p2.with_suffix(".other", 0),
RemotePath(self._connect(), "file.tar.gz.other"),
)
strassert(
p2.with_suffix(".other", None), RemotePath(self._connect(), "file.other")
)
def test_newname(self):
p1 = RemotePath(self._connect(), "/some/long/path/to/file.txt")
p2 = RemotePath(self._connect(), "file.tar.gz")
strassert(
p1.with_name("something.tar"),
RemotePath(self._connect(), "/some/long/path/to/something.tar"),
)
strassert(
p2.with_name("something.tar"), RemotePath(self._connect(), "something.tar")
)
@skip_without_chown
def test_chown(self):
with self._connect() as rem:
with rem.tempdir() as dir:
p = dir / "foo.txt"
p.write(b"hello")
# because we're connected to localhost, we expect UID and GID to be the same
assert p.uid == os.getuid()
assert p.gid == os.getgid()
p.chown(p.uid.name)
assert p.uid == os.getuid()
def test_parent(self):
p1 = RemotePath(self._connect(), "/some/long/path/to/file.txt")
p2 = p1.parent
assert str(p2) == "/some/long/path/to"
def test_mkdir(self):
# (identical to test_local.TestLocalPath.test_mkdir)
with self._connect() as rem:
with rem.tempdir() as tmp:
(tmp / "a").mkdir(exist_ok=False, parents=False)
assert (tmp / "a").exists()
assert (tmp / "a").is_dir()
(tmp / "a").mkdir(exist_ok=True, parents=False)
(tmp / "a").mkdir(exist_ok=True, parents=True)
with pytest.raises(OSError):
(tmp / "a").mkdir(exist_ok=False, parents=False)
with pytest.raises(OSError):
(tmp / "a").mkdir(exist_ok=False, parents=True)
(tmp / "b" / "bb").mkdir(exist_ok=False, parents=True)
assert (tmp / "b" / "bb").exists()
assert (tmp / "b" / "bb").is_dir()
assert not tmp.exists()
@pytest.mark.xfail(
reason="mkdir's mode argument is not yet implemented " "for remote paths",
strict=True,
)
def test_mkdir_mode(self):
# (identical to test_local.TestLocalPath.test_mkdir_mode)
with self._connect() as rem:
with rem.tempdir() as tmp:
# just verify that mode argument works the same way it does for
# Python's own os.mkdir, which takes into account the umask
# (different from shell mkdir mode argument!); umask on my
# system is 022 by default, so 033 is ok for testing this
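                # worked example, assuming umask 022: both mkdir calls should end
                # up with permission bits 0o333 & ~0o022 == 0o311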
try:
(tmp / "pb_333").mkdir(exist_ok=False, parents=False, mode=0o333)
rem.python(
"-c",
"import os; os.mkdir({}, 0o333)".format(
repr(str(tmp / "py_333"))
),
)
pb_final_mode = oct((tmp / "pb_333").stat().st_mode)
py_final_mode = oct((tmp / "py_333").stat().st_mode)
assert pb_final_mode == py_final_mode
finally:
# we have to revert this so the tempdir deletion works
if (tmp / "pb_333").exists():
(tmp / "pb_333").chmod(0o777)
if (tmp / "py_333").exists():
(tmp / "py_333").chmod(0o777)
assert not tmp.exists()
def test_copy(self):
"""
tests `RemotePath.copy` for the following scenarios:
* copying a simple file from `file_a` to `copy_of_a` succeeds
* copying file `file_a` into a directory `a_dir/copy_of_a` succeeds
* copying a directory `a_dir` over an existing directory path with
`override=False` fails
* copying a directory `a_dir` over an existing directory path with
`override=True` succeeds
"""
with self._connect() as rem:
with rem.tempdir() as tmp:
# setup a file and make sure it exists...
(tmp / "file_a").touch()
assert (tmp / "file_a").exists()
assert (tmp / "file_a").is_file()
# setup a directory for copying into...
(tmp / "a_dir").mkdir(exist_ok=False, parents=False)
assert (tmp / "a_dir").exists()
assert (tmp / "a_dir").is_dir()
# setup a 2nd directory for testing `override=False`
(tmp / "b_dir").mkdir(exist_ok=False, parents=False)
assert (tmp / "b_dir").exists()
assert (tmp / "b_dir").is_dir()
# copying a simple file
(tmp / "file_a").copy(tmp / "copy_of_a")
assert (tmp / "copy_of_a").exists()
assert (tmp / "copy_of_a").is_file()
# copying into a directory
(tmp / "file_a").copy(tmp / "a_dir/copy_of_a")
assert (tmp / "a_dir/copy_of_a").exists()
assert (tmp / "a_dir/copy_of_a").is_file()
# copying a directory on top of an existing directory using
# `override=False` (should fail with TypeError)
with pytest.raises(TypeError):
(tmp / "a_dir").copy(tmp / "b_dir", override=False)
# copying a directory on top of an existing directory using
# `override=True` (should copy transparently)
(tmp / "a_dir").copy(tmp / "b_dir", override=True)
assert "copy_of_a" in (tmp / "b_dir")
assert not tmp.exists()
class BaseRemoteMachineTest:
TUNNEL_PROG_AF_INET = r"""import sys, socket
s = socket.socket()
s.bind(("", 0))
s.listen(1)
sys.stdout.write("{0}\n".format(s.getsockname()[1]))
sys.stdout.flush()
s2, _ = s.accept()
data = s2.recv(100)
s2.send(b"hello " + data)
s2.close()
s.close()
"""
TUNNEL_PROG_AF_UNIX = r"""import sys, socket, tempfile
s = socket.socket(family=socket.AF_UNIX)
socket_location = tempfile.NamedTemporaryFile()
socket_location.close()
s.bind(socket_location.name)
s.listen(1)
sys.stdout.write("{0}\n".format(s.getsockname()))
sys.stdout.flush()
s2, _ = s.accept()
data = s2.recv(100)
s2.send(b"hello " + data)
s2.close()
s.close()
"""
def test_basic(self):
with self._connect() as rem:
r_ssh = rem["ssh"]
r_ls = rem["ls"]
r_grep = rem["grep"]
lines = r_ls("-a").splitlines()
assert ".bashrc" in lines or ".bash_profile" in lines
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
cmd = r_ssh[
"localhost", "cd", rem.cwd, "&&", r_ls, "|", r_grep["\\.py"]
]
assert "'|'" in str(cmd)
assert "test_remote.py" in cmd()
assert "test_remote.py" in [f.name for f in rem.cwd // "*.py"]
# Testing for #271
def test_double_chdir(self):
with self._connect() as rem:
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
rem["ls"]()
with rem.cwd("/tmp"):
rem["pwd"]()
def test_glob(self):
with self._connect() as rem:
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
filenames = [f.name for f in rem.cwd // ("*.py", "*.bash")]
assert "test_remote.py" in filenames
assert "slow_process.bash" in filenames
def test_glob_spaces(self):
with self._connect() as rem:
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
filenames = [f.name for f in rem.cwd // ("*space.txt")]
assert "file with space.txt" in filenames
filenames = [f.name for f in rem.cwd // ("*with space.txt")]
assert "file with space.txt" in filenames
def test_cmd(self):
with self._connect() as rem:
rem.cmd.ls("/tmp")
@pytest.mark.usefixtures("testdir")
def test_download_upload(self):
with self._connect() as rem:
rem.upload("test_remote.py", "/tmp")
r_ls = rem["ls"]
r_rm = rem["rm"]
assert "test_remote.py" in r_ls("/tmp").splitlines()
rem.download("/tmp/test_remote.py", "/tmp/test_download.txt")
r_rm("/tmp/test_remote.py")
r_rm("/tmp/test_download.txt")
def test_session(self):
with self._connect() as rem:
sh = rem.session()
for _ in range(4):
_, out, _ = sh.run("ls -a")
assert ".bashrc" in out or ".bash_profile" in out
@pytest.mark.xfail(env.PYPY, reason="PyPy sometimes fails here", strict=False)
def test_env(self):
with self._connect() as rem:
with pytest.raises(ProcessExecutionError):
rem.python("-c", "import os;os.environ['FOOBAR72']")
with rem.env(FOOBAR72="lala"):
with rem.env(FOOBAR72="baba"):
out = rem.python("-c", "import os;print(os.environ['FOOBAR72'])")
assert out.strip() == "baba"
out = rem.python("-c", "import os;print(os.environ['FOOBAR72'])")
assert out.strip() == "lala"
# path manipulation
with pytest.raises(CommandNotFound):
rem.which("dummy-executable")
with rem.cwd(os.path.dirname(os.path.abspath(__file__))):
rem.env.path.insert(0, rem.cwd / "not-in-path")
p = rem.which("dummy-executable")
assert p == rem.cwd / "not-in-path" / "dummy-executable"
@pytest.mark.xfail(env.PYPY, reason="PyPy sometimes fails here", strict=False)
@pytest.mark.parametrize(
"env",
[
"lala",
"-Wl,-O2 -Wl,--sort-common",
"{{}}",
"''",
"!@%_-+=:",
"'",
"`",
"$",
"\\",
],
)
def test_env_special_characters(self, env):
with self._connect() as rem:
with pytest.raises(ProcessExecutionError):
rem.python("-c", "import os;print(os.environ['FOOBAR72'])")
rem.env["FOOBAR72"] = env
out = rem.python("-c", "import os;print(os.environ['FOOBAR72'])")
assert out.strip() == env
def test_read_write(self):
with self._connect() as rem:
with rem.tempdir() as dir:
assert dir.is_dir()
data = b"hello world"
(dir / "foo.txt").write(data)
assert (dir / "foo.txt").read() == data
assert not dir.exists()
def test_contains(self):
with self._connect() as rem:
assert "ls" in rem
def test_iter_lines_timeout(self):
with self._connect() as rem:
try:
for i, (out, err) in enumerate( # noqa: B007
rem["ping"]["-i", 0.5, "127.0.0.1"].popen().iter_lines(timeout=4)
):
print("out:", out)
print("err:", err)
except NotImplementedError as err:
pytest.skip(str(err))
except ProcessTimedOut:
assert i > 3
else:
pytest.fail("Expected a timeout")
def test_iter_lines_error(self):
with self._connect() as rem:
with pytest.raises(ProcessExecutionError) as ex:
for i, _lines in enumerate(rem["ls"]["--bla"].popen()): # noqa: B007
pass
assert i == 1
assert "/bin/ls: " in ex.value.stderr
def test_touch(self):
with self._connect() as rem:
rfile = rem.cwd / "sillyfile"
assert not rfile.exists()
rfile.touch()
assert rfile.exists()
rfile.delete()
def serve_reverse_tunnel(queue):
s = socket.socket()
s.bind(("", 12223))
s.listen(1)
s2, _ = s.accept()
data = s2.recv(100).decode("ascii").strip()
queue.put(data)
s2.close()
s.close()
@skip_on_windows
class TestRemoteMachine(BaseRemoteMachineTest):
def _connect(self):
return SshMachine(TEST_HOST)
def test_tunnel(self):
for tunnel_prog in (self.TUNNEL_PROG_AF_INET, self.TUNNEL_PROG_AF_UNIX):
with self._connect() as rem:
p = (rem.python["-u"] << tunnel_prog).popen()
port_or_socket = p.stdout.readline().decode("ascii").strip()
try:
port_or_socket = int(port_or_socket)
dhost = "localhost"
except ValueError:
dhost = None
with rem.tunnel(12222, port_or_socket, dhost=dhost):
s = socket.socket()
s.connect(("localhost", 12222))
s.send(b"world")
data = s.recv(100)
s.close()
print(p.communicate())
assert data == b"hello world"
def test_reverse_tunnel(self):
with self._connect() as rem:
get_unbound_socket_remote = """import sys, socket
s = socket.socket()
s.bind(("", 0))
s.listen(1)
sys.stdout.write(str(s.getsockname()[1]))
sys.stdout.flush()
s.close()
"""
p = (rem.python["-u"] << get_unbound_socket_remote).popen()
remote_socket = p.stdout.readline().decode("ascii").strip()
queue = Queue()
tunnel_server = Thread(target=serve_reverse_tunnel, args=(queue,))
tunnel_server.start()
message = str(time.time())
with rem.tunnel(12223, remote_socket, dhost="localhost", reverse=True):
remote_send_af_inet = """import socket
s = socket.socket()
s.connect(("localhost", {}))
s.send("{}".encode("ascii"))
s.close()
""".format(
remote_socket, message
)
(rem.python["-u"] << remote_send_af_inet).popen()
tunnel_server.join(timeout=1)
assert queue.get() == message
def test_get(self):
with self._connect() as rem:
assert str(rem["ls"]) == str(rem.get("ls"))
assert str(rem["ls"]) == str(rem.get("not_a_valid_process_234", "ls"))
assert "ls" in rem
assert "not_a_valid_process_234" not in rem
def test_list_processes(self):
with self._connect() as rem:
assert list(rem.list_processes())
def test_pgrep(self):
with self._connect() as rem:
assert list(rem.pgrep("ssh"))
def test_nohup(self):
with self._connect() as rem:
sleep = rem["sleep"]
sleep["5.793817"] & NOHUP(stdout=None, append=False)
time.sleep(0.5)
print(rem["ps"]("aux"))
assert list(rem.pgrep("5.793817"))
time.sleep(6)
assert not list(rem.pgrep("5.793817"))
def test_bound_env(self):
with self._connect() as rem:
printenv = rem["printenv"]
with rem.env(FOO="hello"):
assert printenv.with_env(BAR="world")("FOO") == "hello\n"
assert printenv.with_env(BAR="world")("BAR") == "world\n"
assert printenv.with_env(FOO="sea", BAR="world")("FOO") == "sea\n"
assert printenv.with_env(FOO="sea", BAR="world")("BAR") == "world\n"
assert rem.cmd.pwd.with_cwd("/")() == "/\n"
assert rem.cmd.pwd["-L"].with_env(A="X").with_cwd("/")() == "/\n"
@pytest.mark.skipif(
"useradd" not in local, reason="System does not have useradd (Mac?)"
)
def test_sshpass(self):
with local.as_root():
local["useradd"]("-m", "-b", "/tmp", "testuser")
try:
with local.as_root():
try:
(local["passwd"] << "123456")("--stdin", "testuser")
except ProcessExecutionError:
# some versions of passwd don't support --stdin, nothing to do in this case
logging.warning("passwd failed")
return
with SshMachine("localhost", user="testuser", password="123456") as rem:
assert rem["pwd"]().strip() == "/tmp/testuser"
finally:
with local.as_root():
local["userdel"]("-r", "testuser")
@skip_on_windows
class TestParamikoMachine(BaseRemoteMachineTest):
def _connect(self):
if paramiko is None:
pytest.skip("System does not have paramiko installed")
return ParamikoMachine(TEST_HOST, missing_host_policy=paramiko.AutoAddPolicy())
def test_tunnel(self):
with self._connect() as rem:
p = rem.python["-c", self.TUNNEL_PROG_AF_INET].popen()
try:
port = int(p.stdout.readline().strip())
except ValueError:
print(p.communicate())
raise
s = rem.connect_sock(port)
s.send(b"world")
data = s.recv(100)
s.close()
print(p.communicate())
assert data == b"hello world"
def test_piping(self):
with self._connect() as rem:
try:
rem["ls"] | rem["cat"]
except NotImplementedError:
pass
else:
pytest.fail("Should not pipe")
    @pytest.mark.xfail(reason="Not working yet")
def test_encoding(self):
with self._connect() as rem:
unicode_half = b"\xc2\xbd".decode("utf8")
ret = rem["bash"]("-c", 'echo -e "\xC2\xBD"')
assert ret == "%s\n" % unicode_half
ret = list(rem["bash"]["-c", 'echo -e "\xC2\xBD"'].popen())
assert ret == [["%s\n" % unicode_half, None]]
def test_path_open_remote_write_local_read(self):
with self._connect() as rem:
with rem.tempdir() as remote_tmpdir, local.tempdir() as tmpdir:
assert remote_tmpdir.is_dir()
assert tmpdir.is_dir()
data = b"hello world"
with (remote_tmpdir / "bar.txt").open("wb") as f:
f.write(data)
rem.download((remote_tmpdir / "bar.txt"), (tmpdir / "bar.txt"))
assert (tmpdir / "bar.txt").open("rb").read() == data
assert not remote_tmpdir.exists()
assert not tmpdir.exists()
def test_path_open_local_write_remote_read(self):
with self._connect() as rem:
with rem.tempdir() as remote_tmpdir, local.tempdir() as tmpdir:
assert remote_tmpdir.is_dir()
assert tmpdir.is_dir()
data = b"hello world"
with (tmpdir / "bar.txt").open("wb") as f:
f.write(data)
rem.upload((tmpdir / "bar.txt"), (remote_tmpdir / "bar.txt"))
assert (remote_tmpdir / "bar.txt").open("rb").read() == data
assert not remote_tmpdir.exists()
assert not tmpdir.exists()
|
route_reprogram.py
|
import tornado
from jupyter_server.base.handlers import APIHandler
import os
import json
from . import webds
from .programmer_manager import ProgrammerManager
from .touchcomm_manager import TouchcommManager
import threading
from queue import Queue
import time
import sys
from tornado import gen
from tornado.iostream import StreamClosedError
g_stdout_handler = None
g_program_thread = None
class StdoutHandler(Queue):
_progress = 0
_status = 'idle'
_message = None
def __init__(self):
super().__init__()
def write(self,msg):
try:
if "%" in msg:
progress = msg[12:-1]
self._progress = int(progress, base=10)
sys.__stdout__.write(msg)
except Exception as e:
print("Oops StdoutHandler write!", e.__class__, "occurred.")
pass
def flush(self):
sys.__stdout__.flush()
def get_progress(self):
return self._progress
def set_progress(self, num):
self._progress = num
def reset(self):
self._status = 'idle'
self._progress = 0
self._message = ''
def set_status(self, status):
self._status = status
def get_status(self):
return self._status
def get_message(self):
return self._message
def set_message(self, message):
self._message = message
class ProgramHandler(APIHandler):
# The following decorator should be present on all verb methods (head, get, post,
# patch, put, delete, options) to ensure only authorized user can request the
# Jupyter server
def initialize(self):
self._last = 0
self.set_header('cache-control', 'no-cache')
@tornado.web.authenticated
@tornado.gen.coroutine
def publish(self, data):
"""Pushes data to a listener."""
try:
self.set_header('content-type', 'text/event-stream')
self.write('event: reprogram\n')
self.write('data: {}\n'.format(data))
self.write('\n')
yield self.flush()
except StreamClosedError:
print("stream close error!!")
raise
@tornado.web.authenticated
@tornado.gen.coroutine
def get(self):
print("request progress")
try:
while True:
if g_stdout_handler is not None:
status = g_stdout_handler.get_status()
if status == 'start':
if self._last != g_stdout_handler.get_progress():
send = {
"progress": g_stdout_handler.get_progress(),
}
yield self.publish(json.dumps(send))
self._last = g_stdout_handler.get_progress()
elif status != 'start' and status != 'idle':
print(g_stdout_handler.get_message())
send = {
"progress": g_stdout_handler.get_progress(),
"status": status,
"message": g_stdout_handler.get_message()
}
print(json.dumps(send))
yield self.publish(json.dumps(send))
g_stdout_handler.reset()
self.finish(json.dumps({
"data": "done"
}))
break
yield gen.sleep(0.0001)
else:
yield gen.sleep(1)
except StreamClosedError:
message="stream closed"
print(message)
raise tornado.web.HTTPError(status_code=400, log_message=message)
print("request progress finished")
@tornado.web.authenticated
def post(self):
# input_data is a dictionary with a key "filename"
input_data = self.get_json_body()
print(input_data)
data = ""
global g_stdout_handler
global g_program_thread
action = input_data["action"]
if action == "start":
print("start to erase and program!!!")
filename = os.path.join(webds.PACKRAT_CACHE, input_data["filename"])
print(filename)
if not os.path.isfile(filename):
message = "HEX file not found: " + filename
raise tornado.web.HTTPError(status_code=400, log_message=message)
if g_program_thread is not None and g_program_thread.is_alive():
print("erase and program thread is still running...")
g_program_thread.join()
print("previous erase and program thread finished.")
if g_stdout_handler is None:
print("create StdoutHandler")
g_stdout_handler = StdoutHandler()
g_program_thread = threading.Thread(target=self.program, args=(filename, g_stdout_handler))
g_program_thread.start()
print("program thread start")
### g_program_thread.join()
data = {
"status": g_stdout_handler.get_status(),
}
print(data)
elif action == "cancel":
print("cancel thread")
data = {
"status": "TBC",
}
else:
print("unknown action" + action)
print(data)
self.finish(json.dumps(data))
def program(self, filename, handler):
temp = sys.stdout
sys.stdout = handler
handler.set_status("start")
try:
ret = ProgrammerManager.program(filename)
sys.stdout = temp
if handler.get_progress() != 100:
print(handler.get_progress())
handler.set_message("Unkwon error")
handler.set_progress(-1)
handler.set_status("error")
else:
print("Erase and program done.")
TouchcommManager().getInstance()
handler.set_message("Programmed with " + filename)
handler.set_status("success")
        except Exception as error:
            # restore the original stdout before reporting the failure
            sys.stdout = temp
            print(error)
handler.set_progress(-1)
handler.set_message(str(error))
handler.set_status("error")
|
status.py
|
import threading
import time
import fonts
from screen import Screen
from widgets.scrollingtext import ScrollingText
class StatusScreen(Screen):
def __init__(self, screen_manager, keyboard_manager, client):
super(StatusScreen, self).__init__(screen_manager, keyboard_manager)
self._client = client
font = fonts.DEFAULT_FONT_12
self._label = ScrollingText((0, 5), (125, 15), font, u'Status')
self._status = ScrollingText((0, 25), (125, 15), font, u'')
self._failure = ScrollingText((0, 45), (125, 15), font, u'')
self._active_mutex = threading.Condition()
self._thread_started_cond = threading.Condition()
thread = threading.Thread(target=self._poll)
        thread.daemon = True
self._thread_started_cond.acquire()
thread.start()
self._thread_started_cond.wait()
self._thread_started_cond.release()
def activate(self):
Screen.activate(self)
self._active_mutex.acquire()
        self._active_mutex.notify_all()
self._active_mutex.release()
def _poll(self):
while True:
self._active_mutex.acquire()
self._thread_started_cond.acquire()
            self._thread_started_cond.notify_all()
self._thread_started_cond.release()
self._active_mutex.wait()
self._active_mutex.release()
while self._active:
self._status.set_text(self._client.connection_status)
lf = self._client.last_connection_failure
if lf is None:
self._failure.set_text(u'')
else:
self._failure.set_text(lf)
if self._client.connected:
self._screen_manager.pop_screen()
break
time.sleep(2)
def widgets(self):
return [self._label, self._status, self._failure]
|
basicViewerPython.py
|
# Copyright (C) 2016-2017 RealVNC Limited. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Note this sample makes use of PySide, which is licensed under the terms of
# the LGPL v2.1.
"""This file contains the basicViewerPython sample.
Usage:
python basicViewerPython.py [LOCAL_CLOUD_ADDRESS] [LOCAL_CLOUD_PASSWORD]
[PEER_CLOUD_ADDRESS]
python basicViewerPython.py [TCP_ADDRESS] [TCP_PORT]
Arguments:
LOCAL_CLOUD_ADDRESS - the VNC Cloud address for this Viewer
LOCAL_CLOUD_PASSWORD - the VNC Cloud password for this Viewer
PEER_CLOUD_ADDRESS - the VNC Cloud address to connect to
TCP_ADDRESS - direct TCP address to connect to
TCP_PORT - direct TCP port number
The arguments may be omitted if they have been hard-coded in this file.
This sample shows how to implement a basic VNC viewer using the VNC SDK
Python bindings, using the PySide Qt bindings.
Two types of connectivity are supported: Cloud-based and direct TCP
connection. A viewer can only use one of these mechanisms at a time.
Note: To use direct TCP you will need to apply an add-on code; a trial
code is available from your RealVNC account. You can ignore TCP-related
code below if you do not intend to use the Direct TCP add-on.
The viewer attempts to connect to a server, using either Cloud-based or
direct TCP connectivity according to user-supplied connectivity details.
These details can be provided on the command line or hard-coded by editing the
Python file.
Because both the Qt UI library and the SDK use blocking event loops, we use a
separate thread to run the SDK, and run the UI in the main thread.
"""
import os
import sys
from PySide import QtCore, QtGui
from threading import Thread, Event
# Before importing the SDK bindings, we set the VNCSDK_LIBRARY environment
# variable, which determines where the Python bindings (vncsdk.py) will search
# for the shared library (DLL). This sample assumes the directory structure
# used to distribute the samples has been preserved, and searches for the
# shared library accordingly. We also append the path of the Python bindings
# themselves to the search path.
sample_dir = os.path.dirname(os.path.abspath(__file__))
os.environ['VNCSDK_LIBRARY'] = os.path.join(sample_dir, '..', '..', 'lib')
sys.path.append(os.path.join(sample_dir, '..', '..', 'lib', 'python'))
import vncsdk
# For Cloud connections, either hard-code the Cloud address for the Viewer OR
# specify it at the command line. Example Cloud address:
# LxygGgSrhXQFiLj5M4M.LxyPXzA9sGLkB6pCtJv.devEX1Sg2Txs1CgVuW4.LxyPRsVnXoDoue4Xqm
LOCAL_CLOUD_ADDRESS = None
# Either hard-code the Cloud password associated with this Cloud address OR
# specify it at the command line. Example Cloud password: KMDgGgELSvAdvscgGfk2
LOCAL_CLOUD_PASSWORD = None
# Either hard-code the Cloud address of the Server (peer) to connect to OR
# specify it at the command line. Example peer Cloud address:
# LxyDgGgrhXQFiLj5M4M.LxyPXzA9sGLkB6pCtJv.devEX1Sg2Txs1CgVuW4.LxyPRydf9ZczNo13BcD
PEER_CLOUD_ADDRESS = None
# To enable direct TCP connectivity you need to copy the content of your
# add-on code into the string below.
direct_tcp_add_on_code = None
# For direct TCP connection you must provide the server's TCP host address
# and port number. Either edit TCP_ADDRESS and TCP_PORT variables below OR
# provide these connection details on the command line.
# The default direct TCP port number can be specified below by using:
# TCP_PORT = vncsdk.DirectTcp.DEFAULT_PORT
# Ignore these variables if you are not using the Direct TCP add-on
TCP_ADDRESS = None
TCP_PORT = 0
# The value of this flag is set automatically according to the user-supplied
# command line arguments and macro definitions above. Cloud connectivity is
# presumed by default here.
using_cloud = True
class ViewerWidget(QtGui.QWidget):
"""The ViewerWidget is the UI object. All its methods are invoked in the
main thread.
"""
# (Qt signals must be class variables rather than instance variables.)
signal_resized = QtCore.Signal(int, int, int, bytearray, bool)
signal_connected = QtCore.Signal()
signal_disconnected = QtCore.Signal(str)
signal_updated = QtCore.Signal()
signal_server_name_changed = QtCore.Signal(str)
# Title for the application
WINDOW_TITLE = "Basic Python viewer sample"
def __init__(self):
"""Create and initialise the window and its viewer widget."""
# Call the parent constructor
QtGui.QWidget.__init__(self)
# This Qt signal is fired when the Viewer connection succeeds
self.signal_connected.connect(self.on_connected)
# This Qt signal is fired when the Viewer connection ends
self.signal_disconnected.connect(self.on_disconnected)
# This Qt signal is fired when the framebuffer has been updated
self.signal_updated.connect(self.on_framebuffer_updated)
# This Qt signal is fired when the framebuffer has been resized
self.signal_resized.connect(self.on_framebuffer_resized)
# This Qt signal is fired when the Server's desktop name has changed
self.signal_server_name_changed.connect(self.on_server_name_changed)
self.resize_event = Event()
self.ignore_next_resize_event = False
self.conn_disconnected = False
self.buffer = self.canvas = None
self.shown = False
self.setWindowTitle(self.WINDOW_TITLE)
# Ensure we receive mouse and focus events
self.setMouseTracking(True)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
#
# The following methods handle events from the Server, received via signals
# from the SDK thread:
#
def on_connected(self):
"""The Viewer just succeeded in connecting to the Server"""
# Hide the cursor - we'll see the server's cursor instead
self.setCursor(QtCore.Qt.BlankCursor)
def on_disconnected(self, message):
"""The Viewer connection has just ended"""
# Reset the cursor to the default
self.unsetCursor()
# Display the disconnection reason, if any
if message:
box = QtGui.QMessageBox(QtGui.QMessageBox.NoIcon, "Error", message,
buttons=QtGui.QMessageBox.Ok, parent=self)
box.exec_()
# Close the window. Setting conn_disconnected prevents us subsequently
# calling disconnect() on the connection.
self.conn_disconnected = True
self.close()
def on_framebuffer_updated(self):
"""The Viewer connection has received fresh data from the Server, so we
redraw the widget by triggering a Qt paint event.
"""
self.update()
def on_framebuffer_resized(self, width, height, stride, buffer,
resize_window):
"""The Server's framebuffer size has changed, so we reset our canvas to
use the new buffer and resize our window to match.
"""
# We take a reference to the buffer, to guarantee that it stays valid
# for the lifetime of the QImage, which will use it as its backing
# buffer.
self.buffer = buffer
self.canvas = QtGui.QImage(
buffer,
width,
height,
stride * 4,
QtGui.QImage.Format_RGB32
)
if resize_window:
# Set this flag so we don't enter an infinite loop of resizing
self.ignore_next_resize_event = True
self.resize(width, height)
if not self.shown:
# Until we receive a resize event from the SDK, we don't know what
            # size the window should be, so we wait for the first event before
# showing the window.
self.shown = True
self.show()
def on_server_name_changed(self, name):
"""The Server's desktop name has changed."""
self.setWindowTitle("{title} - {name}".format(
title=self.WINDOW_TITLE,
name=name
))
#
# The following methods handle Qt events, received by subclassing QWidget:
#
def closeEvent(self, e):
"""The user has just attempted to close the Qt window."""
if not self.conn_disconnected:
vncsdk.EventLoop.run_on_loop(viewer_conn.on_closed)
# We don't shut the window immediately, it should stay open until
# the connection has been cleanly closed, at which point we receive
# the on_disconnected() signal and exit the app.
e.ignore()
def paintEvent(self, e):
"""The Qt window must be redrawn, so we paint using our canvas."""
if not self.canvas:
# Ignore paint events before we've set up our canvas
return
painter = QtGui.QPainter(self)
painter.drawImage(0, 0, self.canvas)
def resizeEvent(self, e):
"""The user has just attempted to resize the Qt window, or we have just
called resize() to match a change in the Server's screen size. We
distinguish these two cases using the ignore_next_resize_event flag.
"""
if self.ignore_next_resize_event:
self.ignore_next_resize_event = False
else:
self.resize_event.clear()
vncsdk.EventLoop.run_on_loop(viewer_conn.on_widget_resized,
(e.size().width(), e.size().height(),
self.resize_event))
# Wait for the SDK thread to process the resize, to prevent us
# from spamming it with resize requests.
self.resize_event.wait()
def keyPressEvent(self, e):
"""The Qt window has been sent keyboard input, which we send to the
Server.
"""
# A mapping between the Qt non-printable keys and the SDK keysyms
key_map = {
int(QtCore.Qt.Key_Escape): vncsdk.Keyboard.XK_Escape,
int(QtCore.Qt.Key_Return): vncsdk.Keyboard.XK_Return,
int(QtCore.Qt.Key_Enter): vncsdk.Keyboard.XK_KP_Enter,
int(QtCore.Qt.Key_Insert): vncsdk.Keyboard.XK_Insert,
int(QtCore.Qt.Key_Delete): vncsdk.Keyboard.XK_Delete,
int(QtCore.Qt.Key_Pause): vncsdk.Keyboard.XK_Pause,
int(QtCore.Qt.Key_Print): vncsdk.Keyboard.XK_Print,
int(QtCore.Qt.Key_SysReq): vncsdk.Keyboard.XK_Sys_Req,
int(QtCore.Qt.Key_Home): vncsdk.Keyboard.XK_Home,
int(QtCore.Qt.Key_End): vncsdk.Keyboard.XK_End,
int(QtCore.Qt.Key_Left): vncsdk.Keyboard.XK_Left,
int(QtCore.Qt.Key_Up): vncsdk.Keyboard.XK_Up,
int(QtCore.Qt.Key_Right): vncsdk.Keyboard.XK_Right,
int(QtCore.Qt.Key_Down): vncsdk.Keyboard.XK_Down,
int(QtCore.Qt.Key_PageUp): vncsdk.Keyboard.XK_Page_Up,
int(QtCore.Qt.Key_PageDown): vncsdk.Keyboard.XK_Page_Down,
int(QtCore.Qt.Key_Shift): vncsdk.Keyboard.XK_Shift_L,
int(QtCore.Qt.Key_Alt): vncsdk.Keyboard.XK_Alt_L,
int(QtCore.Qt.Key_F1): vncsdk.Keyboard.XK_F1,
int(QtCore.Qt.Key_F2): vncsdk.Keyboard.XK_F2,
int(QtCore.Qt.Key_F3): vncsdk.Keyboard.XK_F3,
int(QtCore.Qt.Key_F4): vncsdk.Keyboard.XK_F4,
int(QtCore.Qt.Key_F5): vncsdk.Keyboard.XK_F5,
int(QtCore.Qt.Key_F6): vncsdk.Keyboard.XK_F6,
int(QtCore.Qt.Key_F7): vncsdk.Keyboard.XK_F7,
int(QtCore.Qt.Key_F8): vncsdk.Keyboard.XK_F8,
int(QtCore.Qt.Key_F9): vncsdk.Keyboard.XK_F9,
int(QtCore.Qt.Key_F10): vncsdk.Keyboard.XK_F10,
int(QtCore.Qt.Key_F11): vncsdk.Keyboard.XK_F11,
int(QtCore.Qt.Key_F12): vncsdk.Keyboard.XK_F12,
}
if sys.platform == 'darwin': # Mac OS X
key_map[int(QtCore.Qt.Key_Control)] = vncsdk.Keyboard.XK_Alt_L
key_map[int(QtCore.Qt.Key_Meta)] = vncsdk.Keyboard.XK_Control_L
ctrl_modifier = QtCore.Qt.MetaModifier
else:
key_map[int(QtCore.Qt.Key_Control)] = vncsdk.Keyboard.XK_Control_L
key_map[int(QtCore.Qt.Key_Meta)] = vncsdk.Keyboard.XK_Super_L
ctrl_modifier = QtCore.Qt.ControlModifier
# Try first to send the keycode as a keysym directly, to handle non-
# printing keycodes, which don't have associated Unicode text.
keysym = key_map.get(e.key())
keycode = e.key()
if keysym:
vncsdk.EventLoop.run_on_loop(viewer_conn.on_key_press,
(keysym, False, keycode))
return
# Otherwise, it's presumably a Unicode key, so we should send it the
# codepoints as Unicode keysyms.
if e.modifiers() & ctrl_modifier:
# If the Ctrl key is down then the result of e.text() is
            # platform-dependent, so instead we use e.key() if it lies
# within the printable ASCII range, otherwise we ignore the key.
if keycode >= 0x20 and keycode <= 0x7e:
char = chr(e.key())
# If Shift is NOT down then we need to convert the character
# to lowercase, otherwise the server will press Shift for us.
if not (e.modifiers() & QtCore.Qt.ShiftModifier):
char = char.lower()
vncsdk.EventLoop.run_on_loop(viewer_conn.on_key_press,
(ord(char), True, keycode))
else:
for unichar in e.text():
vncsdk.EventLoop.run_on_loop(viewer_conn.on_key_press,
(ord(unichar), True, keycode))
def keyReleaseEvent(self, e):
"""The Qt window has been sent keyboard input, which we send to the
Server.
"""
keycode = e.key()
vncsdk.EventLoop.run_on_loop(viewer_conn.on_key_release, (keycode,))
def mouseEvent(self, e):
"""The Qt window has been sent mouse input, which we send to the
Server. This method only handles click and move events, not scrollwheel
events.
"""
# A mapping between the Qt enumerations and the SDK enumerations
mouse_map = {
int(QtCore.Qt.LeftButton):
vncsdk.Viewer.MouseButton.MOUSE_BUTTON_LEFT,
int(QtCore.Qt.RightButton):
vncsdk.Viewer.MouseButton.MOUSE_BUTTON_RIGHT,
int(QtCore.Qt.MiddleButton):
vncsdk.Viewer.MouseButton.MOUSE_BUTTON_MIDDLE
}
raw_buttons = int(e.buttons())
mouse_mask = {v for k, v in mouse_map.items() if k & raw_buttons}
vncsdk.EventLoop.run_on_loop(viewer_conn.on_pointer_event,
(e.x(), e.y(), mouse_mask))
mouseMoveEvent = mouseEvent
mousePressEvent = mouseEvent
mouseReleaseEvent = mouseEvent
def wheelEvent(self, e):
"""The Qt window has been sent mouse scroll input, which we send to the
Server.
"""
# Qt's units are scaled for high-resolution scrolling devices, whereas
# the SDK uses the more common Windows units, so we rescale the delta
# using Microsoft's "WHEEL_DELTA" factor of 120.
delta = int(e.delta() / 120)
axis = vncsdk.Viewer.MouseWheel.MOUSE_WHEEL_VERTICAL
vncsdk.EventLoop.run_on_loop(viewer_conn.on_scroll_event,
(delta, axis))
def focusOutEvent(self, e):
"""The Qt window has lost focus, so we release all pressed keys."""
vncsdk.EventLoop.run_on_loop(viewer_conn.on_focus_out_event)
class ViewerConn(vncsdk.Viewer.ConnectionCallback,
vncsdk.Viewer.FramebufferCallback,
vncsdk.Viewer.ServerEventCallback):
"""The ViewerConn owns the SDK's Viewer object, representing the Viewer's
connection to the Server, and handles notifications from the SDK's
callbacks. All its methods are invoked in the SDK thread.
"""
def __init__(self):
vncsdk.Viewer.ConnectionCallback.__init__(self)
vncsdk.Viewer.FramebufferCallback.__init__(self)
vncsdk.Viewer.ServerEventCallback.__init__(self)
self.viewer = vncsdk.Viewer()
self.is_connected = False
self.viewer.set_connection_callback(self)
self.viewer.set_framebuffer_callback(self)
self.viewer.set_server_event_callback(self)
# Set the Qt widget's initial size to the initial size of the Viewer.
w = self.viewer.get_viewer_fb_width()
h = self.viewer.get_viewer_fb_height()
self._set_buffer(w, h)
viewer_widget.signal_resized.emit(w, h, w, self.buffer, True)
def destroy(self):
self.viewer.destroy()
self.viewer = None
def _set_buffer(self, width, height):
# We set our pixel buffer to be a new buffer with a matching size, and
# choose the SDK's rgb888() format, which corresponds to Qt's
# QImage.Format_RGB32.
self.buffer = bytearray(width * height * 4)
self.viewer.set_viewer_fb(
self.buffer,
vncsdk.PixelFormat.rgb888(),
width, height, width
)
#
# The following methods handle notifications from the SDK of events from
# the server.
#
def viewer_fb_updated(self, viewer, x, y, w, h):
"""The Server has sent fresh pixel data, so we signal the Qt window to
redraw.
"""
viewer_widget.signal_updated.emit()
def server_fb_size_changed(self, viewer, w, h):
"""The Server screen size has changed, so we signal the Qt window to
resize to match its aspect ratio.
"""
aspect_ratio = w / float(h)
w = self.viewer.get_viewer_fb_width()
h = int(w / aspect_ratio)
self._set_buffer(w, h)
viewer_widget.signal_resized.emit(w, h, w, self.buffer, True)
def connected(self, viewer):
"""The Viewer's connection to the Server has succeeded."""
self.is_connected = True
viewer_widget.signal_connected.emit()
def disconnected(self, viewer, reason, flags):
"""The Viewer's connection to the Server has ended."""
message = ""
if vncsdk.Viewer.DisconnectFlags.ALERT_USER in flags:
if not self.is_connected:
message = \
"Disconnected while attempting to establish a connection"
message = "{msg}\nDisconnect reason: {reason}".format(
msg=message, reason=reason)
viewer_widget.signal_disconnected.emit(message)
def server_friendly_name_changed(self, viewer, name):
viewer_widget.signal_server_name_changed.emit(name)
#
# The following methods handle notifications of events sent from the Qt
# widget to the SDK.
#
def on_closed(self):
self.viewer.disconnect()
def on_widget_resized(self, w, h, event):
self._set_buffer(w, h)
viewer_widget.signal_resized.emit(w, h, w, self.buffer, False)
event.set()
def on_key_press(self, keysym, translate_unichar, keycode):
if translate_unichar:
keysym = vncsdk.unicode_to_keysym(keysym)
self.viewer.send_key_down(keysym, keycode)
def on_key_release(self, keycode):
self.viewer.send_key_up(keycode)
def on_pointer_event(self, x, y, button_mask):
self.viewer.send_pointer_event(x, y, button_mask, False)
def on_scroll_event(self, delta, axis):
self.viewer.send_scroll_event(delta, axis)
def on_focus_out_event(self):
self.viewer.release_all_keys()
def usage_advice():
"""Provide usage information on console."""
usage = sys.modules[__name__].__doc__.split('\n')[2:13]
print('\n'.join(usage))
def extract_port_num(arg):
"""Extract port number from command line argument."""
port = 0
try:
port = int(arg)
except ValueError:
print("Invalid port number\n")
return port
def parse_command_line():
"""Parse the command line to obtain connectivity details to be used when
listening for incoming connections. A simplistic approach is adopted:
3 arguments - Cloud connectivity to be used
[LOCAL_CLOUD_ADDRESS LOCAL_CLOUD_PASSWORD PEER_CLOUD_ADDRESS]
2 arguments - Direct TCP connectivity to be used
[TCP_ADDRESS TCP_PORT]
0 arguments - the built-in macros must be set appropriately
"""
global LOCAL_CLOUD_ADDRESS, LOCAL_CLOUD_PASSWORD, PEER_CLOUD_ADDRESS
global TCP_ADDRESS, TCP_PORT
global using_cloud
bad_args = False
argc = len(sys.argv)
# Parse any supplied command line arguments
if argc == 4 or argc == 3 or argc == 1:
if argc == 4: # Cloud arguments
LOCAL_CLOUD_ADDRESS = sys.argv[1]
LOCAL_CLOUD_PASSWORD = sys.argv[2]
PEER_CLOUD_ADDRESS = sys.argv[3]
elif argc == 3: # Direct TCP arguments
TCP_ADDRESS = sys.argv[1]
TCP_PORT = extract_port_num(sys.argv[2])
using_cloud = False
else: # Examine hard-coded values from global variables above
if LOCAL_CLOUD_ADDRESS or LOCAL_CLOUD_PASSWORD or \
PEER_CLOUD_ADDRESS:
using_cloud = True
elif TCP_PORT or TCP_ADDRESS:
using_cloud = False
# Check if all required connectivity details are provided either via
# editing the global variables above, or on the command-line
if using_cloud and (not LOCAL_CLOUD_ADDRESS or
not LOCAL_CLOUD_PASSWORD or
not PEER_CLOUD_ADDRESS):
bad_args = True
elif not using_cloud and (not TCP_PORT or not TCP_ADDRESS):
bad_args = True
else:
bad_args = True # Invalid number of arguments
if bad_args:
usage_advice()
sys.exit(1)
def sdk_main():
"""sdk_main() is the main method for the SDK thread. It initializes the
SDK, creates the SDK objects, and runs the SDK event loop. When sdk_main()
exits, the SDK thread has finished.
"""
try:
global using_cloud, direct_tcp_add_on_code, viewer_conn
# Create a logger with outputs to sys.stderr
vncsdk.Logger.create_stderr_logger()
# Create a file DataStore for storing persistent data for the viewer.
# Ideally this would be created in a directory that only the viewer
# user has access to.
vncsdk.DataStore.create_file_store("dataStore.txt")
# Initialize SDK and optional Add-Ons
vncsdk.init()
if not using_cloud:
try:
vncsdk.enable_add_on(direct_tcp_add_on_code)
except Exception as e:
print("Failed to enable Direct TCP add-on: " + str(e))
viewer_widget.signal_disconnected.emit(None)
return
# Create the SDK Viewer objects, and begin the connection to the Server
viewer_conn = ViewerConn()
viewer_handler = viewer_conn.viewer.get_connection_handler()
if using_cloud:
# Make a Cloud connection
print("Connecting via VNC Cloud")
print(" local address: {addr}".format(addr=LOCAL_CLOUD_ADDRESS))
print(" peer address: {addr}".format(addr=PEER_CLOUD_ADDRESS))
with vncsdk.CloudConnector(LOCAL_CLOUD_ADDRESS,
LOCAL_CLOUD_PASSWORD) \
as cloud_connector:
cloud_connector.connect(PEER_CLOUD_ADDRESS, viewer_handler)
else:
# Make a Direct TCP connection.
# Ignore this if you do not intend to use the Direct TCP add-on
print("Connecting to host address: {address} port: {port}".format(
address=TCP_ADDRESS, port=str(TCP_PORT)))
with vncsdk.DirectTcpConnector() as tcp_connector:
tcp_connector.connect(TCP_ADDRESS, TCP_PORT, viewer_handler)
# Run the SDK's event loop. This will return when the main thread
# calls vncsdk.EventLoop.stop(), allowing the SDK thread to exit.
vncsdk.EventLoop.run()
except:
import traceback
traceback.print_exc()
viewer_widget.signal_disconnected.emit(None)
finally:
if viewer_conn:
viewer_conn.destroy()
if __name__ == '__main__':
# Parse command line
parse_command_line()
# On the main thread, we create the QApplication and our viewer widget.
# These control the application's UI.
application = QtGui.QApplication(sys.argv)
viewer_conn = None
viewer_widget = ViewerWidget()
# We create a second thread for running the SDK.
sdk_thread = Thread(target=sdk_main)
sdk_thread.start()
# QApplication's exec_() method runs the main UI thread's event loop, which
# runs until the viewer window has closed.
application.exec_()
# After the UI has been closed, we stop the SDK thread and let it finish.
vncsdk.EventLoop.stop()
sdk_thread.join()
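# For reference, the viewer accepts its connection details on the command line
# (parsed in parse_command_line() above); the script name below is a
# hypothetical placeholder:
#
#   python basic_viewer_qt.py LOCAL_CLOUD_ADDRESS LOCAL_CLOUD_PASSWORD PEER_CLOUD_ADDRESS
#   python basic_viewer_qt.py TCP_ADDRESS TCP_PORT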
|
env_stock_papertrading_rllib.py
|
import datetime
import threading
import time
import alpaca_trade_api as tradeapi
import gym
import numpy as np
import pandas as pd
from finrl_meta.data_processors.alpaca import Alpaca
class StockEnvEmpty(gym.Env):
# Empty Env used for loading rllib agent
def __init__(self, config):
state_dim = config['state_dim']
action_dim = config['action_dim']
self.observation_space = gym.spaces.Box(low=-3000, high=3000, shape=(state_dim,), dtype=np.float32)
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(action_dim,), dtype=np.float32)
def reset(self):
return
def step(self, actions):
return
class AlpacaPaperTrading_rllib():
def __init__(self, ticker_list, time_interval, agent, cwd, net_dim,
state_dim, action_dim, API_KEY, API_SECRET,
API_BASE_URL, tech_indicator_list, turbulence_thresh=30, max_stock=1e2):
# load agent
print('agent', agent)
if agent == 'ppo':
# load agent
from ray.rllib.agents import ppo
from ray.rllib.agents.ppo.ppo import PPOTrainer
config = ppo.DEFAULT_CONFIG.copy()
config['env'] = StockEnvEmpty
config["log_level"] = "WARN"
config['env_config'] = {'state_dim': state_dim,
'action_dim': action_dim, }
            trainer = PPOTrainer(env=StockEnvEmpty, config=config)
            try:
                trainer.restore(cwd)
                self.agent = trainer
                print("Restoring from checkpoint path", cwd)
            except Exception:
                raise ValueError('Failed to load agent!')
else:
raise ValueError('Agent input is NOT supported yet.')
# connect to Alpaca trading API
try:
self.alpaca = tradeapi.REST(API_KEY, API_SECRET, API_BASE_URL, 'v2')
        except Exception:
            raise ValueError('Failed to connect to Alpaca. Please check account info and internet connection.')
# read trading time interval
if time_interval == '1s':
self.time_interval = 1
elif time_interval == '5s':
self.time_interval = 5
elif time_interval == '1Min':
self.time_interval = 60
elif time_interval == '5Min':
self.time_interval = 60 * 5
elif time_interval == '15Min':
self.time_interval = 60 * 15
else:
raise ValueError('Time interval input is NOT supported yet.')
# read trading settings
self.tech_indicator_list = tech_indicator_list
self.turbulence_thresh = turbulence_thresh
self.max_stock = max_stock
# initialize account
self.stocks = np.asarray([0] * len(ticker_list)) # stocks holding
self.stocks_cd = np.zeros_like(self.stocks)
self.cash = None # cash record
self.stocks_df = pd.DataFrame(self.stocks, columns=['stocks'], index=ticker_list)
self.asset_list = []
self.price = np.asarray([0] * len(ticker_list))
self.stockUniverse = ticker_list
self.turbulence_bool = 0
self.equities = []
def run(self):
orders = self.alpaca.list_orders(status="open")
for order in orders:
self.alpaca.cancel_order(order.id)
# Wait for market to open.
print("Waiting for market to open...")
tAMO = threading.Thread(target=self.awaitMarketOpen)
tAMO.start()
tAMO.join()
print("Market opened.")
while True:
# Figure out when the market will close so we can prepare to sell beforehand.
clock = self.alpaca.get_clock()
closingTime = clock.next_close.replace(tzinfo=datetime.timezone.utc).timestamp()
currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
self.timeToClose = closingTime - currTime
            if self.timeToClose < 60:
                # Stop trading when less than one minute remains until market close.
                print("Market closing soon. Stop trading.")
break
'''# Close all positions when 1 minutes til market close.
print("Market closing soon. Closing positions.")
positions = self.alpaca.list_positions()
for position in positions:
if(position.side == 'long'):
orderSide = 'sell'
else:
orderSide = 'buy'
qty = abs(int(float(position.qty)))
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder(qty, position.symbol, orderSide, respSO))
tSubmitOrder.start()
tSubmitOrder.join()
# Run script again after market close for next trading day.
print("Sleeping until market close (15 minutes).")
time.sleep(60 * 15)'''
else:
trade = threading.Thread(target=self.trade)
trade.start()
trade.join()
last_equity = float(self.alpaca.get_account().last_equity)
cur_time = time.time()
self.equities.append([cur_time, last_equity])
np.save('paper_trading_records.npy', np.asarray(self.equities, dtype=float))
time.sleep(self.time_interval)
def awaitMarketOpen(self):
isOpen = self.alpaca.get_clock().is_open
while (not isOpen):
clock = self.alpaca.get_clock()
openingTime = clock.next_open.replace(tzinfo=datetime.timezone.utc).timestamp()
currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
timeToOpen = int((openingTime - currTime) / 60)
print(str(timeToOpen) + " minutes til market open.")
time.sleep(60)
isOpen = self.alpaca.get_clock().is_open
def trade(self):
state = self.get_state()
action = self.agent.compute_single_action(state)
action = (action * self.max_stock).astype(int)
self.stocks_cd += 1
if self.turbulence_bool == 0:
min_action = 10 # stock_cd
for index in np.where(action < -min_action)[0]: # sell_index:
sell_num_shares = min(self.stocks[index], -action[index])
qty = abs(int(sell_num_shares))
respSO = []
                tSubmitOrder = threading.Thread(target=self.submitOrder,
                                                args=(qty, self.stockUniverse[index], 'sell', respSO))
                tSubmitOrder.start()
                tSubmitOrder.join()
self.cash = float(self.alpaca.get_account().cash)
self.stocks_cd[index] = 0
for index in np.where(action > min_action)[0]: # buy_index:
if self.cash < 0:
tmp_cash = 0
else:
tmp_cash = self.cash
buy_num_shares = min(tmp_cash // self.price[index], abs(int(action[index])))
qty = abs(int(buy_num_shares))
respSO = []
                tSubmitOrder = threading.Thread(target=self.submitOrder,
                                                args=(qty, self.stockUniverse[index], 'buy', respSO))
                tSubmitOrder.start()
                tSubmitOrder.join()
self.cash = float(self.alpaca.get_account().cash)
self.stocks_cd[index] = 0
else: # sell all when turbulence
positions = self.alpaca.list_positions()
for position in positions:
if (position.side == 'long'):
orderSide = 'sell'
else:
orderSide = 'buy'
qty = abs(int(float(position.qty)))
respSO = []
                tSubmitOrder = threading.Thread(target=self.submitOrder,
                                                args=(qty, position.symbol, orderSide, respSO))
                tSubmitOrder.start()
                tSubmitOrder.join()
self.stocks_cd[:] = 0
def get_state(self):
alpaca = Alpaca(api=self.alpaca)
price, tech, turbulence = alpaca.fetch_latest_data(ticker_list=self.stockUniverse, time_interval='1Min',
tech_indicator_list=self.tech_indicator_list)
turbulence_bool = 1 if turbulence >= self.turbulence_thresh else 0
turbulence = (self.sigmoid_sign(turbulence, self.turbulence_thresh) * 2 ** -5).astype(np.float32)
tech = tech * 2 ** -7
positions = self.alpaca.list_positions()
stocks = [0] * len(self.stockUniverse)
for position in positions:
ind = self.stockUniverse.index(position.symbol)
stocks[ind] = (abs(int(float(position.qty))))
stocks = np.asarray(stocks, dtype=float)
cash = float(self.alpaca.get_account().cash)
self.cash = cash
self.stocks = stocks
self.turbulence_bool = turbulence_bool
self.price = price
amount = np.array(max(self.cash, 1e4) * (2 ** -12), dtype=np.float32)
scale = np.array(2 ** -6, dtype=np.float32)
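        # The state vector concatenates, in order: the scaled cash amount, the
        # scaled turbulence value, the turbulence flag, scaled latest prices,
        # scaled share holdings, the per-ticker cooldown counters, and the
        # scaled technical indicators.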
state = np.hstack((amount,
turbulence,
self.turbulence_bool,
price * scale,
self.stocks * scale,
self.stocks_cd,
tech,
)).astype(np.float32)
print(len(self.stockUniverse))
return state
def submitOrder(self, qty, stock, side, resp):
if (qty > 0):
try:
self.alpaca.submit_order(stock, qty, side, "market", "day")
print("Market order of | " + str(qty) + " " + stock + " " + side + " | completed.")
resp.append(True)
            except Exception:
print("Order of | " + str(qty) + " " + stock + " " + side + " | did not go through.")
resp.append(False)
else:
print("Quantity is 0, order of | " + str(qty) + " " + stock + " " + side + " | not completed.")
resp.append(True)
@staticmethod
def sigmoid_sign(ary, thresh):
def sigmoid(x):
return 1 / (1 + np.exp(-x * np.e)) - 0.5
return sigmoid(ary / thresh) * thresh
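# A minimal usage sketch (not part of the original module). The credentials,
# checkpoint path and dimensions below are hypothetical placeholders and must
# match the trained rllib checkpoint being restored:
#
#   trader = AlpacaPaperTrading_rllib(
#       ticker_list=['AAPL', 'MSFT'], time_interval='1Min', agent='ppo',
#       cwd='./checkpoint_000100/checkpoint-100', net_dim=512,
#       state_dim=..., action_dim=2,
#       API_KEY='<key>', API_SECRET='<secret>',
#       API_BASE_URL='https://paper-api.alpaca.markets',
#       tech_indicator_list=['macd', 'rsi_30'])
#   trader.run()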
|
server.py
|
#!/usr/bin/env python
import sys
import io
import os
import shutil
from subprocess import Popen, PIPE
from string import Template
from struct import Struct
from threading import Thread
from time import sleep, time
from http.server import HTTPServer, BaseHTTPRequestHandler
from wsgiref.simple_server import make_server
import picamera
from ws4py.websocket import WebSocket
from ws4py.server.wsgirefserver import WSGIServer, WebSocketWSGIRequestHandler
from ws4py.server.wsgiutils import WebSocketWSGIApplication
###########################################
# CONFIGURATION
WIDTH = 640
HEIGHT = 480
FRAMERATE = 24
HTTP_PORT = 8082
WS_PORT = 8084
COLOR = u'#444'
BGCOLOR = u'#333'
JSMPEG_MAGIC = b'jsmp'
JSMPEG_HEADER = Struct('>4sHH')
###########################################
class StreamingHttpHandler(BaseHTTPRequestHandler):
def do_HEAD(self):
self.do_GET()
def do_GET(self):
if self.path == '/':
self.send_response(301)
self.send_header('Location', '/index.html')
self.end_headers()
return
elif self.path == '/jsmpg.js':
content_type = 'application/javascript'
content = self.server.jsmpg_content
elif self.path == '/index.html':
content_type = 'text/html; charset=utf-8'
tpl = Template(self.server.index_template)
content = tpl.safe_substitute(dict(
ADDRESS='%s:%d' % (self.request.getsockname()[0], WS_PORT),
WIDTH=WIDTH, HEIGHT=HEIGHT, COLOR=COLOR, BGCOLOR=BGCOLOR))
else:
self.send_error(404, 'File not found')
return
content = content.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', content_type)
self.send_header('Content-Length', len(content))
self.send_header('Last-Modified', self.date_time_string(time()))
self.end_headers()
if self.command == 'GET':
self.wfile.write(content)
class StreamingHttpServer(HTTPServer):
def __init__(self):
super(StreamingHttpServer, self).__init__(
('', HTTP_PORT), StreamingHttpHandler)
with io.open('index.html', 'r') as f:
self.index_template = f.read()
with io.open('jsmpg.js', 'r') as f:
self.jsmpg_content = f.read()
class StreamingWebSocket(WebSocket):
def opened(self):
self.send(JSMPEG_HEADER.pack(JSMPEG_MAGIC, WIDTH, HEIGHT), binary=True)
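# For reference, the 8-byte header sent above is just the magic bytes followed
# by the big-endian width and height; e.g. for the 640x480 default:
#
#   JSMPEG_HEADER.pack(JSMPEG_MAGIC, 640, 480)
#   # -> b'jsmp\x02\x80\x01\xe0'  (0x0280 == 640, 0x01e0 == 480)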
class BroadcastOutput(object):
def __init__(self, camera):
print('Spawning background conversion process')
self.converter = Popen([
'avconv',
'-f', 'rawvideo',
'-pix_fmt', 'yuv420p',
'-s', '%dx%d' % camera.resolution,
'-r', str(float(camera.framerate)),
'-i', '-',
'-f', 'mpeg1video',
'-b', '800k',
'-r', str(float(camera.framerate)),
'-'],
stdin=PIPE, stdout=PIPE, stderr=io.open(os.devnull, 'wb'),
shell=False, close_fds=True)
def write(self, b):
self.converter.stdin.write(b)
def flush(self):
print('Waiting for background conversion process to exit')
self.converter.stdin.close()
self.converter.wait()
class BroadcastThread(Thread):
def __init__(self, converter, websocket_server):
super(BroadcastThread, self).__init__()
self.converter = converter
self.websocket_server = websocket_server
def run(self):
try:
while True:
buf = self.converter.stdout.read(512)
if buf:
self.websocket_server.manager.broadcast(buf, binary=True)
elif self.converter.poll() is not None:
break
finally:
self.converter.stdout.close()
def main():
print('Initializing camera')
with picamera.PiCamera() as camera:
camera.resolution = (WIDTH, HEIGHT)
camera.framerate = FRAMERATE
sleep(1) # camera warm-up time
print('Initializing websockets server on port %d' % WS_PORT)
websocket_server = make_server(
'', WS_PORT,
server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app=WebSocketWSGIApplication(handler_cls=StreamingWebSocket))
websocket_server.initialize_websockets_manager()
websocket_thread = Thread(target=websocket_server.serve_forever)
print('Initializing HTTP server on port %d' % HTTP_PORT)
http_server = StreamingHttpServer()
http_thread = Thread(target=http_server.serve_forever)
print('Initializing broadcast thread')
output = BroadcastOutput(camera)
broadcast_thread = BroadcastThread(output.converter, websocket_server)
print('Starting recording')
camera.start_recording(output, 'yuv')
try:
print('Starting websockets thread')
websocket_thread.start()
print('Starting HTTP server thread')
http_thread.start()
print('Starting broadcast thread')
broadcast_thread.start()
while True:
camera.wait_recording(1)
except KeyboardInterrupt:
pass
finally:
print('Stopping recording')
camera.stop_recording()
print('Waiting for broadcast thread to finish')
broadcast_thread.join()
print('Shutting down HTTP server')
http_server.shutdown()
print('Shutting down websockets server')
websocket_server.shutdown()
print('Waiting for HTTP server thread to finish')
http_thread.join()
print('Waiting for websockets thread to finish')
websocket_thread.join()
if __name__ == '__main__':
main()
|
cnn_util.py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for CNN benchmarks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import threading
import numpy as np
import tensorflow.compat.v1 as tf
def tensorflow_version_tuple():
v = tf.__version__
major, minor, patch = v.split('.')
return (int(major), int(minor), patch)
def tensorflow_version():
vt = tensorflow_version_tuple()
return vt[0] * 1000 + vt[1]
def log_fn(log):
print(log)
def roll_numpy_batches(array, batch_size, shift_ratio):
"""Moves a proportion of batches from start to the end of the array.
  This function moves a proportion of batches, specified by `shift_ratio`, from
  the start of the array to the end. The number of batches moved is rounded
  down to the nearest integer. For example,
```
roll_numpy_batches([1, 2, 3, 4, 5, 6], 2, 0.34) == [3, 4, 5, 6, 1, 2]
```
Args:
array: A Numpy array whose first dimension is the batch dimension.
batch_size: The batch size.
shift_ratio: Proportion of batches to move from the start of the array to
the end of the array.
Returns:
A new Numpy array, with a proportion of the batches at the start of `array`
moved to the end.
"""
num_items = array.shape[0]
assert num_items % batch_size == 0
num_batches = num_items // batch_size
starting_batch = int(num_batches * shift_ratio)
starting_item = starting_batch * batch_size
return np.roll(array, -starting_item, axis=0)
# For Python 2.7 compatibility, we do not use threading.Barrier.
class Barrier(object):
"""Implements a lightweight Barrier.
Useful for synchronizing a fixed number of threads at known synchronization
points. Threads block on 'wait()' and simultaneously return once they have
all made that call.
  # Implementation adapted from boost/thread/barrier.hpp
"""
def __init__(self, parties):
"""Create a barrier, initialised to 'parties' threads."""
self.cond = threading.Condition(threading.Lock())
self.parties = parties
# Indicates the number of waiting parties.
self.waiting = 0
    # generation is needed to deal with spurious wakeups. If self.cond.wait()
    # wakes up for other reasons, generation will force it to go back to wait().
self.generation = 0
self.broken = False
def wait(self):
"""Wait for the barrier."""
with self.cond:
# Check if the barrier has been disabled or not.
if self.broken:
return
gen = self.generation
self.waiting += 1
if self.waiting == self.parties:
self.waiting = 0
self.generation += 1
self.cond.notify_all()
# loop because of spurious wakeups
while gen == self.generation:
self.cond.wait()
# TODO(huangyp): Remove this method once we find a way to know which step
# is the last barrier.
def abort(self):
"""Clear existing barrier and disable this barrier."""
with self.cond:
if self.waiting > 0:
self.generation += 1
self.cond.notify_all()
self.broken = True
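# A minimal sketch of using the Barrier above (not part of the original
# module): both threads block in wait() until each has arrived.
#
#   barrier = Barrier(2)
#
#   def worker():
#     ...             # per-thread work
#     barrier.wait()  # released once both parties have called wait()
#
#   threads = [threading.Thread(target=worker) for _ in range(2)]
#   for t in threads:
#     t.start()
#   for t in threads:
#     t.join()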
class ImageProducer(object):
"""An image producer that puts images into a staging area periodically.
This class is useful for periodically running a set of ops, `put_ops` on a
different thread every `batch_group_size` steps.
The notify_image_consumption() method is used to increment an internal counter
so that every `batch_group_size` times it is called, `put_ops` is executed. A
barrier is placed so that notify_image_consumption() will block until
the previous call to `put_ops` has been executed.
The start() method is used to start the thread that runs `put_ops`.
The done() method waits until the last put_ops is executed and stops the
thread.
The purpose of this class is to fill an image input pipeline every
`batch_group_size` steps. Suppose `put_ops` supplies `batch_group_size` images
to the input pipeline when run, and that every step, 1 batch of images is
consumed. Then, by calling notify_image_consumption() every step, images are
supplied to the input pipeline at the same amount they are consumed.
Example usage:
```
put_ops = ... # Enqueues `batch_group_size` batches to a StagingArea
get_op = ... # Dequeues 1 batch, and does some operations on it
batch_group_size = 4
with tf.Session() as sess:
image_producer = cnn_util.ImageProducer(sess, put_op, batch_group_size)
image_producer.start()
for _ in range(100):
sess.run(get_op)
image_producer.notify_image_consumption()
```
"""
def __init__(self, sess, put_ops, batch_group_size, use_python32_barrier):
self.sess = sess
self.num_gets = 0
self.put_ops = put_ops
self.batch_group_size = batch_group_size
self.done_event = threading.Event()
if (use_python32_barrier and
sys.version_info[0] == 3 and sys.version_info[1] >= 2):
self.put_barrier = threading.Barrier(2)
else:
self.put_barrier = Barrier(2)
def _should_put(self):
return (self.num_gets + 1) % self.batch_group_size == 0
def done(self):
"""Stop the image producer."""
self.done_event.set()
self.put_barrier.abort()
self.thread.join()
def start(self):
"""Start the image producer."""
self.sess.run([self.put_ops])
self.thread = threading.Thread(target=self._loop_producer)
# Set daemon to true to allow Ctrl + C to terminate all threads.
self.thread.daemon = True
self.thread.start()
def notify_image_consumption(self):
"""Increment the counter of image_producer by 1.
This should only be called by the main thread that consumes images and runs
the model computation. One batch of images should be consumed between
calling start() and the first call to this method. Then, one batch of images
should be consumed between any two successive calls to this method.
"""
if self._should_put():
self.put_barrier.wait()
self.num_gets += 1
def _loop_producer(self):
    while not self.done_event.is_set():
self.sess.run([self.put_ops])
self.put_barrier.wait()
class BaseClusterManager(object):
"""The manager for the cluster of servers running the benchmark."""
def __init__(self, params):
worker_hosts = params.worker_hosts.split(',')
ps_hosts = params.ps_hosts.split(',') if params.ps_hosts else []
cluster = {'worker': worker_hosts}
if ps_hosts:
cluster['ps'] = ps_hosts
self._cluster_spec = tf.train.ClusterSpec(cluster)
def get_target(self):
"""Returns a target to be passed to tf.Session()."""
raise NotImplementedError('get_target must be implemented by subclass')
def join_server(self):
raise NotImplementedError('join must be implemented by subclass')
def get_cluster_spec(self):
return self._cluster_spec
def num_workers(self):
return len(self._cluster_spec.job_tasks('worker'))
def num_ps(self):
if 'ps' in self._cluster_spec.jobs:
return len(self._cluster_spec.job_tasks('ps'))
else:
return 0
class GrpcClusterManager(BaseClusterManager):
"""A cluster manager for a cluster networked with gRPC."""
def __init__(self, params, config_proto):
super(GrpcClusterManager, self).__init__(params)
if params.job_name == 'controller':
self._target = 'grpc://%s' % self._cluster_spec.job_tasks('worker')[0]
else:
self._server = tf.train.Server(self._cluster_spec,
job_name=params.job_name,
task_index=params.task_index,
config=config_proto,
protocol=params.server_protocol)
self._target = self._server.target
def get_target(self):
return self._target
def join_server(self):
return self._server.join()
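# A minimal sketch of wiring up GrpcClusterManager (not part of the original
# module). The `params` namespace is a hypothetical stand-in for the
# benchmark's flag object; only the attributes read above are populated.
#
#   from types import SimpleNamespace
#   params = SimpleNamespace(worker_hosts='10.0.0.1:2222,10.0.0.2:2222',
#                            ps_hosts='10.0.0.3:2222', job_name='worker',
#                            task_index=0, server_protocol='grpc')
#   manager = GrpcClusterManager(params, tf.ConfigProto())
#   with tf.Session(manager.get_target()) as sess:
#     ...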
|
MicrosoftTeams.py
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
from distutils.util import strtobool
from flask import Flask, request, Response
from gevent.pywsgi import WSGIServer
import jwt
import time
from threading import Thread
from typing import Match, Union, Optional, cast, Dict, Any, List
import re
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBAL VARIABLES'''
PARAMS: dict = demisto.params()
BOT_ID: str = PARAMS.get('bot_id', '')
BOT_PASSWORD: str = PARAMS.get('bot_password', '')
USE_SSL: bool = not PARAMS.get('insecure', False)
APP: Flask = Flask('demisto-teams')
PLAYGROUND_INVESTIGATION_TYPE: int = 9
GRAPH_BASE_URL: str = 'https://graph.microsoft.com'
INCIDENT_TYPE: str = PARAMS.get('incidentType', '')
URL_REGEX: str = r'http[s]?://(?:[a-zA-Z]|[0-9]|[:/$_@.&+#-]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
ENTITLEMENT_REGEX: str = \
r'(\{){0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}'
ENTRY_FOOTER: str = 'From Microsoft Teams'
MESSAGE_TYPES: dict = {
'mirror_entry': 'mirrorEntry',
'incident_opened': 'incidentOpened',
'status_changed': 'incidentStatusChanged'
}
''' HELPER FUNCTIONS '''
def epoch_seconds(d: datetime = None) -> int:
"""
Return the number of seconds for given date. If no date, return current.
:param d: timestamp datetime object
:return: timestamp in epoch
"""
if not d:
d = datetime.utcnow()
return int((d - datetime.utcfromtimestamp(0)).total_seconds())
def error_parser(resp_err: requests.Response, api: str = 'graph') -> str:
"""
Parses Microsoft API error message from Requests response
:param resp_err: response with error
:param api: API to query (graph/bot)
:return: string of error
"""
try:
response: dict = resp_err.json()
if api == 'graph':
error: dict = response.get('error', {})
err_str: str = f"{error.get('code', '')}: {error.get('message', '')}"
if err_str:
return err_str
elif api == 'bot':
error_description: str = response.get('error_description', '')
if error_description:
return error_description
# If no error message
raise ValueError()
except ValueError:
return resp_err.text
def translate_severity(severity: str) -> int:
"""
Translates Demisto text severity to int severity
:param severity: Demisto text severity
:return: Demisto integer severity
"""
severity_dictionary = {
'Low': 1,
'Medium': 2,
'High': 3,
'Critical': 4
}
return severity_dictionary.get(severity, 0)
def create_incidents(demisto_user: dict, incidents: list) -> dict:
"""
Creates incidents according to a provided JSON object
:param demisto_user: The demisto user associated with the request (if exists)
:param incidents: The incidents JSON
:return: The creation result
"""
if demisto_user:
data = demisto.createIncidents(incidents, userID=demisto_user.get('id', ''))
else:
data = demisto.createIncidents(incidents)
return data
def process_incident_create_message(demisto_user: dict, message: str) -> str:
"""
Processes an incident creation message
:param demisto_user: The Demisto user associated with the message (if exists)
:param message: The creation message
:return: Creation result
"""
json_pattern: str = r'(?<=json=).*'
name_pattern: str = r'(?<=name=).*'
type_pattern: str = r'(?<=type=).*'
json_match: Optional[Match[str]] = re.search(json_pattern, message)
created_incident: Union[dict, list]
data: str = str()
if json_match:
if re.search(name_pattern, message) or re.search(type_pattern, message):
            data = 'No properties other than json should be specified.'
else:
incidents_json: str = json_match.group()
incidents: Union[dict, list] = json.loads(incidents_json.replace('“', '"').replace('”', '"'))
if not isinstance(incidents, list):
incidents = [incidents]
created_incident = create_incidents(demisto_user, incidents)
if not created_incident:
data = 'Failed creating incidents.'
else:
name_match: Optional[Match[str]] = re.search(name_pattern, message)
if not name_match:
data = 'Please specify arguments in the following manner: name=<name> type=[type] or json=<json>.'
else:
incident_name: str = re.sub('type=.*', '', name_match.group()).strip()
incident_type: str = str()
type_match: Optional[Match[str]] = re.search(type_pattern, message)
if type_match:
incident_type = re.sub('name=.*', '', type_match.group()).strip()
incident: dict = {'name': incident_name}
incident_type = incident_type or INCIDENT_TYPE
if incident_type:
incident['type'] = incident_type
created_incident = create_incidents(demisto_user, [incident])
if not created_incident:
data = 'Failed creating incidents.'
if created_incident:
if isinstance(created_incident, list):
created_incident = created_incident[0]
created_incident = cast(Dict[Any, Any], created_incident)
server_links: dict = demisto.demistoUrls()
server_link: str = server_links.get('server', '')
data = f"Successfully created incident {created_incident.get('name', '')}.\n" \
f"View it on: {server_link}#/WarRoom/{created_incident.get('id', '')}"
return data
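# For reference, the message arguments parsed above take one of two forms
# (values are illustrative):
#
#   name=<incident name> type=<incident type>      # type is optional
#   json=<single incident object or list of objects>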
def is_investigation_mirrored(investigation_id: str, mirrored_channels: list) -> int:
"""
Checks if investigation is already mirrored
:param investigation_id: Investigation ID to check if mirrored
:param mirrored_channels: List of mirrored channels to check if investigation is mirrored in
:return: Index in mirrored channels list if mirrored, else -1
"""
for index, channel in enumerate(mirrored_channels):
if channel.get('investigation_id') == investigation_id:
return index
return -1
def urlify_hyperlinks(message: str) -> str:
"""
Turns URL to markdown hyper-link
e.g. https://www.demisto.com -> [https://www.demisto.com](https://www.demisto.com)
:param message: Message to look for URLs in
:return: Formatted message with hyper-links
"""
formatted_message: str = message
# URLify markdown hyperlinks
urls = re.findall(URL_REGEX, message)
for url in urls:
formatted_message = formatted_message.replace(url, f'[{url}]({url})')
return formatted_message
def get_team_member(integration_context: dict, team_member_id: str) -> dict:
"""
Searches for a team member
:param integration_context: Cached object to search for team member in
:param team_member_id: Team member ID to search for
:return: Found team member object
"""
team_member: dict = dict()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
team_members: list = team.get('team_members', [])
for member in team_members:
if member.get('id') == team_member_id:
team_member['username'] = member.get('name', '')
team_member['user_email'] = member.get('userPrincipalName', '')
return team_member
raise ValueError('Team member was not found')
def get_team_member_id(requested_team_member: str, integration_context: dict) -> str:
"""
Gets team member ID based on name, email or principal name
:param requested_team_member: Team member name / principal name / email to look for
:param integration_context: Cached object to search for team member in
:return: Team member ID
"""
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
team_members: list = team.get('team_members', [])
for team_member in team_members:
if requested_team_member in {team_member.get('name', ''), team_member.get('userPrincipalName', '')}:
return team_member.get('id')
raise ValueError('Team member was not found')
def create_adaptive_card(body: list, actions: list = None) -> dict:
"""
Creates Microsoft Teams adaptive card object given body and actions
:param body: Adaptive card data
:param actions: Adaptive card actions
:return: Adaptive card object
"""
adaptive_card: dict = {
'contentType': 'application/vnd.microsoft.card.adaptive',
'content': {
'$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
'version': '1.0',
'type': 'AdaptiveCard',
'body': body
}
}
if actions:
adaptive_card['content']['actions'] = actions
return adaptive_card
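# For example, create_adaptive_card([{'type': 'TextBlock', 'text': 'hi'}])
# returns the following structure (no actions supplied):
#
#   {
#       'contentType': 'application/vnd.microsoft.card.adaptive',
#       'content': {
#           '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
#           'version': '1.0',
#           'type': 'AdaptiveCard',
#           'body': [{'type': 'TextBlock', 'text': 'hi'}]
#       }
#   }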
def process_tasks_list(data_by_line: list) -> dict:
"""
Processes tasks list assigned to user given from Demisto server and creates adaptive card
:param data_by_line: List of tasks to process
:return: Adaptive card of assigned tasks
"""
body: list = list()
for line in data_by_line[2:]:
split_data: list = [stat.strip() for stat in line.split('|')]
body.append({
'type': 'FactSet',
'facts': [
{
'title': 'Task:',
'value': split_data[0]
},
{
'title': 'Incident:',
'value': split_data[1]
},
{
'title': 'Due:',
'value': split_data[2]
},
{
'title': 'Link:',
'value': f'[{split_data[3]}]({split_data[3]})'
}
]
})
return create_adaptive_card(body)
def process_incidents_list(data_by_line: list) -> dict:
"""
Processes incidents list assigned to user given from Demisto server and creates adaptive card
:param data_by_line: List of incidents to process
:return: Adaptive card of assigned incidents
"""
body: list = list()
for line in data_by_line[2:]:
split_data: list = [stat.strip() for stat in line.split('|')]
body.append({
'type': 'FactSet',
'facts': [
{
'title': 'ID:',
'value': split_data[0]
},
{
'title': 'Name:',
'value': split_data[1]
},
{
'title': 'Status:',
'value': split_data[2]
},
{
'title': 'Type:',
'value': split_data[3]
},
{
'title': 'Owner:',
'value': split_data[4]
},
{
'title': 'Created:',
'value': split_data[5]
},
{
'title': 'Link:',
'value': f'[{split_data[6]}]({split_data[6]})'
}
]
})
return create_adaptive_card(body)
def process_mirror_or_unknown_message(message: str) -> dict:
"""
Processes mirror investigation command or unknown direct message and creates adaptive card
:param message: The direct message to process
:return: Adaptive card of mirror response / unknown message
"""
body: list = [{
'type': 'TextBlock',
'text': message.replace('\n', '\n\n'),
'wrap': True
}]
return create_adaptive_card(body)
def process_ask_user(message: str) -> dict:
"""
    Processes an ask-user message and creates an adaptive card
    :param message: The question object, serialized as a JSON string
:return: Adaptive card of the question to send
"""
message_object: dict = json.loads(message)
text: str = message_object.get('message_text', '')
entitlement: str = message_object.get('entitlement', '')
options: list = message_object.get('options', [])
investigation_id: str = message_object.get('investigation_id', '')
task_id: str = message_object.get('task_id', '')
body = [
{
'type': 'TextBlock',
'text': text
}
]
actions: list = list()
for option in options:
actions.append({
'type': 'Action.Submit',
'title': option,
'data': {
'response': option,
'entitlement': entitlement,
'investigation_id': investigation_id,
'task_id': task_id
}
})
return create_adaptive_card(body, actions)
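# For reference, the JSON string handled above is expected to carry these keys
# (values are illustrative):
#
#   {
#       "message_text": "Approve blocking the IP address?",
#       "entitlement": "4404dae8-2d45-46bd-85fa-64779c12abe8",
#       "options": ["Yes", "No"],
#       "investigation_id": "105",
#       "task_id": "3"
#   }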
def get_bot_access_token() -> str:
"""
Retrieves Bot Framework API access token, either from cache or from Microsoft
:return: The Bot Framework API access token
"""
integration_context: dict = demisto.getIntegrationContext()
access_token: str = integration_context.get('bot_access_token', '')
    valid_until: int = integration_context.get('bot_valid_until', 0)
if access_token and valid_until:
if epoch_seconds() < valid_until:
return access_token
url: str = 'https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token'
data: dict = {
'grant_type': 'client_credentials',
'client_id': BOT_ID,
'client_secret': BOT_PASSWORD,
'scope': 'https://api.botframework.com/.default'
}
response: requests.Response = requests.post(
url,
data=data,
verify=USE_SSL
)
if not response.ok:
error = error_parser(response, 'bot')
raise ValueError(f'Failed to get bot access token [{response.status_code}] - {error}')
try:
response_json: dict = response.json()
access_token = response_json.get('access_token', '')
expires_in: int = response_json.get('expires_in', 3595)
time_now: int = epoch_seconds()
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
expires_in -= time_buffer
integration_context['bot_access_token'] = access_token
integration_context['bot_valid_until'] = time_now + expires_in
demisto.setIntegrationContext(integration_context)
return access_token
except ValueError:
raise ValueError('Failed to get bot access token')
def get_graph_access_token() -> str:
"""
Retrieves Microsoft Graph API access token, either from cache or from Microsoft
:return: The Microsoft Graph API access token
"""
integration_context: dict = demisto.getIntegrationContext()
access_token: str = integration_context.get('graph_access_token', '')
    valid_until: int = integration_context.get('graph_valid_until', 0)
if access_token and valid_until:
if epoch_seconds() < valid_until:
return access_token
tenant_id: str = integration_context.get('tenant_id', '')
if not tenant_id:
raise ValueError('Tenant ID not found')
url: str = f'https://login.microsoftonline.com/{tenant_id}/oauth2/v2.0/token'
data: dict = {
'grant_type': 'client_credentials',
'client_id': BOT_ID,
'scope': 'https://graph.microsoft.com/.default',
'client_secret': BOT_PASSWORD
}
response: requests.Response = requests.post(
url,
data=data,
verify=USE_SSL
)
if not response.ok:
error = error_parser(response)
raise ValueError(f'Failed to get Graph access token [{response.status_code}] - {error}')
try:
response_json: dict = response.json()
access_token = response_json.get('access_token', '')
expires_in: int = response_json.get('expires_in', 3595)
time_now: int = epoch_seconds()
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
expires_in -= time_buffer
integration_context['graph_access_token'] = access_token
integration_context['graph_valid_until'] = time_now + expires_in
demisto.setIntegrationContext(integration_context)
return access_token
except ValueError:
raise ValueError('Failed to get Graph access token')
def http_request(
method: str, url: str = '', json_: dict = None, api: str = 'graph'
) -> Union[dict, list]:
"""
    A wrapper around the requests library to send requests to the Microsoft
    Graph / Bot Framework APIs and handle the responses
:param method: any restful method
:param url: URL to query
:param json_: HTTP JSON body
:param api: API to query (graph/bot)
:return: requests.json()
"""
if api == 'graph':
access_token = get_graph_access_token()
else: # Bot Framework API
access_token = get_bot_access_token()
headers: dict = {
'Authorization': f'Bearer {access_token}',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
try:
response: requests.Response = requests.request(
method,
url,
headers=headers,
json=json_,
verify=USE_SSL
)
if not response.ok:
error: str = error_parser(response, api)
raise ValueError(f'Error in API call to Microsoft Teams: [{response.status_code}] - {error}')
if response.status_code in {202, 204}:
# Delete channel returns 204 if successful
# Update message returns 202 if the request has been accepted for processing
return {}
if response.status_code == 201:
# For channel creation query, we get a body in the response, otherwise we should just return
if not response.content:
return {}
try:
return response.json()
except ValueError:
raise ValueError(f'Error in API call to Microsoft Teams: {response.text}')
except requests.exceptions.ConnectTimeout:
error_message = 'Connection Timeout Error - potential reason may be that Microsoft Teams is not ' \
'accessible from your host.'
raise ConnectionError(error_message)
except requests.exceptions.SSLError:
error_message = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' in ' \
'the integration configuration.'
raise ConnectionError(error_message)
except requests.exceptions.ProxyError:
error_message = 'Proxy Error - if \'Use system proxy settings\' in the integration configuration has been ' \
'selected, try deselecting it.'
raise ConnectionError(error_message)
def validate_auth_header(headers: dict) -> bool:
"""
    Validates the authorization header provided in the bot activity object
:param headers: Bot activity headers
:return: True if authorized, else False
"""
parts: list = headers.get('Authorization', '').split(' ')
if len(parts) != 2:
return False
    schema: str = parts[0]
    jwt_token: str = parts[1]
    if schema != 'Bearer' or not jwt_token:
return False
decoded_payload: dict = jwt.decode(jwt_token, verify=False)
issuer: str = decoded_payload.get('iss', '')
if issuer != 'https://api.botframework.com':
return False
# integration_context: dict = demisto.getIntegrationContext()
# open_id_metadata: dict = integration_context.get('open_id_metadata', {})
# keys: list = open_id_metadata.get('keys', [])
# last_updated: int = open_id_metadata.get('last_updated', 0)
# if last_updated < datetime.timestamp(datetime.now() + timedelta(days=5)):
# open_id_url: str = 'https://login.microsoftonline.com/common/v2.0/.well-known/openid-configuration'
# response: dict = http_request('GET', open_id_url, api='bot')
# jwks_uri: str = response.get('jwks_uri', '')
# keys_response: dict = http_request('GET', jwks_uri, api='bot')
# keys = keys_response.get('keys', [])
# last_updated = datetime.timestamp(datetime.now())
# open_id_metadata['keys'] = keys
# open_id_metadata['last_updated'] = last_updated
# if not keys:
# return False
# unverified_headers: dict = jwt.get_unverified_header(jwt_token)
# key_id: str = unverified_headers.get('kid', '')
# key_object: dict = dict()
# for key in keys:
# if key.get('kid') == key_id:
# key_object = key
# break
# if not key_object:
# return False
# public_key: str = RSAAlgorithm.from_jwk(json.dumps(key_object))
# options = {
# 'verify_aud': False,
# 'verify_exp': True
# }
# decoded_payload = jwt.decode(jwt_token, public_key, options=options)
audience_claim: str = decoded_payload.get('aud', '')
if audience_claim != demisto.params().get('bot_id'):
return False
# integration_context['open_id_metadata'] = json.dumps(open_id_metadata)
# demisto.setIntegrationContext(integration_context)
return True
''' COMMANDS + REQUESTS FUNCTIONS '''
def get_team_aad_id(team_name: str) -> str:
"""
Gets Team AAD ID
:param team_name: Team name to get AAD ID of
:return: team AAD ID
"""
integration_context: dict = demisto.getIntegrationContext()
if integration_context.get('teams'):
teams: list = json.loads(integration_context['teams'])
for team in teams:
if team_name == team.get('team_name', ''):
return team.get('team_aad_id', '')
url: str = f"{GRAPH_BASE_URL}/beta/groups?$filter=resourceProvisioningOptions/Any(x:x eq 'Team')"
response: dict = cast(Dict[Any, Any], http_request('GET', url))
teams = response.get('value', [])
for team in teams:
if team.get('displayName', '') == team_name:
return team.get('id', '')
raise ValueError('Could not find requested team.')
# def add_member_to_team(user_principal_name: str, team_id: str):
# url: str = f'{GRAPH_BASE_URL}/v1.0/groups/{team_id}/members/$ref'
# requestjson_: dict = {
# '@odata.id': f'{GRAPH_BASE_URL}/v1.0/directoryObjects/{user_principal_name}'
# }
# http_request('POST', url, json_=requestjson_)
def get_users() -> list:
"""
Retrieves list of AAD users
:return: List of AAD users
"""
url: str = f'{GRAPH_BASE_URL}/v1.0/users'
users: dict = cast(Dict[Any, Any], http_request('GET', url))
return users.get('value', [])
# def create_group_request(
# display_name: str, mail_enabled: bool, mail_nickname: str, security_enabled: bool,
# owners_ids: list, members_ids: list = None
# ) -> str:
# url = f'{GRAPH_BASE_URL}/v1.0/groups'
# data: dict = {
# 'displayName': display_name,
# 'groupTypes': ['Unified'],
# 'mailEnabled': mail_enabled,
# 'mailNickname': mail_nickname,
# 'securityEnabled': security_enabled,
# 'owners@odata.bind': owners_ids,
# 'members@odata.bind': members_ids or owners_ids
# }
# group_creation_response: dict = cast(Dict[Any, Any], http_request('POST', url, json_=data))
# group_id: str = group_creation_response.get('id', '')
# return group_id
#
#
# def create_team_request(group_id: str) -> str:
# url = f'{GRAPH_BASE_URL}/v1.0/groups/{group_id}/team'
# team_creation_response: dict = cast(Dict[Any, Any], http_request('PUT', url, json_={}))
# team_id: str = team_creation_response.get('id', '')
# return team_id
#
#
# def add_bot_to_team(team_id: str):
# url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_id}/installedApps'
# bot_app_id: str = ''
# data: dict = {
# 'teamsApp@odata.bind': f'https://graph.microsoft.com/v1.0/appCatalogs/teamsApps/{bot_app_id}'
# }
# print(http_request('POST', url, json_=data))
#
#
# def create_team():
# display_name: str = demisto.args().get('display_name', '')
# mail_enabled: bool = bool(strtobool(demisto.args().get('mail_enabled', True)))
# mail_nickname: str = demisto.args().get('mail_nickname', '')
# security_enabled: bool = bool(strtobool(demisto.args().get('security_enabled', True)))
# owners = argToList(demisto.args().get('owner', ''))
# members = argToList(demisto.args().get('members', ''))
# owners_ids: list = list()
# members_ids: list = list()
# users: list = get_users()
# user_id: str = str()
# for member in members:
# found_member: bool = False
# for user in users:
# if member in {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}:
# found_member = True
# user_id = user.get('id', '')
# members_ids.append(f'https://graph.microsoft.com/v1.0/users/{user_id}')
# break
# if not found_member:
# demisto.results({
# 'Type': entryTypes['warning'],
# 'Contents': f'User {member} was not found',
# 'ContentsFormat': formats['text']
# })
# for owner in owners:
# found_owner: bool = False
# for user in users:
# if owner in {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}:
# found_owner = True
# user_id = user.get('id', '')
# owners_ids.append(f'https://graph.microsoft.com/v1.0/users/{user_id}')
# break
# if not found_owner:
# demisto.results({
# 'Type': entryTypes['warning'],
# 'Contents': f'User {owner} was not found',
# 'ContentsFormat': formats['text']
# })
# if not owners_ids:
# raise ValueError('Could not find given users to be Team owners.')
# group_id: str = create_group_request(
# display_name, mail_enabled, mail_nickname, security_enabled, owners_ids, members_ids
# )
# team_id: str = create_team_request(group_id)
# add_bot_to_team(team_id)
# demisto.results(f'Team {display_name} was created successfully')
def create_channel(team_aad_id: str, channel_name: str, channel_description: str = '') -> str:
"""
Creates a Microsoft Teams channel
:param team_aad_id: Team AAD ID to create channel in
:param channel_name: Name of channel to create
:param channel_description: Description of channel to create
:return: ID of created channel
"""
url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels'
request_json: dict = {
'displayName': channel_name,
'description': channel_description
}
channel_data: dict = cast(Dict[Any, Any], http_request('POST', url, json_=request_json))
channel_id: str = channel_data.get('id', '')
return channel_id
def get_channel_id(channel_name: str, team_aad_id: str, investigation_id: str = None) -> str:
"""
Retrieves Microsoft Teams channel ID
:param channel_name: Name of channel to get ID of
:param team_aad_id: AAD ID of team to search channel in
:param investigation_id: Demisto investigation ID to search mirrored channel of
:return: Requested channel ID
"""
investigation_id = investigation_id or str()
integration_context: dict = demisto.getIntegrationContext()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
mirrored_channels: list = team.get('mirrored_channels', [])
for channel in mirrored_channels:
if channel.get('channel_name') == channel_name or channel.get('investigation_id') == investigation_id:
return channel.get('channel_id')
url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels'
response: dict = cast(Dict[Any, Any], http_request('GET', url))
channel_id: str = ''
channels: list = response.get('value', [])
for channel in channels:
channel_display_name: str = channel.get('displayName', '')
if channel_display_name == channel_name:
channel_id = channel.get('id', '')
break
if not channel_id:
raise ValueError(f'Could not find channel: {channel_name}')
return channel_id
def get_team_members(service_url: str, team_id: str) -> list:
"""
Retrieves team members given a team
:param team_id: ID of team to get team members of
:param service_url: Bot service URL to query
:return: List of team members
"""
url: str = f'{service_url}/v3/conversations/{team_id}/members'
response: list = cast(List[Any], http_request('GET', url, api='bot'))
return response
def update_message(service_url: str, conversation_id: str, activity_id: str, text: str):
"""
Updates a message in Microsoft Teams channel
:param service_url: Bot service URL to query
:param conversation_id: Conversation ID of message to update
:param activity_id: Activity ID of message to update
:param text: Text to update in the message
:return: None
"""
body = [{
'type': 'TextBlock',
'text': text
}]
adaptive_card: dict = create_adaptive_card(body=body)
conversation = {
'type': 'message',
'attachments': [adaptive_card]
}
url: str = f'{service_url}/v3/conversations/{conversation_id}/activities/{activity_id}'
http_request('PUT', url, json_=conversation, api='bot')
def close_channel_request(team_aad_id: str, channel_id: str):
"""
Sends an HTTP request to close a Microsoft Teams channel
:param team_aad_id: AAD ID of team to close the channel in
:param channel_id: ID of channel to close
:return: None
"""
url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels/{channel_id}'
http_request('DELETE', url)
def close_channel():
"""
Deletes a mirrored Microsoft Teams channel
"""
integration_context: dict = demisto.getIntegrationContext()
channel_name: str = demisto.args().get('channel', '')
investigation: dict = demisto.investigation()
investigation_id: str = investigation.get('id', '')
channel_id: str = str()
team_aad_id: str
mirrored_channels: list
if not channel_name:
# Closing channel as part of autoclose in mirroring process
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
team_aad_id = team.get('team_aad_id', '')
mirrored_channels = team.get('mirrored_channels', [])
for channel_index, channel in enumerate(mirrored_channels):
if channel.get('investigation_id') == investigation_id:
channel_id = channel.get('channel_id', '')
close_channel_request(team_aad_id, channel_id)
mirrored_channels.pop(channel_index)
team['mirrored_channels'] = mirrored_channels
break
if not channel_id:
raise ValueError('Could not find Microsoft Teams channel to close.')
integration_context['teams'] = json.dumps(teams)
demisto.setIntegrationContext(integration_context)
else:
team_name: str = demisto.args().get('team') or demisto.params().get('team')
team_aad_id = get_team_aad_id(team_name)
channel_id = get_channel_id(channel_name, team_aad_id, investigation_id)
close_channel_request(team_aad_id, channel_id)
demisto.results('Channel was successfully closed.')
def create_personal_conversation(integration_context: dict, team_member_id: str) -> str:
"""
Create a personal conversation with a team member
:param integration_context: Cached object to retrieve relevant data for the conversation creation
:param team_member_id: ID of team member to create a conversation with
:return: ID of created conversation
"""
bot_id: str = demisto.params().get('bot_id', '')
bot_name: str = integration_context.get('bot_name', '')
tenant_id: str = integration_context.get('tenant_id', '')
conversation: dict = {
'bot': {
'id': f'28:{bot_id}',
'name': bot_name
},
'members': [{
'id': team_member_id
}],
'channelData': {
'tenant': {
'id': tenant_id
}
}
}
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
url: str = f'{service_url}/v3/conversations'
response: dict = cast(Dict[Any, Any], http_request('POST', url, json_=conversation, api='bot'))
return response.get('id', '')
def send_message_request(service_url: str, channel_id: str, conversation: dict):
"""
Sends an HTTP request to send message to Microsoft Teams
:param channel_id: ID of channel to send message in
:param conversation: Conversation message object to send
:param service_url: Bot service URL to query
:return: None
"""
url: str = f'{service_url}/v3/conversations/{channel_id}/activities'
http_request('POST', url, json_=conversation, api='bot')
def send_message():
message_type: str = demisto.args().get('messageType', '')
original_message: str = demisto.args().get('originalMessage', '')
message: str = demisto.args().get('message', '')
try:
adaptive_card: dict = json.loads(demisto.args().get('adaptive_card', '{}'))
except ValueError:
raise ValueError('Given adaptive card is not in valid JSON format.')
if message_type == MESSAGE_TYPES['mirror_entry'] and ENTRY_FOOTER in original_message:
# Got a message which was already mirrored - skipping it
return
channel_name: str = demisto.args().get('channel', '')
if not channel_name and message_type in {MESSAGE_TYPES['status_changed'], MESSAGE_TYPES['incident_opened']}:
# Got a notification from server
channel_name = demisto.params().get('incident_notifications_channel', 'General')
severity: int = int(demisto.args().get('severity'))
severity_threshold: int = translate_severity(demisto.params().get('min_incident_severity', 'Low'))
if severity < severity_threshold:
return
team_member: str = demisto.args().get('team_member', '')
if not (team_member or channel_name):
        raise ValueError('No channel or team member to send the message to was provided.')
if team_member and channel_name:
raise ValueError('Provide either channel or team member to send message to, not both.')
if not (message or adaptive_card):
        raise ValueError('No message or adaptive card to send was provided.')
if message and adaptive_card:
        raise ValueError('Provide either message or adaptive card to send, not both.')
integration_context: dict = demisto.getIntegrationContext()
channel_id: str = str()
personal_conversation_id: str = str()
if channel_name:
team_name: str = demisto.args().get('team', '') or demisto.params().get('team', '')
team_aad_id: str = get_team_aad_id(team_name)
investigation_id: str = str()
if message_type == MESSAGE_TYPES['mirror_entry']:
# Got an entry from the War Room to mirror to Teams
# Getting investigation ID in case channel name is custom and not the default
investigation: dict = demisto.investigation()
investigation_id = investigation.get('id', '')
channel_id = get_channel_id(channel_name, team_aad_id, investigation_id)
elif team_member:
team_member_id: str = get_team_member_id(team_member, integration_context)
personal_conversation_id = create_personal_conversation(integration_context, team_member_id)
recipient: str = channel_id or personal_conversation_id
conversation: dict
if message:
entitlement_match: Optional[Match[str]] = re.search(ENTITLEMENT_REGEX, message)
if entitlement_match:
# In TeamsAsk process
adaptive_card = process_ask_user(message)
conversation = {
'type': 'message',
'attachments': [adaptive_card]
}
else:
# Sending regular message
formatted_message: str = urlify_hyperlinks(message)
conversation = {
'type': 'message',
'text': formatted_message
}
else: # Adaptive card
conversation = {
'type': 'message',
'attachments': [adaptive_card]
}
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
send_message_request(service_url, recipient, conversation)
demisto.results('Message was sent successfully.')
def mirror_investigation():
"""
Updates the integration context with a new or existing mirror.
"""
investigation: dict = demisto.investigation()
if investigation.get('type') == PLAYGROUND_INVESTIGATION_TYPE:
raise ValueError('Can not perform this action in playground.')
integration_context: dict = demisto.getIntegrationContext()
mirror_type: str = demisto.args().get('mirror_type', 'all')
auto_close: str = demisto.args().get('autoclose', 'true')
mirror_direction: str = demisto.args().get('direction', 'both').lower()
team_name: str = demisto.args().get('team', '')
if not team_name:
team_name = demisto.params().get('team', '')
team_aad_id: str = get_team_aad_id(team_name)
mirrored_channels: list = list()
teams: list = json.loads(integration_context.get('teams', '[]'))
team: dict = dict()
for team in teams:
if team.get('team_aad_id', '') == team_aad_id:
if team.get('mirrored_channels'):
mirrored_channels = team['mirrored_channels']
break
if mirror_direction != 'both':
mirror_type = f'{mirror_type}:{mirror_direction}'
investigation_id: str = investigation.get('id', '')
investigation_mirrored_index: int = is_investigation_mirrored(investigation_id, mirrored_channels)
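    # Index of this investigation in mirrored_channels, or -1 if it is not mirrored yet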
if investigation_mirrored_index > -1:
# Updating channel mirror configuration
mirrored_channels[investigation_mirrored_index]['mirror_type'] = mirror_type
mirrored_channels[investigation_mirrored_index]['mirror_direction'] = mirror_direction
mirrored_channels[investigation_mirrored_index]['auto_close'] = auto_close
mirrored_channels[investigation_mirrored_index]['mirrored'] = False
demisto.results('Investigation mirror was updated successfully.')
else:
channel_name: str = demisto.args().get('channel_name', '') or f'incident-{investigation_id}'
channel_description: str = f'Channel to mirror incident {investigation_id}'
channel_id: str = create_channel(team_aad_id, channel_name, channel_description)
service_url: str = integration_context.get('service_url', '')
server_links: dict = demisto.demistoUrls()
server_link: str = server_links.get('server', '')
warroom_link: str = f'{server_link}#/WarRoom/{investigation_id}'
conversation: dict = {
'type': 'message',
'text': f'This channel was created to mirror [incident {investigation_id}]({warroom_link}) '
f'between Teams and Demisto. In order for your Teams messages to be mirrored in Demisto, '
f'you need to mention the Demisto Bot in the message.'
}
send_message_request(service_url, channel_id, conversation)
mirrored_channels.append({
'channel_id': channel_id,
'investigation_id': investigation_id,
'mirror_type': mirror_type,
'mirror_direction': mirror_direction,
'auto_close': auto_close,
'mirrored': False,
'channel_name': channel_name
})
demisto.results(f'Investigation mirrored successfully in channel {channel_name}.')
team['mirrored_channels'] = mirrored_channels
integration_context['teams'] = json.dumps(teams)
demisto.setIntegrationContext(integration_context)
def channel_mirror_loop():
"""
    Runs in a long-running container, checking for newly mirrored investigations.
"""
while True:
found_channel_to_mirror: bool = False
try:
integration_context = demisto.getIntegrationContext()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
mirrored_channels = team.get('mirrored_channels', [])
channel: dict
for channel in mirrored_channels:
investigation_id = channel.get('investigation_id', '')
if not channel['mirrored']:
demisto.info(f'Mirroring incident: {investigation_id} in Microsoft Teams')
channel_to_update: dict = channel
if channel_to_update['mirror_direction'] and channel_to_update['mirror_type']:
demisto.mirrorInvestigation(
channel_to_update['investigation_id'],
channel_to_update['mirror_type'],
bool(strtobool(channel_to_update['auto_close']))
)
channel_to_update['mirrored'] = True
demisto.info(f'Mirrored incident: {investigation_id} to Microsoft Teams successfully')
else:
demisto.info(f'Could not mirror {investigation_id}')
team['mirrored_channels'] = mirrored_channels
integration_context['teams'] = json.dumps(teams)
demisto.setIntegrationContext(integration_context)
found_channel_to_mirror = True
break
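                # At most one newly-mirrored channel is handled per pass; break out,
                # persist the updated context and re-read it after the sleep below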
if found_channel_to_mirror:
break
except Exception as e:
demisto.error(f'An error occurred in channel mirror loop: {str(e)}')
demisto.updateModuleHealth(f'An error occurred: {str(e)}')
finally:
time.sleep(5)
def member_added_handler(integration_context: dict, request_body: dict, channel_data: dict):
"""
Handles member added activity
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
:param channel_data: Microsoft Teams tenant, team and channel details
:return: None
"""
bot_id = demisto.params().get('bot_id')
team: dict = channel_data.get('team', {})
team_id: str = team.get('id', '')
team_aad_id: str = team.get('aadGroupId', '')
team_name: str = team.get('name', '')
tenant: dict = channel_data.get('tenant', {})
tenant_id: str = tenant.get('id', '')
recipient: dict = request_body.get('recipient', {})
recipient_name: str = recipient.get('name', '')
members_added: list = request_body.get('membersAdded', [])
teams: list = json.loads(integration_context.get('teams', '[]'))
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
for member in members_added:
member_id = member.get('id', '')
if bot_id in member_id:
# The bot was added to a team, caching team ID and team members
demisto.info(f'The bot was added to team {team_name}')
integration_context['tenant_id'] = tenant_id
integration_context['bot_name'] = recipient_name
break
team_members: list = get_team_members(service_url, team_id)
found_team: bool = False
for team in teams:
if team.get('team_aad_id', '') == team_aad_id:
team['team_members'] = team_members
found_team = True
break
if not found_team:
        # Did not find an existing team, adding a new team object
teams.append({
'team_aad_id': team_aad_id,
'team_id': team_id,
'team_name': team_name,
'team_members': team_members
})
integration_context['teams'] = json.dumps(teams)
demisto.setIntegrationContext(integration_context)
def direct_message_handler(integration_context: dict, request_body: dict, conversation: dict, message: str):
"""
Handles a direct message sent to the bot
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
:param conversation: Conversation object sent
:param message: The direct message sent
:return: None
"""
conversation_id: str = conversation.get('id', '')
from_property: dict = request_body.get('from', {})
user_id: str = from_property.get('id', '')
team_member: dict = get_team_member(integration_context, user_id)
username: str = team_member.get('username', '')
user_email: str = team_member.get('user_email', '')
formatted_message: str = str()
attachment: dict = dict()
return_card: bool = False
allow_external_incidents_creation: bool = demisto.params().get('allow_external_incidents_creation', False)
lowered_message = message.lower()
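    # Messages containing 'incident' together with 'create', 'open' or 'new' are treated as incident-creation requests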
if lowered_message.find('incident') != -1 and (lowered_message.find('create') != -1
or lowered_message.find('open') != -1
or lowered_message.find('new') != -1):
if user_email:
demisto_user = demisto.findUser(email=user_email)
else:
demisto_user = demisto.findUser(username=username)
if not demisto_user and not allow_external_incidents_creation:
data = 'You are not allowed to create incidents.'
else:
data = process_incident_create_message(demisto_user, message)
formatted_message = urlify_hyperlinks(data)
else:
try:
data = demisto.directMessage(message, username, user_email, allow_external_incidents_creation)
return_card = True
if data.startswith('`'): # We got a list of incidents/tasks:
data_by_line: list = data.replace('```', '').strip().split('\n')
return_card = True
if data_by_line[0].startswith('Task'):
attachment = process_tasks_list(data_by_line)
else:
attachment = process_incidents_list(data_by_line)
else: # Mirror investigation command / unknown direct message
attachment = process_mirror_or_unknown_message(data)
except Exception as e:
data = str(e)
if return_card:
conversation = {
'type': 'message',
'attachments': [attachment]
}
else:
formatted_message = formatted_message or data
conversation = {
'type': 'message',
'text': formatted_message
}
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
send_message_request(service_url, conversation_id, conversation)
def entitlement_handler(integration_context: dict, request_body: dict, value: dict, conversation_id: str):
"""
Handles activity the bot received as part of TeamsAsk flow, which includes entitlement
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
    :param value: Object which includes the user's response, the entitlement GUID, investigation ID and task ID
:param conversation_id: Message conversation ID
:return: None
"""
response: str = value.get('response', '')
entitlement_guid: str = value.get('entitlement', '')
investigation_id: str = value.get('investigation_id', '')
task_id: str = value.get('task_id', '')
from_property: dict = request_body.get('from', {})
team_members_id: str = from_property.get('id', '')
team_member: dict = get_team_member(integration_context, team_members_id)
demisto.handleEntitlementForUser(
incidentID=investigation_id,
guid=entitlement_guid,
taskID=task_id,
email=team_member.get('user_email', ''),
content=response
)
activity_id: str = request_body.get('replyToId', '')
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
update_message(service_url, conversation_id, activity_id, 'Your response was submitted successfully.')
def message_handler(integration_context: dict, request_body: dict, channel_data: dict, message: str):
"""
Handles a message in which the bot was mentioned
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
:param channel_data: Microsoft Teams tenant, team and channel details
:param message: The message which was sent mentioning the bot
:return: None
"""
channel: dict = channel_data.get('channel', {})
channel_id: str = channel.get('id', '')
team_id: str = channel_data.get('team', {}).get('id', '')
from_property: dict = request_body.get('from', {})
team_member_id: str = from_property.get('id', '')
if integration_context.get('teams'):
teams: list = json.loads(integration_context['teams'])
for team in teams:
if team.get('team_id', '') == team_id:
mirrored_channels: list = team.get('mirrored_channels', [])
for mirrored_channel in mirrored_channels:
if mirrored_channel.get('channel_id') == channel_id:
if mirrored_channel.get('mirror_direction', '') != 'FromDemisto' \
and 'none' not in mirrored_channel.get('mirror_type', ''):
investigation_id: str = mirrored_channel.get('investigation_id', '')
username: str = from_property.get('name', '')
                            user_email: str = get_team_member(integration_context, team_member_id).get('user_email', '')
demisto.addEntry(
id=investigation_id,
entry=message,
username=username,
email=user_email,
footer=f'\n**{ENTRY_FOOTER}**'
)
return
@APP.route('/', methods=['POST'])
def messages() -> Response:
"""
Main handler for messages sent to the bot
"""
headers: dict = cast(Dict[Any, Any], request.headers)
if validate_auth_header(headers) is False:
demisto.info(f'Authorization header failed: {str(headers)}')
else:
request_body: dict = request.json
integration_context: dict = demisto.getIntegrationContext()
service_url: str = request_body.get('serviceUrl', '')
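        # Cache the incoming bot service URL (without a trailing slash) so outgoing requests can reuse it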
if service_url:
service_url = service_url[:-1] if service_url.endswith('/') else service_url
integration_context['service_url'] = service_url
demisto.setIntegrationContext(integration_context)
channel_data: dict = request_body.get('channelData', {})
event_type: str = channel_data.get('eventType', '')
conversation: dict = request_body.get('conversation', {})
conversation_type: str = conversation.get('conversationType', '')
conversation_id: str = conversation.get('id', '')
message_text: str = request_body.get('text', '')
# Remove bot mention
bot_name = integration_context.get('bot_name', '')
formatted_message: str = message_text.replace(f'<at>{bot_name}</at>', '')
value: dict = request_body.get('value', {})
if event_type == 'teamMemberAdded':
demisto.info('New Microsoft Teams team member was added')
member_added_handler(integration_context, request_body, channel_data)
elif value:
# In TeamsAsk process
demisto.info('Got response from user in MicrosoftTeamsAsk process')
entitlement_handler(integration_context, request_body, value, conversation_id)
elif conversation_type == 'personal':
demisto.info('Got direct message to the bot')
direct_message_handler(integration_context, request_body, conversation, formatted_message)
else:
demisto.info('Got message mentioning the bot')
message_handler(integration_context, request_body, channel_data, formatted_message)
demisto.info('Finished processing Microsoft Teams activity successfully')
demisto.updateModuleHealth('')
return Response(status=200)
def long_running_loop():
"""
The infinite loop which runs the mirror loop and the bot app in two different threads
"""
try:
port_mapping: str = PARAMS.get('longRunningPort', '')
if port_mapping:
port: int = int(port_mapping.split(':')[0])
else:
raise ValueError('No port mapping was provided')
Thread(target=channel_mirror_loop, daemon=True).start()
demisto.info('Started channel mirror loop thread')
http_server = WSGIServer(('', port), APP)
http_server.serve_forever()
except Exception as e:
demisto.error(f'An error occurred in long running loop: {str(e)}')
raise ValueError(str(e))
def test_module():
"""
Tests token retrieval for Bot Framework API
"""
get_bot_access_token()
demisto.results('ok')
def main():
""" COMMANDS MANAGER / SWITCH PANEL """
commands: dict = {
'test-module': test_module,
'long-running-execution': long_running_loop,
'send-notification': send_message,
'mirror-investigation': mirror_investigation,
'close-channel': close_channel
# 'microsoft-teams-create-team': create_team,
# 'microsoft-teams-send-file': send_file,
}
''' EXECUTION '''
    command: str = demisto.command()
    try:
        handle_proxy()
        LOG(f'Command being called is {command}')
        if command in commands:
            commands[command]()
# Log exceptions
except Exception as e:
if command == 'long-running-execution':
LOG(str(e))
LOG.print_log()
demisto.updateModuleHealth(str(e))
else:
return_error(str(e))
if __name__ == 'builtins':
main()
|
install_utils.py
|
import getopt
import re
import subprocess
import sys
import threading
import time
sys.path = [".", "lib"] + sys.path
import testconstants
from remote.remote_util import RemoteMachineShellConnection, RemoteUtilHelper
from membase.api.rest_client import RestConnection
import install_constants
import TestInput
import logging.config
logging.config.fileConfig("scripts.logging.conf")
log = logging.getLogger()
NodeHelpers = []
# Default params
params = {
"version": None,
"install_tasks": install_constants.DEFAULT_INSTALL_TASKS,
"url": None,
"debug_logs": False,
"cb_edition": install_constants.CB_ENTERPRISE,
"timeout": install_constants.INSTALL_TIMEOUT,
"all_nodes_same_os": False,
"skip_local_download": True,
"storage_mode": "plasma",
"disable_consistency": False,
"enable_ipv6": False,
"use_domain_names": False,
"fts_quota": testconstants.FTS_QUOTA,
"fts_query_limit": 0
}
class build:
def __init__(self, name, url, path, product="cb"):
self.name = name
self.url = url
self.path = path
self.product = product
self.version = params["version"]
class NodeHelper:
def __init__(self, node):
self.node = node
self.ip = node.ip
self.params = params
self.build = None
self.queue = None
self.thread = None
self.rest = None
self.install_success = False
self.connect_ok = False
self.shell = None
self.info = None
self.enable_ipv6 = False
self.check_node_reachable()
def check_node_reachable(self):
start_time = time.time()
        # Retry for up to 60 seconds (roughly three attempts, 20 seconds apart)
while time.time() < start_time + 60:
try:
self.shell = RemoteMachineShellConnection(self.node, exit_on_failure=False)
self.info = self.shell.extract_remote_info()
self.connect_ok = True
if self.connect_ok:
break
except Exception as e:
log.warn("{0} unreachable, {1}, retrying..".format(self.ip, e.message))
time.sleep(20)
def get_os(self):
os = self.info.distribution_version.lower()
to_be_replaced = ['\n', ' ', 'gnu/linux']
for _ in to_be_replaced:
if _ in os:
os = os.replace(_, '')
if self.info.deliverable_type == "dmg":
major_version = os.split('.')
os = major_version[0] + '.' + major_version[1]
return os
def uninstall_cb(self):
if install_constants.CMDS[self.info.deliverable_type]["uninstall"]:
cmd = install_constants.CMDS[self.info.deliverable_type]["uninstall"]
if "msi" in cmd:
'''WINDOWS UNINSTALL'''
self.shell.terminate_processes(self.info, [s for s in testconstants.WIN_PROCESSES_KILLED])
self.shell.terminate_processes(self.info, \
[s + "-*" for s in testconstants.COUCHBASE_FROM_VERSION_3])
installed_version, _ = self.shell.execute_command(
"cat " + install_constants.DEFAULT_INSTALL_DIR["WINDOWS_SERVER"] + "VERSION.txt")
if len(installed_version) == 1:
installed_msi, _ = self.shell.execute_command(
"cd " + install_constants.DOWNLOAD_DIR["WINDOWS_SERVER"] + "; ls *" + installed_version[
0] + "*.msi")
if len(installed_msi) == 1:
self.shell.execute_command(
install_constants.CMDS[self.info.deliverable_type]["uninstall"].replace("installed-msi",
installed_msi[0]))
for browser in install_constants.WIN_BROWSERS:
self.shell.execute_command("taskkill /F /IM " + browser + " /T")
else:
duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["uninstall"]
start_time = time.time()
while time.time() < start_time + timeout:
try:
o, e = self.shell.execute_command(cmd, debug=self.params["debug_logs"])
if o == ['1']:
break
self.wait_for_completion(duration, event)
except Exception as e:
log.warn("Exception {0} occurred on {1}, retrying..".format(e.message, self.ip))
self.wait_for_completion(duration, event)
def pre_install_cb(self):
if install_constants.CMDS[self.info.deliverable_type]["pre_install"]:
cmd = install_constants.CMDS[self.info.deliverable_type]["pre_install"]
duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["pre_install"]
if cmd is not None and "HDIUTIL_DETACH_ATTACH" in cmd:
start_time = time.time()
while time.time() < start_time + timeout:
try:
ret = hdiutil_attach(self.shell, self.build.path)
if ret:
break
self.wait_for_completion(duration, event)
except Exception as e:
log.warn("Exception {0} occurred on {1}, retrying..".format(e.message, self.ip))
self.wait_for_completion(duration, event)
def install_cb(self):
self.pre_install_cb()
if install_constants.CMDS[self.info.deliverable_type]["install"]:
if "suse" in self.get_os():
cmd = install_constants.CMDS[self.info.deliverable_type]["suse_install"]
else:
cmd = install_constants.CMDS[self.info.deliverable_type]["install"]
cmd = cmd.replace("buildbinary", self.build.name)
cmd = cmd.replace("buildpath", self.build.path)
cmd = cmd.replace("mountpoint", "/tmp/couchbase-server-" + params["version"])
duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["install"]
start_time = time.time()
while time.time() < start_time + timeout:
try:
o, e = self.shell.execute_command(cmd, debug=self.params["debug_logs"])
if o == ['1']:
break
self.wait_for_completion(duration, event)
except Exception as e:
log.warn("Exception {0} occurred on {1}, retrying..".format(e.message, self.ip))
self.wait_for_completion(duration, event)
self.post_install_cb()
def post_install_cb(self):
duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["post_install"]
start_time = time.time()
while time.time() < start_time + timeout:
try:
if install_constants.CMDS[self.info.deliverable_type]["post_install"]:
cmd = install_constants.CMDS[self.info.deliverable_type]["post_install"].replace("buildversion",
self.build.version)
o, e = self.shell.execute_command(cmd, debug=self.params["debug_logs"])
if o == ['1']:
break
else:
if install_constants.CMDS[self.info.deliverable_type]["post_install_retry"]:
if self.info.deliverable_type == "msi":
check_if_downgrade, _ = self.shell.execute_command(
"cd " + install_constants.DOWNLOAD_DIR["WINDOWS_SERVER"] +
"; vi +\"set nobomb | set fenc=ascii | x\" install_status.txt; "
"grep 'Adding WIX_DOWNGRADE_DETECTED property' install_status.txt")
print(check_if_downgrade * 10)
else:
self.shell.execute_command(
install_constants.CMDS[self.info.deliverable_type]["post_install_retry"],
debug=self.params["debug_logs"])
self.wait_for_completion(duration, event)
except Exception as e:
log.warn("Exception {0} occurred on {1}, retrying..".format(e.message, self.ip))
self.wait_for_completion(duration, event)
def set_cbft_env_options(self, name, value, retries=3):
if self.get_os() in install_constants.LINUX_DISTROS:
while retries > 0:
if self.shell.file_exists("/opt/couchbase/bin/", "couchbase-server"):
ret, _ = self.shell.execute_command(install_constants.CBFT_ENV_OPTIONS[name].format(value))
self.shell.stop_server()
self.shell.start_server()
if ret == ['1']:
log.info("{0} set to {1} on {2}".format(name, value, self.ip))
break
else:
time.sleep(20)
retries -= 1
else:
print_result_and_exit("Unable to set fts_query_limit on {0}".format(self.ip))
def _get_cli_path(self):
if self.get_os() in install_constants.LINUX_DISTROS:
return install_constants.DEFAULT_CLI_PATH["LINUX_DISTROS"]
elif self.get_os() in install_constants.MACOS_VERSIONS:
return install_constants.DEFAULT_CLI_PATH["MACOS_VERSIONS"]
elif self.get_os() in install_constants.WINDOWS_SERVER:
return install_constants.DEFAULT_CLI_PATH["WINDOWS_SERVER"]
def _set_ip_version(self):
if params["enable_ipv6"]:
self.enable_ipv6 = True
if self.node.ip.startswith("["):
hostname = self.node.ip[self.node.ip.find("[") + 1:self.node.ip.find("]")]
else:
hostname = self.node.ip
cmd = install_constants.NODE_INIT["ipv6"].format(self._get_cli_path(),
self.ip,
hostname,
self.node.rest_username,
self.node.rest_password)
else:
cmd = install_constants.NODE_INIT["ipv4"].format(self._get_cli_path(),
self.ip,
self.node.rest_username,
self.node.rest_password)
self.shell.execute_command(cmd)
def pre_init_cb(self):
try:
self._set_ip_version()
if params["fts_query_limit"] > 0:
self.set_cbft_env_options("fts_query_limit", params["fts_query_limit"])
except Exception as e:
log.warn("Exception {0} occurred during pre-init".format(e.message))
def post_init_cb(self):
# Optionally change node name and restart server
if params.get('use_domain_names', False):
RemoteUtilHelper.use_hostname_for_server_settings(self.node)
# Optionally disable consistency check
if params.get('disable_consistency', False):
self.rest.set_couchdb_option(section='couchdb',
option='consistency_check_ratio',
value='0.0')
def get_services(self):
if not self.node.services:
return ["kv"]
elif self.node.services:
return self.node.services.split(',')
def allocate_memory_quotas(self):
        kv_quota = 0
        start_time = time.time()
        while time.time() < start_time + 30 and kv_quota == 0:
            # Re-query the node until it reports a non-zero reserved-memory figure
            info = self.rest.get_nodes_self()
            kv_quota = int(info.mcdMemoryReserved * testconstants.CLUSTER_QUOTA_RATIO)
            time.sleep(1)
self.services = self.get_services()
if "index" in self.services:
log.info("Setting INDEX memory quota as {0} MB on {1}".format(testconstants.INDEX_QUOTA, self.ip))
self.rest.set_service_memoryQuota(service='indexMemoryQuota', memoryQuota=testconstants.INDEX_QUOTA)
kv_quota -= testconstants.INDEX_QUOTA
if "fts" in self.services:
log.info("Setting FTS memory quota as {0} MB on {1}".format(params["fts_quota"], self.ip))
self.rest.set_service_memoryQuota(service='ftsMemoryQuota', memoryQuota=params["fts_quota"])
kv_quota -= params["fts_quota"]
if "cbas" in self.services:
log.info("Setting CBAS memory quota as {0} MB on {1}".format(testconstants.CBAS_QUOTA, self.ip))
self.rest.set_service_memoryQuota(service="cbasMemoryQuota", memoryQuota=testconstants.CBAS_QUOTA)
kv_quota -= testconstants.CBAS_QUOTA
if "kv" in self.services:
if kv_quota < testconstants.MIN_KV_QUOTA:
log.warning("KV memory quota is {0}MB but needs to be at least {1}MB on {2}".format(kv_quota,
testconstants.MIN_KV_QUOTA,
self.ip))
kv_quota = testconstants.MIN_KV_QUOTA
log.info("Setting KV memory quota as {0} MB on {1}".format(kv_quota, self.ip))
self.rest.init_cluster_memoryQuota(self.node.rest_username, self.node.rest_password, kv_quota)
def init_cb(self):
duration, event, timeout = install_constants.WAIT_TIMES[self.info.deliverable_type]["init"]
self.wait_for_completion(duration * 2, event)
start_time = time.time()
while time.time() < start_time + timeout:
try:
init_success = False
self.pre_init_cb()
self.rest = RestConnection(self.node)
# Make sure that data_path and index_path are writable by couchbase user
for path in set(filter(None, [self.node.data_path, self.node.index_path])):
for cmd in ("rm -rf {0}/*".format(path),
"chown -R couchbase:couchbase {0}".format(path)):
self.shell.execute_command(cmd)
self.rest.set_data_path(data_path=self.node.data_path, index_path=self.node.index_path)
self.allocate_memory_quotas()
self.rest.init_node_services(hostname=None,
username=self.node.rest_username,
password=self.node.rest_password,
services=self.get_services())
if "index" in self.get_services():
self.rest.set_indexer_storage_mode(storageMode=params["storage_mode"])
self.rest.init_cluster(username=self.node.rest_username,
password=self.node.rest_password)
init_success = True
if init_success:
break
self.wait_for_completion(duration, event)
except Exception as e:
log.warn("Exception {0} occurred on {1}, retrying..".format(e.message, self.ip))
self.wait_for_completion(duration, event)
self.post_init_cb()
def wait_for_completion(self, duration, event):
if params["debug_logs"]:
log.info(event.format(duration, self.ip))
time.sleep(duration)
def cleanup_cb(self):
cmd = install_constants.CMDS[self.info.deliverable_type]["cleanup"]
if cmd:
try:
# Delete all but the most recently accessed build binaries
self.shell.execute_command(cmd, debug=self.params["debug_logs"])
            except Exception:
                # OK to ignore cleanup failures here
                pass
def _get_mounted_volumes(shell):
volumes, _ = shell.execute_command("ls /tmp | grep '{0}'".format("couchbase-server-"))
return volumes
def hdiutil_attach(shell, dmg_path):
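    # Detach any stale couchbase-server volumes mounted under /tmp, then attach the
    # dmg at a version-specific mountpoint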
volumes = _get_mounted_volumes(shell)
for volume in volumes:
shell.execute_command("hdiutil detach " + '"' + "/tmp/" + volume + '"')
shell.execute_command("umount " + '"' + "/tmp/" + volume + '"')
shell.execute_command("hdiutil attach {0} -mountpoint /tmp/{1}".
format(dmg_path, "couchbase-server-" + params["version"]))
return shell.file_exists("/tmp/", "couchbase-server-" + params["version"])
def get_node_helper(ip):
for node_helper in NodeHelpers:
if node_helper.ip == ip:
return node_helper
return None
def print_result_and_exit(err=None):
if err:
log.error(err)
success = []
fail = []
for server in params["servers"]:
node = get_node_helper(server.ip)
if not node or not node.install_success:
fail.append(server.ip)
elif node.install_success:
success.append(server.ip)
log.info("-" * 100)
for _ in fail:
log.error("INSTALL FAILED ON: \t{0}".format(_))
log.info("-" * 100)
for _ in success:
log.info("INSTALL COMPLETED ON: \t{0}".format(_))
log.info("-" * 100)
if len(fail) > 0:
sys.exit(1)
def process_user_input():
params = _parse_user_input()
_params_validation()
return params
def _parse_user_input():
try:
(opts, args) = getopt.getopt(sys.argv[1:], 'hi:p:', [])
for o, a in opts:
if o == "-h":
print_result_and_exit(install_constants.USAGE)
if len(sys.argv) <= 1:
print_result_and_exit(install_constants.USAGE)
userinput = TestInput.TestInputParser.get_test_input(sys.argv)
except IndexError:
print_result_and_exit(install_constants.USAGE)
    except getopt.GetoptError as err:
print_result_and_exit(str(err))
# Mandatory params
if not userinput.servers:
print_result_and_exit("No servers specified. Please use the -i parameter." + "\n" + install_constants.USAGE)
else:
params["servers"] = userinput.servers
# Validate and extract remaining params
for key, value in userinput.test_params.items():
if key == "debug_logs":
params["debug_logs"] = True if value.lower() == "true" else False
if key == "install_tasks":
tasks = []
for task in value.split('-'):
if task in install_constants.DEFAULT_INSTALL_TASKS and task not in tasks:
tasks.append(task)
if len(tasks) > 0:
params["install_tasks"] = tasks
log.info("INSTALL TASKS: {0}".format(params["install_tasks"]))
if "install" not in params["install_tasks"] and "init" not in params["install_tasks"]:
return params # No other parameters needed
if key == 'v' or key == "version":
if re.match('^[0-9\.\-]*$', value) and len(value) > 5:
params["version"] = value
if key == "url":
if value.startswith("http"):
params["url"] = value
else:
log.warn("URL:{0} is not valid, will use version to locate build".format(value))
if key == "type" or key == "edition" and value.lower() in install_constants.CB_EDITIONS:
params["edition"] = value.lower()
if key == "timeout" and int(value) > 60:
params["timeout"] = int(value)
if key == "storage_mode":
params["storage_mode"] = value
if key == "disable_consistency":
params["disable_consistency"] = True if value.lower() == "true" else False
if key == "skip_local_download":
params["skip_local_download"] = False if value.lower() == "false" else True
if key == "enable_ipv6":
if value.lower() == "true":
for server in params["servers"]:
if re.match('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', server.ip):
print_result_and_exit(
"Cannot enable IPv6 on an IPv4 machine: {0}. Please run without enable_ipv6=True.".format(
server.ip))
params["enable_ipv6"] = True
if key == "fts_quota" and int(value) >= 256:
params["fts_quota"] = int(value)
if key == "fts_query_limit" and int(value) > 0:
params["fts_query_limit"] = int(value)
if not params["version"] and not params["url"]:
print_result_and_exit("Need valid build version or url to proceed")
return params
def __check_servers_reachable():
reachable = []
unreachable = []
for server in params["servers"]:
try:
RemoteMachineShellConnection(server, exit_on_failure=False)
reachable.append(server.ip)
except Exception as e:
log.error(e.message)
unreachable.append(server.ip)
if len(unreachable) > 0:
log.info("-" * 100)
for _ in unreachable:
log.error("INSTALL FAILED ON: \t{0}".format(_))
log.info("-" * 100)
for _ in reachable:
# Marking this node as "completed" so it is not moved to failedInstall state
log.info("INSTALL COMPLETED ON: \t{0}".format(_))
log.info("-" * 100)
sys.exit(1)
def _params_validation():
__check_servers_reachable()
# Create 1 NodeHelper instance per VM
for server in params["servers"]:
NodeHelpers.append(NodeHelper(server))
# Version compatibility
node_os = []
for node in NodeHelpers:
if node.get_os() not in install_constants.SUPPORTED_OS:
print_result_and_exit("Install on {0} OS is not supported".format(node.get_os()))
else:
node_os.append(node.get_os())
if len(set(node_os)) == 1:
params["all_nodes_same_os"] = True
_check_version_compatibility(NodeHelpers[0])
else:
for node in NodeHelpers:
_check_version_compatibility(node)
# TODO: check if cb version is compatible with os
def _check_version_compatibility(node):
pass
def pre_install_steps():
if "install" in params["install_tasks"]:
if params["url"] is not None:
if NodeHelpers[0].shell.is_url_live(params["url"]):
params["all_nodes_same_os"] = True
for node in NodeHelpers:
build_binary = __get_build_binary_name(node)
build_url = params["url"]
filepath = __get_download_dir(node.get_os()) + build_binary
node.build = build(build_binary, build_url, filepath)
else:
print_result_and_exit("URL {0} is not live. Exiting.".format(params["url"]))
else:
for node in NodeHelpers:
build_binary = __get_build_binary_name(node)
build_url = __get_build_url(node, build_binary)
if not build_url:
print_result_and_exit(
"Build is not present in latestbuilds or release repos, please check {0}".format(build_binary))
filepath = __get_download_dir(node.get_os()) + build_binary
node.build = build(build_binary, build_url, filepath)
_download_build()
def _execute_local(command, timeout):
    # Popen.wait() returns an exit code, not the process, so the original chained call
    # broke communicate(); let communicate() do the waiting (bounded by timeout) and drain stdout
    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    process.communicate(timeout=timeout)[0].strip()
def __copy_thread(src_path, dest_path, node):
logging.info("Copying %s to %s" % (src_path, node.ip))
node.shell.copy_file_local_to_remote(src_path, dest_path)
logging.info("Done copying build to %s.", node.ip)
def _copy_to_nodes(src_path, dest_path):
copy_threads = []
for node in NodeHelpers:
copy_to_node = threading.Thread(target=__copy_thread, args=(src_path, dest_path, node))
copy_threads.append(copy_to_node)
copy_to_node.start()
for thread in copy_threads:
thread.join()
def __get_build_url(node, build_binary):
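    # With enable_ipv6 only the IPv6 repo is tried; otherwise latestbuilds is tried
    # first, then the release repo. Returns None if no live URL is found.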
if params["enable_ipv6"]:
ipv6_url = "{0}{1}/{2}/{3}".format(
testconstants.CB_FQDN_REPO,
testconstants.CB_VERSION_NAME[(params["version"]).split('-')[0][:-2]],
params["version"].split('-')[1],
build_binary)
if node.shell.is_url_live(ipv6_url, exit_if_not_live=False):
return ipv6_url
else:
latestbuilds_url = "{0}{1}/{2}/{3}".format(
testconstants.CB_REPO,
testconstants.CB_VERSION_NAME[(params["version"]).split('-')[0][:-2]],
params["version"].split('-')[1],
build_binary)
release_url = "{0}{1}/{2}/{3}".format(
testconstants.CB_RELEASE_REPO,
testconstants.CB_VERSION_NAME[(params["version"]).split('-')[0][:-2]],
params["version"].split('-')[1],
build_binary)
if node.shell.is_url_live(latestbuilds_url, exit_if_not_live=False):
return latestbuilds_url
elif node.shell.is_url_live(release_url, exit_if_not_live=False):
return release_url
return None
def _download_build():
if params["all_nodes_same_os"] and not params["skip_local_download"]:
build_url = NodeHelpers[0].build.url
filepath = NodeHelpers[0].build.path
timeout = install_constants.WAIT_TIMES[NodeHelpers[0].info.deliverable_type]["download_binary"]
cmd = install_constants.WGET_CMD.format(__get_download_dir(NodeHelpers[0].get_os()), build_url)
log.debug("Downloading build binary to {0}..".format(filepath))
_execute_local(cmd, timeout)
_copy_to_nodes(filepath, filepath)
else:
for node in NodeHelpers:
build_url = node.build.url
filepath = node.build.path
cmd = install_constants.DOWNLOAD_CMD[node.info.deliverable_type]
if "curl" in cmd:
cmd = cmd.format(build_url, filepath,
install_constants.WAIT_TIMES[node.info.deliverable_type]
["download_binary"])
elif "wget" in cmd:
cmd = cmd.format(__get_download_dir(node.get_os()), build_url)
logging.info("Downloading build binary to {0}:{1}..".format(node.ip, filepath))
check_and_retry_download_binary(cmd, node)
log.debug("Done downloading build binary")
def check_file_exists(node, filepath):
output, _ = node.shell.execute_command("ls -lh {0}".format(filepath), debug=params["debug_logs"])
for line in output:
if line.find('No such file or directory') == -1:
return True
return False
def check_and_retry_download_binary(cmd, node):
duration, event, timeout = install_constants.WAIT_TIMES[node.info.deliverable_type]["download_binary"]
time.sleep(duration)
start_time = time.time()
while time.time() < start_time + timeout:
try:
node.shell.execute_command(cmd, debug=params["debug_logs"])
if check_file_exists(node, node.build.path):
break
time.sleep(duration)
except Exception as e:
log.warn("Unable to download build: {0}, retrying..".format(e.message))
time.sleep(duration)
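    # The while/else 'else' below runs only if the loop finished without a break,
    # i.e. the download did not succeed within the timeout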
else:
print_result_and_exit("Unable to download build in {0}s on {1}, exiting".format(timeout, node.ip))
def __get_download_dir(os):
if os in install_constants.LINUX_DISTROS:
return install_constants.DOWNLOAD_DIR["LINUX_DISTROS"]
elif os in install_constants.MACOS_VERSIONS:
return install_constants.DOWNLOAD_DIR["MACOS_VERSIONS"]
elif os in install_constants.WINDOWS_SERVER:
return install_constants.DOWNLOAD_DIR["WINDOWS_SERVER"]
def __get_build_binary_name(node):
# couchbase-server-enterprise-6.5.0-4557-centos7.x86_64.rpm
# couchbase-server-enterprise-6.5.0-4557-suse15.x86_64.rpm
# couchbase-server-enterprise-6.5.0-4557-rhel8.x86_64.rpm
# couchbase-server-enterprise-6.5.0-4557-oel7.x86_64.rpm
# couchbase-server-enterprise-6.5.0-4557-amzn2.x86_64.rpm
if node.get_os() in install_constants.X86:
return "{0}-{1}-{2}.{3}.{4}".format(params["cb_edition"],
params["version"],
node.get_os(),
node.info.architecture_type,
node.info.deliverable_type)
# couchbase-server-enterprise_6.5.0-4557-ubuntu16.04_amd64.deb
# couchbase-server-enterprise_6.5.0-4557-debian8_amd64.deb
# couchbase-server-enterprise_6.5.0-4557-windows_amd64.msi
elif node.get_os() in install_constants.AMD64:
if "windows" in node.get_os():
node.info.deliverable_type = "msi"
return "{0}_{1}-{2}_{3}.{4}".format(params["cb_edition"],
params["version"],
node.get_os(),
"amd64",
node.info.deliverable_type)
# couchbase-server-enterprise_6.5.0-4557-macos_x86_64.dmg
elif node.get_os() in install_constants.MACOS_VERSIONS:
return "{0}_{1}-{2}_{3}.{4}".format(params["cb_edition"],
params["version"],
"macos",
node.info.architecture_type,
node.info.deliverable_type)
|
processor.py
|
'''Processor '''
import multiprocessing
import logging as log
from subprocess import Popen, PIPE
class Processor(object):
'''Processor '''
def __init__(self):
'''Init '''
self.processes = {}
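        # Maps worker id -> multiprocessing.Process handle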
def stop(self, worker):
'''Stop a Process'''
w_id = worker['id']
log.info('Stopping Process for Worker %d', w_id)
self.processes[w_id].terminate()
def start(self, worker):
'''Start a Process'''
w_id = worker['id']
log.info('Starting Process for Worker %d', w_id)
#Start a process
process = multiprocessing.Process(target=self._run, args=(worker,))
self.processes[w_id] = process
process.start()
def exitcode(self, worker):
'''Return Process Exit Code'''
w_id = worker['id']
return self.processes[w_id].exitcode
def is_alive(self, worker):
'''Check if Process is alive'''
w_id = worker['id']
return self.processes[w_id].is_alive()
def _get_command_line(self, worker):
'''Extract command line from worker'''
command = worker['command']['command']
args = worker['command']['args']
if args:
return '%s %s' % (command, args)
return command
def _run(self, worker):
'''Run a command locally'''
w_id = worker['id']
command = self._get_command_line(worker)
log.info('Running Command: %s', command)
output_file = 'out-%s.txt' % w_id
error_file = 'error-%s.txt' % w_id
p = Popen(command,
stdout=open(output_file, 'a'),
stderr=open(error_file, 'a'),
stdin=PIPE)
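        # Popen does not wait for the command: output is appended to the per-worker
        # out/error files and _run returns as soon as the command has been spawned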
|
executorwebdriver.py
|
import json
import os
import socket
import threading
import traceback
import urlparse
import uuid
from .base import (CallbackHandler,
RefTestExecutor,
RefTestImplementation,
TestharnessExecutor,
extra_timeout,
strip_server)
from .protocol import (BaseProtocolPart,
TestharnessProtocolPart,
Protocol,
SelectorProtocolPart,
ClickProtocolPart,
SendKeysProtocolPart,
ActionSequenceProtocolPart,
TestDriverProtocolPart)
from ..testrunner import Stop
import webdriver as client
here = os.path.join(os.path.split(__file__)[0])
class WebDriverBaseProtocolPart(BaseProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def execute_script(self, script, async=False):
method = self.webdriver.execute_async_script if async else self.webdriver.execute_script
return method(script)
def set_timeout(self, timeout):
try:
self.webdriver.timeouts.script = timeout
except client.WebDriverException:
# workaround https://bugs.chromium.org/p/chromedriver/issues/detail?id=2057
body = {"type": "script", "ms": timeout * 1000}
self.webdriver.send_session_command("POST", "timeouts", body)
@property
def current_window(self):
return self.webdriver.window_handle
def set_window(self, handle):
self.webdriver.window_handle = handle
def wait(self):
while True:
try:
self.webdriver.execute_async_script("")
except (client.TimeoutException, client.ScriptTimeoutException):
pass
except (socket.timeout, client.NoSuchWindowException,
client.UnknownErrorException, IOError):
break
except Exception as e:
                self.logger.error(traceback.format_exc())
break
class WebDriverTestharnessProtocolPart(TestharnessProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
with open(os.path.join(here, "runner.js")) as f:
self.runner_script = f.read()
def load_runner(self, url_protocol):
url = urlparse.urljoin(self.parent.executor.server_url(url_protocol),
"/testharness_runner.html")
self.logger.debug("Loading %s" % url)
self.webdriver.url = url
format_map = {"title": threading.current_thread().name.replace("'", '"')}
self.parent.base.execute_script(self.runner_script % format_map)
def close_old_windows(self):
exclude = self.webdriver.window_handle
handles = [item for item in self.webdriver.handles if item != exclude]
for handle in handles:
try:
self.webdriver.window_handle = handle
self.webdriver.close()
except client.NoSuchWindowException:
pass
self.webdriver.window_handle = exclude
return exclude
def get_test_window(self, window_id, parent):
test_window = None
try:
# Try using the JSON serialization of the WindowProxy object,
# it's in Level 1 but nothing supports it yet
win_s = self.webdriver.execute_script("return window['%s'];" % window_id)
win_obj = json.loads(win_s)
test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
except Exception:
pass
if test_window is None:
after = self.webdriver.handles
if len(after) == 2:
test_window = next(iter(set(after) - set([parent])))
elif after[0] == parent and len(after) > 2:
# Hope the first one here is the test window
test_window = after[1]
else:
raise Exception("unable to find test window")
assert test_window != parent
return test_window
class WebDriverSelectorProtocolPart(SelectorProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def elements_by_selector(self, selector):
return self.webdriver.find.css(selector)
class WebDriverClickProtocolPart(ClickProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def element(self, element):
self.logger.info("click " + repr(element))
return element.click()
class WebDriverSendKeysProtocolPart(SendKeysProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_keys(self, element, keys):
try:
return element.send_keys(keys)
except client.UnknownErrorException as e:
# workaround https://bugs.chromium.org/p/chromedriver/issues/detail?id=1999
if (e.http_status != 500 or
e.status_code != "unknown error"):
raise
return element.send_element_command("POST", "value", {"value": list(keys)})
class WebDriverActionSequenceProtocolPart(ActionSequenceProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_actions(self, actions):
self.webdriver.actions.perform(actions)
class WebDriverTestDriverProtocolPart(TestDriverProtocolPart):
def setup(self):
self.webdriver = self.parent.webdriver
def send_message(self, message_type, status, message=None):
obj = {
"type": "testdriver-%s" % str(message_type),
"status": str(status)
}
if message:
obj["message"] = str(message)
self.webdriver.execute_script("window.postMessage(%s, '*')" % json.dumps(obj))
class WebDriverProtocol(Protocol):
implements = [WebDriverBaseProtocolPart,
WebDriverTestharnessProtocolPart,
WebDriverSelectorProtocolPart,
WebDriverClickProtocolPart,
WebDriverSendKeysProtocolPart,
WebDriverActionSequenceProtocolPart,
WebDriverTestDriverProtocolPart]
def __init__(self, executor, browser, capabilities, **kwargs):
super(WebDriverProtocol, self).__init__(executor, browser)
self.capabilities = capabilities
self.url = browser.webdriver_url
self.webdriver = None
def connect(self):
"""Connect to browser via WebDriver."""
self.logger.debug("Connecting to WebDriver on URL: %s" % self.url)
host, port = self.url.split(":")[1].strip("/"), self.url.split(':')[-1].strip("/")
capabilities = {"alwaysMatch": self.capabilities}
self.webdriver = client.Session(host, port, capabilities=capabilities)
self.webdriver.start()
def teardown(self):
self.logger.debug("Hanging up on WebDriver session")
try:
self.webdriver.quit()
except Exception:
pass
del self.webdriver
def is_alive(self):
try:
# Get a simple property over the connection
self.webdriver.window_handle
except (socket.timeout, client.UnknownErrorException):
return False
return True
def after_connect(self):
self.testharness.load_runner(self.executor.last_environment["protocol"])
class WebDriverRun(object):
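    """Run a test callable against the protocol in a worker thread, enforcing a timeout."""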
def __init__(self, func, protocol, url, timeout):
self.func = func
self.result = None
self.protocol = protocol
self.url = url
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
timeout = self.timeout
try:
self.protocol.base.set_timeout((timeout + extra_timeout))
except client.UnknownErrorException:
self.logger.error("Lost WebDriver connection")
return Stop
executor = threading.Thread(target=self._run)
executor.start()
flag = self.result_flag.wait(timeout + 2 * extra_timeout)
if self.result is None:
if flag:
# flag is True unless we timeout; this *shouldn't* happen, but
# it can if self._run fails to set self.result due to raising
self.result = False, ("INTERNAL-ERROR", "self._run didn't set a result")
else:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.protocol, self.url, self.timeout)
except (client.TimeoutException, client.ScriptTimeoutException):
self.result = False, ("EXTERNAL-TIMEOUT", None)
except (socket.timeout, client.UnknownErrorException):
self.result = False, ("CRASH", None)
except Exception as e:
if (isinstance(e, client.WebDriverException) and
e.http_status == 408 and
e.status_code == "asynchronous script timeout"):
# workaround for https://bugs.chromium.org/p/chromedriver/issues/detail?id=2001
self.result = False, ("EXTERNAL-TIMEOUT", None)
else:
message = str(getattr(e, "message", ""))
if message:
message += "\n"
                message += traceback.format_exc()
self.result = False, ("INTERNAL-ERROR", message)
finally:
self.result_flag.set()
class WebDriverTestharnessExecutor(TestharnessExecutor):
supports_testdriver = True
def __init__(self, browser, server_config, timeout_multiplier=1,
close_after_done=True, capabilities=None, debug_info=None,
**kwargs):
"""WebDriver-based executor for testharness.js tests"""
TestharnessExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = WebDriverProtocol(self, browser, capabilities)
with open(os.path.join(here, "testharness_webdriver.js")) as f:
self.script = f.read()
with open(os.path.join(here, "testharness_webdriver_resume.js")) as f:
self.script_resume = f.read()
self.close_after_done = close_after_done
self.window_id = str(uuid.uuid4())
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
if new_environment["protocol"] != self.last_environment["protocol"]:
self.protocol.testharness.load_runner(new_environment["protocol"])
def do_test(self, test):
url = self.test_url(test)
success, data = WebDriverRun(self.do_testharness,
self.protocol,
url,
test.timeout * self.timeout_multiplier).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_testharness(self, protocol, url, timeout):
format_map = {"abs_url": url,
"url": strip_server(url),
"window_id": self.window_id,
"timeout_multiplier": self.timeout_multiplier,
"timeout": timeout * 1000}
parent_window = protocol.testharness.close_old_windows()
# Now start the test harness
protocol.base.execute_script(self.script % format_map)
test_window = protocol.testharness.get_test_window(self.window_id, parent_window)
handler = CallbackHandler(self.logger, protocol, test_window)
while True:
result = protocol.base.execute_script(
self.script_resume % format_map, async=True)
done, rv = handler(result)
if done:
break
return rv
class WebDriverRefTestExecutor(RefTestExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
debug_info=None, capabilities=None, **kwargs):
"""WebDriver-based executor for reftests"""
RefTestExecutor.__init__(self,
browser,
server_config,
screenshot_cache=screenshot_cache,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = WebDriverProtocol(self, browser,
capabilities=capabilities)
self.implementation = RefTestImplementation(self)
self.close_after_done = close_after_done
self.has_window = False
with open(os.path.join(here, "reftest.js")) as f:
self.script = f.read()
with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
self.wait_script = f.read()
def is_alive(self):
return self.protocol.is_alive()
def do_test(self, test):
self.protocol.webdriver.window.size = (600, 600)
result = self.implementation.run_test(test)
return self.convert_result(test, result)
def screenshot(self, test, viewport_size, dpi):
# https://github.com/w3c/wptrunner/issues/166
assert viewport_size is None
assert dpi is None
return WebDriverRun(self._screenshot,
self.protocol,
self.test_url(test),
test.timeout).run()
def _screenshot(self, protocol, url, timeout):
webdriver = protocol.webdriver
webdriver.url = url
webdriver.execute_async_script(self.wait_script)
screenshot = webdriver.screenshot()
        # strip off the "data:image/png;base64," prefix of the data URL
if screenshot.startswith("data:image/png;base64,"):
screenshot = screenshot.split(",", 1)[1]
return screenshot
|
workers.py
|
import logging
from threading import Thread, current_thread
from json import dumps
import requests
import s3
logger = logging.getLogger()
HEADERS = {'Content-type': 'application/json'}
def download_and_pass_data_thread(filesystem, bucket, uri, next_service):
"""Spawn a thread worker for data downloading task.
Requests the data to be downloaded and pass it to the next service
"""
def worker():
thread = current_thread()
logger.info('%s: Worker started', thread.name)
try:
# Fetch data
s3_data = s3.fetch(filesystem, bucket, uri)
logger.info(
'%s: Downloaded records %s',
thread.name,
s3_data.shape
)
# Build the POST data object
data = {
'data': s3_data.to_dict(),
# Set data identifier (for now, should be handled better)
'id': uri.split('/')[0]
}
            # Pass to next service; raise_for_status() turns a non-2xx reply into
            # requests.HTTPError so the handler below can log it
            response = requests.post(
                f'http://{next_service}',
                data=dumps(data),
                headers=HEADERS
            )
            response.raise_for_status()
except FileNotFoundError as exception:
logger.warning('%s: %s', thread.name, exception)
except requests.HTTPError as exception:
logger.error('Unable to pass data: %s', exception)
logger.debug('%s: Done, exiting', thread.name)
thread = Thread(target=worker)
thread.start()
|
main.py
|
import argparse
import sys
import signal
import time
import os
import subprocess
from multiprocessing import Process, Pool
from multiprocessing.managers import BaseManager
from itertools import product
from termcolor import colored
from server_comm import ServerConnection, set_vars
from vlc_comm import player
from util import get_videos, path2title, Animation
from audio_extract import extract
TO_CLEAR = ["cache", "invite_link.txt", "invite_link.svg"]
def parse():
parser = argparse.ArgumentParser(
description="Route audio of a video file through a local server."
)
group = parser.add_mutually_exclusive_group()
parser.add_argument(
"-f",
"--file",
required=True,
dest="f",
help="Path to video files or directory containing video files",
type=str,
action="append",
)
parser.add_argument(
"-s", "--sub", dest="sub", help="Load subtitle File", type=str, action="store"
)
parser.add_argument(
"--qr", help="Show qr code with the link", dest="qr", action="store_true"
)
parser.add_argument(
"--control",
help="only host can control play/pause signals",
dest="onlyHost",
action="store_true",
)
parser.add_argument(
"--force-rebuild",
help="Force rebuild of the local server",
dest="rebuild",
action="store_true",
)
parser.add_argument(
"--audio-quality",
dest="q",
help="Audio quality to sync from",
choices=["low", "medium", "good", "high"],
type=str,
default="medium",
)
group.add_argument(
"--web",
help="Force routing through a web server",
dest="web",
action="store_true",
)
args = parser.parse_args()
videos = []
for i in range(len(args.f)):
args.f[i] = os.path.abspath(args.f[i])
videos.extend(get_videos(args.f[i], TO_CLEAR))
args.f = videos
return args
def convert_async(paths):
""" Converts video files to audio files asynchronously
using a pool of processes """
pool = Pool()
files = []
st = time.perf_counter()
print(f"[{colored('+','green')}] Extraction of audio started ...")
p = pool.starmap_async(extract, product(paths, [args.q]), callback=files.extend)
p.wait()
print(
f"[{colored('+','green')}] Completed extraction of {colored(len(paths),'yellow')} file(s) in {colored(time.perf_counter()-st,'yellow')} seconds"
)
return files
def exitHandler(*args, **kwargs):
os.system("killall node 2> /dev/null")
os.system("killall npm 2> /dev/null")
for file in TO_CLEAR:
if os.path.exists(file):
try:
os.remove(file)
except:
pass
sys.exit(0)
def spawn_server():
SERVER_PATH = "../../CommonAudioVideoServer/"
if not os.path.exists(SERVER_PATH):
print(
f"[{colored('-','red')}] Invalid Server Path, Try {colored(reinstalling,'red')} the package"
)
sys.exit(-1)
if not os.path.exists(SERVER_PATH + "node_modules"):
print(f"[{colored('+','green')}] Configuring the server ..")
anim = Animation()
subprocess.Popen(
"npm install".split(),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
cwd=os.getcwd() + "/" + SERVER_PATH,
).wait()
anim.complete()
print(f"[{colored('+','green')}] Server configuration complete ..")
if args.rebuild:
print(f"[{colored('+','green')}] Building server ..")
anim = Animation()
subprocess.Popen(
"npm run compile".split(),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
cwd=os.getcwd() + "/" + SERVER_PATH,
).wait()
anim.complete()
print(f"[{colored('+','green')}] Server build successfull ..")
print(f"[{colored('+','green')}] Initializing Server ..")
anim = Animation()
proc = subprocess.Popen(
"npm start".split(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=os.getcwd() + "/" + SERVER_PATH,
)
    for line in iter(proc.stdout.readline, b""):
if b"npm ERR!" in line:
print(colored(line, "red"))
print(
f"[{colored('-','red')}] An error has occured while starting the server\nRestarting the server"
)
os.system("killall node")
os.system("killall npm")
sys.exit(-1)
if b"Press CTRL-C to stop" in line:
anim.complete()
break
def initialize(videos, server, first=False):
audio = convert_async(videos)
for video in videos:
if args.web:
server.upload(video, video[:-3] + "ogg")
else:
server.addAudioPath(video, video[:-3] + "ogg")
player.enqueue(video)
if first:
server.create_room(video)
player.play()
player.pause()
player.seek(0)
else:
server.add_track(video)
if __name__ == "__main__":
signal.signal(signal.SIGINT, exitHandler)
args = parse()
set_vars(args)
if not args.web:
spawn_server()
player.launch(args.sub)
BaseManager.register("ServerConnection", ServerConnection)
manager = BaseManager()
manager.start()
server = manager.ServerConnection()
server.start_listening()
Process(target=player.update, args=(server,)).start()
initialize([args.f[0]], server=server, first=True)
if len(args.f) > 1:
        # start() launches the remaining files in a background process; run() would
        # execute initialize() synchronously in this process and block startup
        Process(
            target=initialize,
            kwargs={"videos": args.f[1:], "server": server, "first": False},
        ).start()
print("\n" + colored("#" * 70, "green") + "\n")
while True:
time.sleep(1)
|
test_operator.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import with_seed, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises
from common import xfail_when_nonstandard_decimal_separator, with_environment
import pytest
import os
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_rnn_with_new_param():
rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
ngates_ = [1, 1, 3, 4]
num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8
for bidirectional in [False, True]:
directions = 2 if bidirectional else 1
for mode, ngates in zip(rnn_modes, ngates_):
first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \
* ngates * (num_layers - 1)
param_size = (first_layer_size + rest_layer_size) * directions
sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional,
state_outputs=False, state_size=state_size, name='rnn')
bind_dict = {
'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)),
'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)),
'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size))
}
if mode == 'lstm':
bind_dict['rnn_state_cell'] = mx.ndarray.zeros(
shape=(num_layers * directions, batch_size, state_size))
ex = sym._bind(default_context(), bind_dict)
ex.forward(is_train=True)
ex01 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex02 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4)
bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size))
ex.copy_params_from(bind_dict)
ex.forward(is_train=True)
ex03 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex04 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4)
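# Added sketch (illustration only, a hypothetical helper not used by the tests above):
# the RNN parameter count that test_rnn_with_new_param assembles, written out once.
# For the unidirectional 'lstm' case here (ngates=4, input_size=128, state_size=8,
# num_layers=3) it gives (1024 + 64 + 16)*4 + (64 + 64 + 16)*4*2 = 5568.
def _rnn_param_count(input_size, state_size, num_layers, ngates, directions=1):
    first = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
    rest = (state_size * directions * state_size + state_size * state_size
            + state_size * 2) * ngates * (num_layers - 1)
    return (first + rest) * directions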
@with_seed()
@pytest.mark.serial
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
@pytest.mark.serial
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
@pytest.mark.serial
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
@pytest.mark.serial
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def test_RNN_float64():
if default_context().device_type == 'gpu':
return
sym = mx.sym.RNN(
mx.sym.Variable('in'),
mx.sym.Variable('par'),
mx.sym.Variable('s'),
state_size = (2),
num_layers = 1,
mode = 'rnn_tanh'
)
dtype = 'float64'
explicit_grad = {
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
}
args_grad = explicit_grad
grad_req = 'write'
ex = sym._bind(default_context(),
{
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
},
args_grad = args_grad,
grad_req = grad_req
)
ex.forward()
ex.outputs[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
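# Added sketch (illustration only, not part of the original suite): a quick
# hand-checkable property of np_softmax above -- rows sum to one and the input
# ordering is preserved. Wrapped in a hypothetical helper so importing this file
# stays side-effect free.
def _np_softmax_sanity_example():
    p = np_softmax(np.array([[1.0, 2.0, 3.0]]))
    assert_allclose(p.sum(axis=-1), np.array([1.0]), rtol=1e-6)
    assert p[0, 2] > p[0, 1] > p[0, 0]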
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out._bind(default_context(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5)
@with_seed()
@pytest.mark.serial
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# if skip_second is True, second argument will not have gradient.
# it is to test #1130
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out._bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1, ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad, np_grad + 1)
@with_seed()
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
@with_seed()
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym._simple_bind(ctx=default_context(), data=data_npy.shape)
outputs = exe.forward(is_train=True, data=data_npy)
assert len(exe.outputs) == num_outputs
for i in range(num_outputs):
gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
(i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i], gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i], gt)
# test backward
ograd = [mx.nd.array(ele, dtype=outputs[i].dtype) for i, ele in enumerate(out_grads_npy)]
exe.backward(out_grads=ograd)
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s._bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x, exec1.outputs[0])
exec1.backward(dy)
assert_almost_equal(dy, dx)
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap._bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
config = [((1, 1, 2), 0, 1),
((1, 1, 2), -1, -2),
((4, 5, 6, 7), 1, 1),
((4, 5, 6, 7), 2, 3),
((4, 5, 6, 7), -2, 2),
((4, 5, 6, 7), -2, -3)]
for shape, axis1, axis2 in config:
data_np = np.random.uniform(size=shape)
data_mx = mx.nd.array(data_np, dtype=data_np.dtype)
ret_np = np.swapaxes(data_np, axis1=axis1, axis2=axis2)
ret_mx = mx.symbol.SwapAxis(data, dim1=axis1, dim2=axis2)
exe_c = ret_mx._bind(default_context(), args=[data_mx])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
assert_almost_equal(out, ret_np)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
@with_seed()
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
@with_seed()
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
@with_seed()
def test_fully_connected():
# Create data of given shape as a uniform distribution centered on 0.0
def random_data(shape, dtype=np.float32):
return mx.nd.random.uniform(low=-0.5,
high=0.5, shape=shape, dtype=dtype)
data = mx.sym.var("data")
fc_weight = mx.sym.var("weight")
fc_bias = mx.sym.var("bias")
fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
data = random_data(shape=(5, 5, 5, 13))
fc_weight = random_data(shape=(10, 325))
fc_bias = random_data(shape=(10))
fc_bias2 = random_data(shape=(10, 1))
data_np = data.asnumpy().reshape(5, 325)
fc_weight_np = np.transpose(fc_weight.asnumpy())
fc_bias_np = fc_bias.asnumpy()
res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np})
# TODO: Fix Bug #15032 when bias has ndim > 1
#check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
@with_seed()
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
@with_seed()
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return np.float32(1.0) * (x > np.float32(0.0))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype('float32')
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric check tests for the float16 data type due to precision issues;
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for the float16 data type due to precision issues;
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(ya.shape, dtype=dtype)],
[g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(ya_full.shape, dtype=dtype)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
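# Added sketch (assumption, for context only, not used by the test above): fgelu in
# test_gelu is the tanh approximation of GELU, 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x**3))).
# A loose numerical spot check against the exact erf form 0.5*x*(1 + erf(x/sqrt(2))):
def _gelu_tanh_vs_erf_example():
    xs = np.linspace(-3.0, 3.0, 13)
    approx = 0.5 * xs * (1.0 + np.tanh(0.7978845608028654 * (xs + 0.044715 * xs ** 3)))
    exact = np.array([0.5 * v * (1.0 + math.erf(v / math.sqrt(2.0))) for v in xs])
    assert_allclose(approx, exact, atol=1e-2)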
@with_seed()
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
@with_seed()
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y._bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
@with_seed()
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y._bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
@with_seed()
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
@with_seed()
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z._simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0]
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar._simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar._simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0]
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0]
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
@with_seed()
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out, reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y._simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out, reference(xa, dtype=xa.dtype))
@with_seed()
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed._simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0], np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"], np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
@with_seed()
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0], data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad, 2.0 * data_tmp)
@with_seed()
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = np.zeros(shape)  # the gradient of sign() is zero almost everywhere
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
@with_seed()
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_maximum_minimum():
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
exe_test = test._bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
@with_seed()
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test._bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1, npout_grad1)
@with_seed()
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv._bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0], rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv._bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
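# Added sketch (assumption, plain linear algebra, a hypothetical helper not used above):
# with shared weights the Deconvolution behaves like the transpose C^T of the Convolution
# map C, so the composed operator C^T C is symmetric. Backprop of an out-grad g through a
# linear map applies its transpose, giving (C^T C)^T g = C^T C g, so feeding g = x makes
# the data gradient equal the forward output -- the property the check above asserts.
def _conv_deconv_transpose_demo(n=6, m=4, seed=0):
    rng = np.random.RandomState(seed)
    C = rng.randn(m, n)          # stand-in for the convolution as a linear map
    A = C.T.dot(C)               # conv followed by deconv with shared weights
    x = rng.randn(n)
    out = A.dot(x)               # forward output
    grad_x = A.T.dot(x)          # backward with out_grad = x
    assert_allclose(out, grad_x)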
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv._bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv._bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1], deconv_args_grad[1], rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv._bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
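# Added note (assumption, illustration only): the shape assertion above follows the usual
# transposed-convolution size relation, out = (in - 1)*stride - 2*pad + kernel + adj.
# E.g. in=4, kernel=3, stride=2, pad=1, adj=1 gives (4-1)*2 - 2 + 3 + 1 = 8, the
# default_target_size used when no explicit target_shape is requested.
def _deconv_out_len(in_len, kernel, stride, pad, adj):
    return (in_len - 1) * stride - 2 * pad + kernel + adj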
@with_seed()
@pytest.mark.serial
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
@with_seed()
def test_deconvolution_forward_with_bias():
"""Check if deconvolution forward can work well with bias=True
"""
def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
exe = y._simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
exe.forward(is_train=False)
o = exe.outputs[0]
t = o.asnumpy()
check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up._bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
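# Added sketch (assumption, plain numpy, not used above): why a squared upsampling factor
# such as root_scale**2 * scale**(2*k) shows up in the gradient check. Nearest upsampling
# by s repeats each pixel over an s-by-s block (np.kron with a ones block), and its
# backward pass sums each block, so pushing the forward output back through backward
# multiplies every input element by s**2.
def _np_nearest_upsample(x, s):
    return np.kron(x, np.ones((s, s), dtype=x.dtype))

def _np_nearest_upsample_backward(grad, s):
    h, w = grad.shape[0] // s, grad.shape[1] // s
    return grad.reshape(h, s, w, s).sum(axis=(1, 3))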
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
def _init_bilinear(arr, f):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up._bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
@with_seed()
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
@with_seed()
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
# bilinear upsampling takes only 1 data and 1 weight
# multi input mode is not applicable
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
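# Worked example (added for clarity, a hypothetical helper not used by the tests): the
# bilinear weight initializer in check_bilinear_upsampling_with_shape factorizes per axis.
# For root_scale f the kernel length is 2*f - f % 2 and the 1-D weights are
# 1 - |x/f - c| with c = (2*f - 1 - f % 2)/(2*f); e.g. f=2 gives [0.25, 0.75, 0.75, 0.25].
def _bilinear_kernel_1d(f):
    k = 2 * f - f % 2
    c = (2. * f - 1 - f % 2) / (2. * f)
    return np.array([1 - abs(x / float(f) - c) for x in range(k)], dtype='float32')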
@with_seed()
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
s = (shape[1],)  # 1-element tuple: per-channel parameter shape
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@xfail_when_nonstandard_decimal_separator
@with_seed()
@pytest.mark.parametrize('op_name', ['BatchNorm', 'SyncBatchNorm'])
@pytest.mark.parametrize('shape', [(4, 2), (4, 3, 4),
(4, 6, 4, 5), (4, 5, 6, 4, 5)])
@pytest.mark.parametrize('fix_gamma', [False, True])
@pytest.mark.parametrize('cudnn_off', [False, True])
@pytest.mark.parametrize('output_mean_var', [False, True])
def test_batchnorm(op_name, shape, fix_gamma, cudnn_off, output_mean_var):
if op_name == 'BatchNorm':
op = mx.nd.BatchNorm
elif op_name == 'SyncBatchNorm':
op = mx.nd.contrib.SyncBatchNorm
else:
raise ValueError(f'Not supported {op_name}')
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(axis,
data_grad_req, gamma_grad_req, beta_grad_req):
kwargs = dict(output_mean_var=output_mean_var)
if op_name == 'SyncBatchNorm':
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
if not fix_gamma:
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad(grad_req=gamma_grad_req)
else:
bn_gamma = mx.nd.ones(shape=(nch,))
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad(grad_req=beta_grad_req)
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
data = mx.nd.random.uniform(shape=shape)
data.attach_grad(grad_req=data_grad_req)
adX, adW, adb = 0, 0, 0
is_train = data_grad_req != 'null' or \
(not fix_gamma and gamma_grad_req != 'null') or \
beta_grad_req != 'null'
for _ in range(num_iters):
if data_grad_req != 'add':
data = mx.nd.random.uniform(shape=shape)
data.attach_grad(grad_req=data_grad_req)
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=fix_gamma, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
if is_train:
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
m = np.prod(shape) / shape[axis]
# cudnn uses m-1 in the denominator of its sample variance calculation, not m
sample_var_adjust = 1.0 if cudnn_off or fix_gamma else m / (m-1)
running_var = running_var * momentum + \
data_var_flat * sample_var_adjust * (1 - momentum)
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
adX = dX if data_grad_req != 'add' else adX + dX
adW = dW if gamma_grad_req != 'add' else adW + dW
adb = db if beta_grad_req != 'add' else adb + db
atol, rtol = 5e-2, 5e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
if is_train:
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
if data_grad_req != 'null':
assert_almost_equal(data.grad.asnumpy(),
adX.asnumpy(), atol=atol, rtol=rtol)
if not fix_gamma:
if gamma_grad_req != 'null':
assert_almost_equal(
bn_gamma.grad.asnumpy(), adW.asnumpy(),
atol=atol, rtol=rtol)
else:
assert((bn_gamma.asnumpy() == 1).all())
if beta_grad_req != 'null':
assert_almost_equal(
bn_beta.grad.asnumpy(), adb.asnumpy(), atol=atol, rtol=rtol)
grad_reqs = ['write'] if len(shape) != 4 else ['null', 'write', 'add']
for data_grad_req in grad_reqs:
for gamma_grad_req in grad_reqs:
if fix_gamma and gamma_grad_req != 'null':
continue
for beta_grad_req in grad_reqs:
for axis in range(len(shape)):
_test_batchnorm_impl(axis,
data_grad_req, gamma_grad_req, beta_grad_req)
@with_seed()
def test_groupnorm():
acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
def x_hat_helper(x, num_groups, eps):
dtype = x.dtype
dshape = x.shape
assert len(dshape) == 4
acc_type = acc_types[str(dtype)]
new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
data = x.reshape(new_shape)
mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
return x_hat, mean, std
def np_groupnorm(data, gamma, beta, num_groups, eps):
new_param_shape = (1, dshape[1], 1, 1)
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
out = x_hat.reshape(dshape) * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
return out, mean, std
def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
new_shape = x_hat.shape
dshape = data.shape
dtype = data.dtype
new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
new_param_shape = (1, dshape[1], 1, 1)
acc_type = acc_types[str(dtype)]
ograd = ograd.reshape(new_shape)
data = data.reshape(new_shape)
gamma = gamma.reshape(new_param_shape)
beta = beta.reshape(new_param_shape)
mean = mean.reshape(new_moments_shape)
std = std.reshape(new_moments_shape)
beta_grad = np.sum(ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
gamma_grad = np.sum(x_hat * ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
x_hat_grad = ograd * gamma.reshape(1, num_groups, dshape[1] // num_groups, 1, 1)
ograd_mult = x_hat_grad / std
red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = ograd_mult - red_out
red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = data_grad - x_hat * red_out
return data_grad.reshape(dshape), gamma_grad, beta_grad
batch_size = random.randint(1, 8)
num_groups = random.randint(2, 3)
num_channels = random.randint(2, 3) * num_groups
height = random.randint(1, 5)
width = random.randint(1, 5)
dshape = (batch_size, num_channels, height, width)
param_shape = (num_channels,)
temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
np_data = np.random.uniform(0.2, 1.0, dshape)
np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
np_beta = np.random.uniform(-1.0, 1.0, param_shape)
data_sym = mx.sym.Variable("data")
gamma_sym = mx.sym.Variable("gamma")
beta_sym = mx.sym.Variable("beta")
for dtype in [np.float16, np.float32, np.float64]:
eps = 1e-2 if dtype == np.float16 else 1e-5
mx_data = mx.nd.array(np_data, dtype=dtype)
mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
mx_beta = mx.nd.array(np_beta, dtype=dtype)
np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
num_groups=num_groups,
eps=eps)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=True)
check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-3 if dtype == np.float16 else 1e-4, dtype=dtype)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=False)
np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
np_mean, np_std,
num_groups, eps)
check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd, dtype=np_ograd.dtype)],
[np_data_grad, np_gamma_grad, np_beta_grad],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype)
@with_seed()
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1._simple_bind(default_context(), x=shape)
exe2 = y2._simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@pytest.mark.skip(reason="Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
@with_seed()
def test_depthwise_convolution():
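    # Depthwise convolution (num_group == num_channels) checked against the same
    # slice-convolve-concat reference over kernel/stride/pad/input-size combinations in 1-D and 2-D.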
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1._simple_bind(dev, x=shape)
exe2 = y2._simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3)
@with_seed()
def test_convolution_independent_gradients():
# NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
# GPU context will be enabled after figuring out the possible issue tracked at
# https://github.com/apache/incubator-mxnet/issues/15638.
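    # For every combination of per-variable grad_req ("null"/"write"/"add"), outputs must match a
    # baseline executor, "null" gradients must stay zero, and gradients requested with the same
    # req as the baseline must equal the baseline's gradients.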
ctx = mx.cpu()
atol = 1.0e-3
rtol = 1.0e-3
reqs = ["null", "write", "add"]
var_names = ["x", "w", "b"]
dims = [1, 2]
num_bases = [1, 8]
kernel_xs = [3, 5]
stride_xs = [1, 2]
pad_xs = [0, 1]
in_sizes = [7, 32]
no_biases = [True, False]
for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \
itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
# Prepare params shape
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
x_shape = (2, num_base) + (in_size,) * dim
w_shape = (num_filter, num_base) + kernel
# Symbols definition
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b') if not no_bias else None
conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
for req_kind in reqs:
# Binding args for conv with possible dependent gradients
base_args = {
'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
args1 = copy.deepcopy(base_args)
grad1 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req1 = [req_kind] * 3
grad_req1 = dict(zip(var_names, grad_req1))
exe1 = conv._bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
# Binding args for conv with independent gradients
args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1`
grad2 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
exe2 = conv._bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
exe2.forward(is_train=True)
np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
exe2.backward(exe2.outputs[0])
for var_name in var_names:
if var_name == "b" and no_bias:
continue
if grad_req2[var_name] == "null":
exe2_var_grad = grad2[var_name].asnumpy()
np.testing.assert_allclose(exe2_var_grad,
np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
if grad_req2[var_name] != grad_req1[var_name]:
continue
np.testing.assert_allclose(args1[var_name].asnumpy(),
args2[var_name].asnumpy(), rtol=rtol, atol=atol)
np.testing.assert_allclose(grad1[var_name].asnumpy(),
grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
def gen_broadcast_data(idx):
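    # Return a pair of numpy arrays with broadcast-compatible shapes: hand-picked shape pairs for
    # small idx, randomly generated ones (with some dims collapsed to 1) otherwise.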
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
        # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
    d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
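    # Bind the symbol on generated inputs, run forward and compare against the numpy baseline;
    # optionally also check the corresponding mx.nd function. On mismatch, log the offending
    # values (including their hex encodings) before asserting.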
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol._bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
            logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
            logging.error('input b hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
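    # Feed a random output gradient, compute the expected input gradients with the baseline and
    # reduce them over broadcast axes, then compare against the gradients MXNet produces.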
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol._bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
o = y.forward(is_train=True)
y.backward([mx.nd.array(out, dtype=o[0].dtype)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
@with_seed()
def test_binary_op():
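    # Elementwise binary operators (+, -, *, /, %, **, !=) on same-shape inputs, checked against
    # numpy baselines for both forward results and gradients.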
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
        # Python and numpy compute '%' in double precision, so cast to float64 to avoid numerical
        # errors. This test was flaky with float32 (seeds 1688524483, 1768433044).
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
@with_seed()
def test_broadcast_binary_op():
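    # Broadcasting binary operators are checked against numpy forward/backward baselines; several
    # of them additionally run a numeric-gradient check that stays away from the y=x boundary.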
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
        is no gradient definition at those singularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
        # Python and numpy compute '%' in double precision, so cast to float64 to avoid numerical
        # errors. This test was flaky with float32 (seeds 1688524483, 1768433044).
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
        # '%' is sensitive to the precision of the calculation, hence the float64 cast above and
        # the loose atol on the forward check.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
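    # Probe a dilated convolution with a unit impulse: the response and the input gradient must
    # both sum to the number of kernel elements, and nudging the kernel along its gradient must
    # change the output at the impulse location by approximately the gradient's sum.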
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net._bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net._bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
    impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at the impulse center
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net._bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net._bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
    assert(abs(out[center] - np.sum(kernel_gradient) - out_orig[center]) < 0.001)
@with_seed()
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 3D
for dil in [ (1,1,1), (2,2,2), (3,3,3) ]:
for ks in [ (3,3,3), (4,4,4), (2,3,4), (3,2,4), (1,1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
@with_seed()
@pytest.mark.serial
@pytest.mark.parametrize('src_shape,shape_args,reverse,dst_shape', [
((2, 3, 5, 5), (0, -1), False, (2, 75)),
((2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)),
((5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)),
((2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)),
((2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)),
((2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)),
((2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)),
((2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)),
((2, 3, 5, 6), (-3, -3), False, (6, 30)),
((2, 3, 5, 6), (-3, -1), False, (6, 30)),
((64,), (-4, 16, 4), False, (16, 4)),
((64,), (-4, 16, -1), False, (16, 4)),
((64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)),
((2, 3, 5, 5), (0, -1), True, (5, 30)),
((2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)),
((5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)),
((2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)),
((2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)),
((2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)),
((2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)),
((2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)),
((2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)),
((2, 3, 5, 6), (-3, -3), True, (6, 30)),
((64,), (16, 4, -4), True, (16, 4)),
((64,), (16, -1, -4), True, (16, 4)),
((1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16))
])
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
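    # Reshape with the special codes (0, -1, -2, -3, -4) and the reverse flag: verify inferred
    # shapes, forward/backward round-trips, and shape inference when one source dim is held out as 0.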
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net._simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
@with_seed()
def test_reshape_old():
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net._simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
@with_seed()
def test_reshape_like():
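    # reshape_like with lhs/rhs begin/end ranges: inferred shape, forward and backward are checked
    # for each case, followed by the legacy full-shape form.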
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net._simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
@with_seed()
def test_reduce():
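    # Reduction operators (sum, mean, prod, nansum, nanprod, max, min, norm) over random shapes,
    # axes, keepdims and exclude settings, checked against np_reduce for forward and backward.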
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
            # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b._bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
# check forward
assert_almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, rtol=1e-4, atol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
# check backward
assert_almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, rtol=1e-4, atol=1e-4)
test_none_axis = [True, False]
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
@with_seed()
def test_broadcast():
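    # broadcast_axis / broadcast_to / broadcast_like: forward must replicate the input along the
    # chosen axes, and backward must sum the output gradient back over those axes.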
sample_num = 200
for i in range(sample_num):
        # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
target_shape_with_zero = list(target_shape)
for idx in range(len(target_shape_with_zero)):
if idx not in axis:
target_shape_with_zero[idx] = 0
break
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast._bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0], groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd, grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_to_with_zero)
test_broadcasting_ele(sym_bcast_like)
@with_seed()
def test_transpose():
for ndim in range(1, 10):
for t in range(5):
dims = list(np.random.randint(1, 5, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
@pytest.mark.serial
def test_pseudo2dtranspose():
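    # Exercise transposes whose axis permutation moves one contiguous block of axes to the end
    # (a "pseudo-2D" transpose), for int8/half/int32/int64 inputs, compared against numpy.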
    def getTwoInts(low, high):
        # Return two distinct sorted integers in [low, high); avoid shadowing the mx module name.
        n1 = np.random.randint(low, high)
        n2 = np.random.randint(low, high-1)
        n2 = n2 if n2 < n1 else n2+1
        return tuple(np.sort([n1, n2]))
def getTranspAxes(ndim):
axes = list(range(ndim))
n1, n2 = getTwoInts(0,ndim)
return tuple(axes[:n1]+axes[n2:]+axes[n1:n2])
for ndim in range(2, 7):
for dt in ['int8', 'half', 'int32', 'int64']:
for _ in range(5):
dims = list(np.random.randint(5, 20, size=ndim))
axes = getTranspAxes(ndim)
x = mx.nd.array(np.random.normal(size=dims), dtype=dt)
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
@with_seed()
@pytest.mark.serial
def test_big_transpose():
n = [1]
d = list(np.random.randint(132, 160, size=1))
hw = list(np.random.randint(256, 320, size=2))
c = [10]
dims = n + d + hw + c
axes = (0,4,1,2,3)
x_np = np.random.normal(size=dims).astype('uint8')
x = mx.nd.array(x_np, dtype='uint8')
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x_np, axes=axes), y.asnumpy().astype('uint8'))
axes = (0,2,3,4,1)
z = mx.nd.transpose(y, axes=axes)
assert_allclose(x_np, z.asnumpy().astype('uint8'))
@with_seed()
@pytest.mark.serial
def test_larger_transpose():
x = mx.nd.random.normal(shape=(50,51))
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
@with_seed()
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
@with_seed()
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y._bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y._bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
@with_seed()
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y._bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
@with_seed()
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
@with_seed()
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
@with_seed()
def test_broadcast_like_different_types():
x = mx.nd.zeros((2, 1))
y = mx.nd.ones((2, 2))
y = mx.nd.array(y).astype('int32')
z = mx.nd.broadcast_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0]])
assert x.dtype == z.dtype
@with_seed()
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
@with_seed()
def test_stn():
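    # The localization net is forced (zero conv/fc weights, fixed fc bias) to output the affine
    # theta [0.5, 0, 0, 0, 0.5, 0], so the SpatialTransformer output must equal the central
    # half-size crop of the input and the gradient must flow back only into that crop.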
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]:  # for convenience of testing, the third and fourth input dims should be 4x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn._bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad, grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
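    # A near-identity affine transform must stay within the valid sampling range; this just has to
    # run forward and backward without error.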
target_shape = (
28,
28,
)
src_shape = (
42,
42,
)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
data_array = np.zeros((
1,
1,
) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn._bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
executor.backward(mx.nd.ones((
1,
1,
) + target_shape))
@with_seed()
def test_dot():
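    # Dense dot product: forward/backward compared to hand-computed numpy results over small
    # shapes, plus numeric-gradient checks for the transpose_a / transpose_b variants.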
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
tol = 1e-2 if data_type == 'float16' else 1e-3
for m in range(1, 5):
for k in range(1, 5):
                    if ndim == 1 and k != 1:
                        continue  # for 1-D dot the k loop adds nothing, so skip the redundant cases
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c._simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy, rtol=tol, atol=tol)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=tol, atol=tol)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=tol, atol=tol)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
@with_seed()
def test_batch_dot():
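    # batch_dot with random transpose_a/transpose_b flags: outputs and gradients are compared to
    # per-sample numpy dot products, for both 'write' and 'add' gradient requests.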
ctx = default_context()
dtypes = ['float32', 'float64']
if ctx.device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
a_init_grad_npy = a_init_grad_npy.astype(data_type)
b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c._simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c._simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, dtype=outputs[0].dtype, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, dtype=exe_add.outputs[0].dtype, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'],
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'],
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
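    # Build a Correlation symbol on 'img1'/'img2' Variables; the data1/data2 arguments are not
    # used here, the actual arrays are bound by the caller.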
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
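    # Reference numpy implementation of the Correlation forward pass; also returns the padded
    # copies of both inputs so the backward reference can reuse them.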
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
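    # Reference numpy implementation of the Correlation backward pass: accumulate gradients into
    # the padded buffers, then crop the padding off before returning.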
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
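    # Compare MXNet's Correlation forward/backward against the numpy reference for one configuration.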
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1._simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0], forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'], grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4)
@with_seed()
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
        if arg_type1[0] != np.dtype(dtype) or arg_type1[1] != np.dtype(dtype) or out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from a is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
        arg_type2, out_type2, _ = corr.infer_type(b=dtype)
        if arg_type2[0] != np.dtype(dtype) or arg_type2[1] != np.dtype(dtype) or out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 0,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 0,pad_size = 5,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 0,pad_size = 5,is_multiply = True, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 0,stride2 = 1,pad_size = 4,is_multiply = True, dtype = dtype)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
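    # Compare mx.sym.Pad against np.pad for the given pad_width and mode, then run a finite-difference gradient check.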
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y._bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0]
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
@with_seed()
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
def np_instance_norm(data, weight, bias, eps):
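    # NumPy reference for InstanceNorm: normalize each (sample, channel) slice over its spatial axes,
    # then scale by weight and shift by bias.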
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y._bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0]
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
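    # Check mx.sym.L2Normalization against a NumPy reference for the 'channel', 'spatial' and 'instance' modes.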
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out._simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
    assert_almost_equal(exe.outputs[0], np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
@with_seed()
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
forward_check_eps=1E-3, backward_check_eps=1E-3,
npy_grad_check=True, finite_grad_check=True):
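    # Compare mx.sym.LayerNorm with the NumPy references below: forward output, optional finite-difference
    # gradients, and optional analytic gradients for both grad_req='write' and grad_req='add'.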
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
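        # With w = out_grad * gamma / std and c = (data - mean) / std (mean/std taken along `axis`):
        #   data_grad  = w - mean(w) - c * mean(w * c)                  (means along `axis`)
        #   gamma_grad = sum(out_grad * c), beta_grad = sum(out_grad)   (sums over all other axes)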
if axis < 0:
axis += data.ndim
exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
data_mean = data.mean(axis=axis, keepdims=True)
data_var = data.var(axis=axis, keepdims=True)
data_std = np.sqrt(data_var + eps)
centered_data = (data - data_mean) / data_std
gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
/ data_std
data_grad = w - w.mean(axis=axis, keepdims=True)\
- centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
gamma_grad = gamma_grad.reshape((-1,))
beta_grad = beta_grad.reshape((-1,))
return data_grad, gamma_grad, beta_grad
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s._simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd, forward_check_eps, forward_check_eps)
if finite_grad_check:
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
if npy_grad_check:
# Test for grad_req = write
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
exe = out_s._simple_bind(ctx, data=in_shape, grad_req='write')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad =\
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
# Test for grad_req = add
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
exe = out_s._simple_bind(ctx, data=in_shape, grad_req='add')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.grad_dict['data'][:] = init_data_grad
exe.grad_dict['gamma'][:] = init_gamma_grad
exe.grad_dict['beta'][:] = init_beta_grad
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad = \
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(),
gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
@with_seed()
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([2,3,4], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
np.int32: 'int32', np.int64: 'int64'}
for enforce_safe_acc in ['1', '0']:
with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc):
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
accumulation_type = acc_type[dtype]
if enforce_safe_acc == "0":
backward_dtype = dtype
out_dtype = dtype_to_str[dtype]
accumulation_type = dtype
skip_backward = 'int' in out_dtype
in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
                            npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                            npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                                npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                                npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-2 if dtype is np.float16 else 1e-3,
atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
@pytest.mark.parametrize('enforce_safe_acc', ['1', '0'])
@pytest.mark.parametrize('dtype,forward_check_eps,backward_check_eps,in_shape_l,finite_grad_check_l', [
(np.float16, 1E-2, 1E-2, [(10, 6, 5), (10, 10)], [True, True]),
(np.float32, 1E-3, 1E-3, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]),
(np.float64, 1E-4, 1E-4, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False])
])
def test_layer_norm(enforce_safe_acc, dtype, forward_check_eps, backward_check_eps,
in_shape_l, finite_grad_check_l):
with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc):
for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
if dtype == np.float16:
npy_grad_check = False
else:
npy_grad_check = True
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps,
backward_check_eps=backward_check_eps,
npy_grad_check=npy_grad_check,
finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
def check_sequence_func(ftype, mask_value=0, axis=0):
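    # Compare SequenceLast / SequenceMask / SequenceReverse against the NumPy references above, with and
    # without use_sequence_length, and run numeric gradient checks for grad_req 'write', 'add' and 'null'.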
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@with_seed()
@pytest.mark.skip(reason="Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
@with_seed()
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev._bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev._bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
@with_seed()
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
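    # Check a binary elementwise op: forward against forward_numpy_call and each input gradient
    # against grad_init * backward_numpy_call{1,2}(data1, data2).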
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test._bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
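    # Check a unary elementwise op: forward against forward_numpy_call and the input gradient
    # against grad_init * backward_numpy_call(data).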
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
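    # Forward-only check for rounding ops (rint, fix); their gradient is zero almost everywhere, so no backward check.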
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
@with_seed()
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])])
@with_seed()
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x._bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0], np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out)
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s._bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0], np.array([0,1,2,3,4]))
def test_arange_like():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
axis_list = [0, -1]
for sh in shape_list:
for axis in axis_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
np_out = np.arange(start=0, stop=sh[axis])
assert_almost_equal(nd_out.asnumpy(), np_out)
def test_arange_like_without_axis():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
for sh in shape_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0)
np_out = np.arange(start=0, stop=val.size)
assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
test_arange_like()
test_arange_like_without_axis()
@with_seed()
def test_order():
ctx = default_context()
def gt_topk(dat, axis, ret_typ, k, is_ascend):
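        # NumPy reference for sort/topk/argsort/argmax/argmin: returns the top-k values, their indices,
        # or a 0/1 mask along `axis`.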
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
for dtype in [np.float16, np.float32, np.float64]:
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(dtype)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5, is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
                                                     is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
@with_seed()
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b._simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0], a_npy)
exe.backward() # No error if BlockGrad works
@with_seed()
def test_take_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad)
@with_seed()
@pytest.mark.parametrize('mode,out_of_range', [
('clip', True),
('wrap', True),
('raise', False)
])
@pytest.mark.parametrize('data_ndim', range(1, 5))
@pytest.mark.parametrize('idx_ndim', range(1, 4))
def test_take(mode, out_of_range, data_ndim, idx_ndim):
def grad_helper(grad_in, axis, idx):
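        # Reference gradient for take: accumulate 1.0 into grad_in at position idx along the given axis.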
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result._simple_bind(default_context(), a=data_shape,
indices=idx_shape)
data_real = np.random.normal(size=data_shape).astype('float32')
if out_of_range:
idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
if mode == 'raise':
idx_real[idx_real == 0] = 1
idx_real *= data_shape[axis]
else:
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
if out_of_range and mode == 'raise':
try:
mx_out = exe.outputs[0].asnumpy()
except MXNetError as e:
return
else:
# Did not raise exception
assert False, "did not raise %s" % MXNetError.__name__
assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
if mode == 'clip':
i = np.clip(i, 0, data_shape[axis])
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'], grad_in)
@with_seed()
def test_grid_generator():
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid._simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0]
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'], grad_est)
# check addto
exe = grid._simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'], grad_est + grid_grad_npy)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid._simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'], grad_est, rtol=1e-3)
# check addto
exe_add = grid._simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
@with_seed()
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r, data.asnumpy()[np.arange(n), x.asnumpy()])
@with_seed()
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y._simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
assert exe.outputs[0].dtype == dsttype
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0], X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0], X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
@with_seed()
def test_cast_float32_to_float16():
input_np = np.array(list(get_cast_op_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
def check_cast(op, input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float32)
sym = op(x, dtype=np.float16)
ctx = default_context()
exe = sym._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
exe.forward(is_train=True)
assert exe.outputs[0].dtype == np.float16
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
check_cast(mx.sym.Cast, input_np, expected_output)
if default_context().device_type == 'gpu':
check_cast(mx.sym.amp_cast, input_np, expected_output)
@with_seed()
def test_amp_multicast():
if default_context().device_type == 'cpu':
return
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res._bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
out1, out2, out3 = exe.outputs
assert out1.asnumpy().dtype == np.float32
assert out2.asnumpy().dtype == np.float32
assert out3.asnumpy().dtype == np.float32
def check_amp_multicast(input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
        for in_val, model_out_val, expected_val in zip(input_np, sym_output, expected_output):
            assert (model_out_val == expected_val) or \
                   (np.isnan(model_out_val) and np.isnan(expected_val)), \
                   'amp_multicast mismatch: with input value {}, model output = {}, expected = {}'.format(
                       in_val, model_out_val, expected_val)
input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
expected_output = input_np.astype(np.float32)
check_amp_multicast(input_np, expected_output)
@with_seed()
def test_all_finite():
data = mx.sym.Variable("data", dtype=np.float32)
data2 = mx.sym.Variable("data2", dtype=np.float32)
finite_arr = mx.nd.array([[0, 0]])
inf_arr = mx.nd.array([[np.inf, np.inf]])
z = mx.sym.all_finite(data)
ctx = default_context()
exe = z._bind(ctx, {'data': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
exe = z._bind(ctx, {'data': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z._bind(ctx, {'data': finite_arr, 'data2': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z._bind(ctx, {'data': finite_arr, 'data2': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
@with_seed()
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats)
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis)
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test._bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
@with_seed()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
@with_seed()
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
with mx.np_shape():
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test._bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_shape():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
@with_seed()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
with mx.np_shape():
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32
).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_shape():
test_empty_indices()
test_zero_depth()
@with_seed()
def test_where():
def get_forward_expected_output(condition, x, y):
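        # NumPy reference for where: elementwise select when condition has the same shape as x,
        # row-wise select when condition is a vector of length x.shape[0].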
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
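        # Reference gradients for where: grad_in flows to x where condition != 0 and to y elsewhere;
        # condition itself gets zero gradient.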
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
        grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym._simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx.astype('float32'))
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym._simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx.astype('float32'))
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
        assert same(expect_out, out)
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
@with_seed()
def test_softmin():
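# softmin(x) is softmax(-x), hence the reference forward is np_softmax(-data);
# the expected backward is zero because the outputs of any softmax-like op sum to 1,
# so an all-ones upstream gradient cancels out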
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
@with_seed()
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
@with_seed()
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)], rtol=1e-3, atol=1e-4)
check_numeric_gradient(sym, [data], rtol=1e-1, atol=1e-2)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1._bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
assert_almost_equal(ndarr, true_output, rtol=1e-5, atol=1e-5)
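# axis=1 has size 1 for all of the inputs below, so every softmax output should be
# exactly 1.0; the point is that the max-subtraction trick keeps values as extreme
# as +/-3.4e38 from turning into NaN or Inf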
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
@with_seed()
@with_environment('MXNET_SAFE_ACCUMULATION', '1')
def test_softmax_dtype():
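# With MXNET_SAFE_ACCUMULATION=1 the softmax family is expected to accumulate its
# reductions in a wider type, so the low-precision input should closely track the
# higher-precision reference in both the forward and the backward pass.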
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
assert_almost_equal(dtype_softmax, ref_softmax, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
assert_almost_equal(dtype_input.grad, ref_input.grad, rtol=grad_rtol, atol=grad_atol)
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
@with_seed()
def test_softmax_with_length():
def np_softmax_with_length(data, length):
res = np.zeros(data.shape)
for i in range(length.shape[0]):
for j in range(length.shape[1]):
leng = int(length[i, j])
res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
return res
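# softmax with use_length=True should normalize only over the first length[i, j]
# entries along axis 1 and leave the padded tail at zero, which is what the
# reference above computes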
ndim = 3
shape = rand_shape_nd(ndim, dim=10)
len_shape = list(shape)
del len_shape[1]
len_shape = tuple(len_shape)
for dtype in [np.float16, np.float32, np.float64]:
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_length = np.random.randint(1, shape[1] + 1, len_shape)
mx_length = mx.nd.array(np_length, dtype=np.int32)
np_out = np_softmax_with_length(np_data, np_length)
data = mx.sym.Variable("data")
length = mx.sym.Variable("length")
mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
location = {"data": mx_data, "length": mx_length}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
[np.zeros(shape), np.zeros(len_shape, dtype=np.int32)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3, dtype="asnumpy")
@with_seed()
def test_pick():
def test_pick_helper(index_type=np.int32):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
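# build the numpy fancy-indexing arrays that mimic pick along `axis`:
# broadcastable aranges on every other axis and the (wrapped) index on `axis` itself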
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
expected = data[exp]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth, contrib=False):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
if contrib:
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
else:
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc._bind(ctx=default_context(), args=[acts_nd, labels_nd])
# forward with gradient calculation (training mode)
exe.forward(is_train=True)
out_train = exe.outputs[0].copy()
# forward without gradient calculation (inference mode)
exe.forward(is_train=False)
out_test = exe.outputs[0]
# make sure the losses computed in both modes are the same
assert_almost_equal(out_train, out_test)
# test against ground truth, if available
if loss_truth is not None:
    assert_almost_equal(out_train, loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
@with_seed()
def test_ctc_loss():
# Test 1: identical batch entries must give identical losses; values checked against Torch WarpCTC
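# acts is laid out as (sequence length, batch size, alphabet size); both batch entries are identical here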
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts, labels, true_loss, contrib=contrib)
# Test 2: distinct batch entries, loss values taken from Torch WarpCTC
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels2, true_loss, contrib=contrib)
# Test 3: check that an integer dtype can be used for the labels
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels3, true_loss, contrib=contrib)
@with_seed()
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
for i in range(seq_len * batch_size):
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss, expected_loss)
@with_seed()
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label, contrib=False): # from tf
vocab_size = 5
max_label_len = 5
padding_mask = -1 + (blank_label == 'first')  # 0 when the blank is at index 0, otherwise -1
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
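# targets are padded to max_label_len with padding_mask and shifted up by one
# when the blank symbol occupies index 0 ('first')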
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
if contrib:
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
else:
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l, loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad, grad_truth, atol=1e-5, rtol=1e-5)
for contrib in [False, True]:
for label in ['first', 'last']:
check_ctc_loss_grad(label, contrib=contrib)
@with_seed()
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
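# the expected values below follow a symmetric int8 mapping with scale 127
# (since max(|min0|, |max0|) = 1): e.g. 0.1392 -> round(0.1392 * 127) = 18,
# which dequantizes back to 18 / 127 ~= 0.1417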
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
print(a_.asnumpy())
print(a_real.asnumpy())
assert same(qa.asnumpy(), qa_real.asnumpy())
assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2)
@with_seed()
def test_index_copy():
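# contrib.index_copy returns a copy of x with row index[k] replaced by row k of t;
# gradients flow to t everywhere and to x only at the rows that were not overwritten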
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
@with_seed()
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
# test 0-size output
mx.set_np_shape(True)
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 0, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.zeros((0, 3))
expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
mx.set_np_shape(False)
# test gradient
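# with grad_req='add', every backward pass of sum(boolean_mask(a, mask)) adds the
# mask (broadcast over the trailing axes) to a.grad, so after T passes the
# accumulated gradient is T * (bi + ci)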
shape = (100, 30)
a = mx.nd.random.randint(0, 100, shape=shape)
a.attach_grad()
bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
mx_grad = mx.nd.zeros_like(a)
mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
T = 3
for _ in range(T):
with mx.autograd.record():
b = mx.nd.contrib.boolean_mask(a, bi)
c = mx.nd.contrib.boolean_mask(a, ci)
su = b.sum() + c.sum()
su.backward()
grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
grad = np.tile(grad, (1,) + shape[1:])
# the gradient was accumulated over T backward passes
grad *= T
assert_allclose(a.grad.asnumpy(), grad)
a_np = a.asnumpy()
assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
@with_seed()
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
# helper function to identify inputs likely to fail the check_numeric_gradient tolerance test
# due to finite difference method inaccuracies or function discontinuities at the origin
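# The check below uses the symmetric (central) difference quotient
#   f'(x) ~= (f(x + eps/2) - f(x - eps/2)) / eps
# and flags any x whose expected relative error against the analytic gradient
# f_grad(x) already exceeds the dtype's default rtol, or which lies within eps/2 of 0.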
def bad_input_finder(f, f_grad, dtype):
eps = default_numeric_eps()[np.dtype(dtype)]
rtol = default_rtols()[np.dtype(dtype)]
def expected_relative_error(x):
fd_gradient = (f(x+eps/2) - f(x-eps/2)) / eps
return abs(fd_gradient/f_grad(x) - 1)
def is_fd_problem_input(x):
return abs(x) < eps/2 or expected_relative_error(x) > rtol
return np.vectorize(is_fd_problem_input)
@with_seed()
def test_reciprocal_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(np.reciprocal,
lambda x: -np.reciprocal(x)**2, np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
@with_seed()
def test_cbrt_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(np.cbrt,
lambda x: 1./(3 * np.cbrt(x)**2), np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
@with_seed()
def test_rcbrt_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(lambda x: 1./np.cbrt(x),
lambda x: -1./(3 * np.cbrt(x)**4), np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
@with_seed()
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output, expected_output, rtol=rtol, atol=atol)
assert_almost_equal(x2.grad, expected_grad, rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of default implementation of
# infer storage in custom operator
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32))
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/17467")
@with_seed()
def test_custom_op_fork():
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
if not sys.platform.startswith('win'): # no fork in windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
@with_seed()
def test_custom_op_exc():
# test exception handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger error by invalid input shapes of operands
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc2)
# 3. error in real execution
if default_context().device_type == 'cpu':
def custom_exc3():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc3)
def custom_exc4():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
_build_dot_custom(f, 'Dot4')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc4)
@with_seed()
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
feat_height = int(image_height * spatial_scale)
feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution():
for num_batch in [1, 2]:
for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
for input_height, input_width in itertools.product([5, 6], [5, 6]):
for dilate in [(1, 1), (2, 2)]:
for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data").as_np_ndarray()
offset_data_var = mx.symbol.Variable(name="offset_data").as_np_ndarray()
weight_var = mx.symbol.Variable(name="weight").as_np_ndarray()
bias_var = mx.symbol.Variable(name="bias").as_np_ndarray()
op = mx.sym.npx.deformable_convolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
# So far only a GPU implementation is available
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0), numeric_eps=1.0/64)
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
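# for every pooling bin the transformation offsets are re-drawn until no sample
# location falls within 1e-3 of an integer grid line, where bilinear interpolation
# is not differentiable and numeric gradient checks would fail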
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
# if the following condition holds, the sampling location is not differentiable
# therefore we need to re-do the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
@with_seed()
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
feat_height = int(image_height * spatial_scale)
feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
# at certain points the bilinear interpolation function is not differentiable;
# to avoid this we check that the sampling locations stay on valid (differentiable) points
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
# So far only a GPU implementation is available
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = None, atol_fw = None,
rtol_bw = None, atol_bw = None, num_eps = None):
def np_random_data(shape, dtype=np.float32):
return np.random.uniform(low=-0.5,
high=0.5, size=shape).astype(dtype)
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
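# rep_3x tiles a single (m, n) matrix into a (3, 1, m, n) batch of identical matrices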
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np_random_data(shape1, dtype)
data_in2 = np_random_data(shape2, dtype)
data_in3 = np_random_data(shape3, dtype)
data_in4 = np_random_data(shape4, dtype)
# Check all transpositions of gemm operator.
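# linalg.gemm computes alpha * op(A) @ op(B) + beta * C, with op() controlled by
# transpose_a / transpose_b; linalg.gemm2 (tested further below) drops the beta * C term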
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check for different axis that describes matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
# Test gemm separately from other la-operators.
@with_seed()
def test_gemm():
_gemm_test_helper(np.float64, True)
with environment('MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION', '0'):
_gemm_test_helper(np.float32, True)
if default_context().device_type == 'gpu':
with environment('MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION', '1'):
_gemm_test_helper(np.float32, True)
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
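# start from the identity (the main diagonal) and add one shifted one-hot block per
# subdiagonal; the accumulated sum is a lower-triangular mask of ones (diagonal included)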
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 2e-6
rtol_bw = 1e-5
atol_bw = 1e-5
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
def check_fw_grad(sym, location, expected):
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
if grad_check == 1:
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
shape = (4, 4, 1, 1)
ones = mx.nd.ones(shape).asnumpy()
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
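# in the 1x1 case: potrf(a) = sqrt(a), potri(l) = 1 / l**2 (the inverse of l * l),
# trsm reduces to a scaled division, trmm to a scaled multiplication,
# and sumlogdiag to log(a)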
data_in = np.random.uniform(1, 10, shape)
# test potrf
# Note: the input has to be symmetrized for the gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw_grad(test_potrf, [data_in], [res_potrf])
# test potri
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw_grad(test_potri, [data_in], [res_potri])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw_grad(test_trsm, [trian_in, data_in], [ones])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw_grad(test_trmm, [trian_in, data_in], [ones])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw_grad(test_sumlogdiag, [data_in], [res_sumlogdiag])
# more elaborate example of Cholesky factorization
low_trian = trian
if upper:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw_grad(test_potrf, [a], [r])
#test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw_grad(test_potri, [a], [r])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw_grad(test_trsm, [a, b], [r])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw_grad(test_trsm2, [a, b], [r])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw_grad(test_trsm3, [a, b], [r])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw_grad(test_trsm4, [a, b], [r])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = [a, rep_3x(matrix, 4, 4)]
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw_grad(test_trmm, a, [r])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw_grad(test_trmm2, a, [r])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw_grad(test_trmm3, a, [r])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw_grad(test_trmm4, a, [r])
# test sumlogdiag
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw_grad(test_sumlogdiag, [rep_3x(pow, 4, 4)], [r])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
# NOTE: If the unused output is left dangling, things break for dtype=np.float64:
# the backward gradient for the unused output then comes back as np.float32,
# which is very annoying.
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
def _syevd_combined_symbol(a):
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
@with_seed()
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
# Currently disabled on GPU as these tests need CUDA 8
# while the MXNet builds use CUDA 7.5
if default_context() != mx.cpu():
    return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
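# numpy's eig returns eigenvectors as columns; sort by eigenvalue and transpose so
# each row of u is an eigenvector (matching syevd's layout), then fix the sign so the
# largest-magnitude component of every eigenvector is positive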
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
def _syevd_backward(grad_u, grad_l, u, l):
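# reference backward pass: grad_A = u.T @ M @ u, where M carries grad_l on its
# diagonal and, for i != j,
#   M[i, j] = M[j, i] = ((grad_u @ u.T)[i, j] - (grad_u @ u.T)[j, i]) / (2 * (l[i] - l[j]))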
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
# Currently disabled on GPU as syevd needs CUDA 8
# while the MXNet builds use CUDA 7.5
if default_context() != mx.cpu():
    return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
@with_seed()
def test_laop_4():
# Currently disabled on GPU as syevd needs CUDA 8
# while the MXNet builds use CUDA 7.5
if default_context() != mx.cpu():
    return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
def test_laop_5():
# tests for diagonal and triangular matrix extraction and generation
data = mx.symbol.Variable('data')
# test complete range of small matrices to cover corner cases
for n in range(1, 5):
# test batched and non-batched processing
for b in range(3):
shape = (n, n) if b == 0 else (b, n, n)
data_in = np.random.uniform(1, 10, shape)
# test all legal offsets of the diagonal
for offs in range(1-n, n):
# test extraction of diagonal
test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
check_symbolic_forward(test_diag, [data_in], [res_diag])
check_numeric_gradient(test_diag, [data_in])
# test generation of diagonal matrix
test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
res_diag2 = None
if b == 0:
res_diag2 = np.diagflat(res_diag, k=offs)
else:
for i in range(b):
res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
check_numeric_gradient(test_diag2, [res_diag])
# check both settings for parameter "lower" in case of zero offset
lower_vals = [True] if offs != 0 else [True, False]
for lower in lower_vals:
# test triangle extraction by doing a full roundtrip, as the intermediate extracted
# triangle uses a different ordering than numpy
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
extracts_lower = (offs < 0) or ((offs == 0) and lower)
res_trian = None
if b == 0:
res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
else:
for i in range(b):
res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
res = np.reshape(res, (1, n, n))
res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
check_symbolic_forward(test_trian, [data_in], [res_trian])
check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse, linalg.det and linalg.slogdet
@with_seed()
@pytest.mark.skip(reason="Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-4
atol_bw = 1e-6
data = mx.symbol.Variable('data')
check_fw = lambda sym, location, expected:\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
## By the matrix determinant lemma, det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so the matrix
## is always invertible; its determinant is bounded away from zero, which keeps logdet stable
v = np.random.random(4)
a = np.eye(4) + np.outer(v, v)
a = np.tile(a, (3, 1, 1))
permute_mat = np.eye(4)[[1, 0, 2, 3]]
# test matrix inverse
r = np.eye(4)
r = np.tile(r, (3, 1, 1))
test_inverse = mx.sym.linalg.inverse(data)
test_eye = mx.sym.linalg.gemm2(data, test_inverse)
check_fw(test_eye, [a], [r])
check_grad(test_inverse, [a])
# test matrix determinant
# det
r = np.linalg.det(a)
test_det = mx.sym.linalg.det(data)
check_fw(test_det, [a], [r])
check_grad(test_det, [a])
# test slogdet
r1 = np.array([1., 1., 1.])
r2 = np.log(np.abs(np.linalg.det(a)))
test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
check_fw(test_sign, [a], [r1])
check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
check_fw(test_logabsdet, [a], [r2])
check_grad(test_logabsdet, [a])
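# Added note on test_laop_6: permute_mat swaps two rows/columns of the identity,
# so multiplying by it negates the determinant; that is why the slogdet sign
# output is checked against -r1 for np.dot(a, permute_mat). The reference pair
# (r1, r2) is exactly what np.linalg.slogdet(a) would return: all signs +1 and
# log|det(a)|.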
@with_seed()
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
## TODO: this test fails intermittently when cudnn is on; cudnn is temporarily disabled until it gets fixed.
## tracked at https://github.com/apache/incubator-mxnet/issues/14288
@with_seed()
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
# Sanity-check the test data: the input must not contain any zeros
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
# The relative deviation of the sum should stay within ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y._simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y._simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
# check_dropout_ratio(0.5, shape, cudnn_off=False)
# check_dropout_ratio(0.0, shape, cudnn_off=False)
# check_dropout_ratio(1.0, shape, cudnn_off=False)
# check_dropout_ratio(0.75, shape, cudnn_off=False)
# check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
# check_passthrough(0.5, shape, cudnn_off=False)
# check_passthrough(0.0, shape, cudnn_off=False)
# check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
# check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
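# Added note on test_dropout above: check_correctness relies on dropout being
# "inverted", i.e. surviving activations are scaled up (by 1 / (1 - p) in the
# usual convention) so that the expected output sum matches the input sum. With
# an all-ones input the relative deviation |sum(out) - sum(in)| / sum(in) is
# small for a (100, 100) array, hence the loose ratio / 2 bound used there.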
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
@with_seed()
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
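# Added worked example for the duplicate-index case above: with data = [2, 3, 0]
# and idx = [[1, 1, 0], [1, 1, 0]], the column-wise destinations are (1, 1),
# (1, 1) and (0, 0); _backward_gather_nd accumulates values mapped to the same
# location, so entry (1, 1) receives 2 + 3 = 5, giving [[0, 0], [0, 5]]. The
# plain scatter_nd check above only uses distinct destinations
# ([[1, 1, 0], [0, 1, 0]] -> (1, 0), (1, 1), (0, 0) -> [[0, 0], [2, 3]]).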
@with_seed()
def test_gather_nd_check_bound():
def _test_gather_nd_exception(data, indices):
output = mx.nd.gather_nd(data, indices).asnumpy()
# check if indices is out of bound
data = mx.nd.array([[0, 1, 2], [3, 4, 5]])
indices1 = mx.nd.array([[0, 1, 0], [0, 1, 3]])
indices2 = mx.nd.array([[0, 1, 0], [0, 1, -5]])
assertRaises(IndexError, _test_gather_nd_exception, data, indices1)
# IndexError: index 3 is out of bounds for axis 1 with size 3
assertRaises(IndexError, _test_gather_nd_exception, data, indices2)
# IndexError: index -5 is out of bounds for axis 1 with size 3
# check if the negative indices are wrapped correctly
indices1 = mx.nd.array([[0, 1, -1], [0, 1, -2]])
indices2 = mx.nd.array([[0, 1, 1], [0, 1, 1]])
data1 = mx.nd.gather_nd(data, indices1)
data2 = mx.nd.gather_nd(data, indices2)
assert_almost_equal(data1, data2, rtol=1e-5, atol=1e-5)
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
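# Added note: mx.sym.zeros_like(data) contributes nothing to the value, so op_ex
# above evaluates to exactly forward_mxnet_call(data); the broadcast_add wrapper
# appears to be there mainly so the composite symbol can carry the descriptive
# op_name (and the same pattern is reused by the binary helpers below).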
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
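# Added note: np_smooth_l1 implements the sigma-parameterised smooth L1 loss
#   f(x) = 0.5 * (sigma * x)**2     if |x| < 1 / sigma**2
#        = |x| - 0.5 / sigma**2     otherwise
# and np_smooth_l1_grad its derivative: sigma**2 * x inside the quadratic region
# and sign(x) outside. For example, with sigma = 1: f(0.5) = 0.125, f(2.0) = 1.5,
# with gradients 0.5 and 1.0 respectively.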
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
except:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
@with_seed()
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
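# Added note on the analytic gradients in test_binary_math_operators:
# d/dx x**y = y * x**(y - 1) and d/dy x**y = x**y * log(x), which is why the
# first operand of pow/power is sampled from [0.2, 5.0] (log(x) needs x > 0);
# for hypot, d/dx hypot(x, y) = x / hypot(x, y), and symmetrically for y.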
@with_seed()
@pytest.mark.serial
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_shape():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
@with_seed()
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@with_seed()
@mx.use_np_shape
def test_zero_size_min_max():
def min():
a = mx.nd.zeros(shape=(5, 0))
a.min()
def max():
a = mx.nd.zeros(shape=(5, 0))
a.max()
pytest.raises(MXNetError, min)
pytest.raises(MXNetError, max)
@with_seed()
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
if out.shape == (1,): # as an exception (1, 1, 1) will be squeezed to (1,)
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@with_seed()
@pytest.mark.serial
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D adaptive average pooling, applied per (batch, channel) frame
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
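# Added note: the py_adaptive_avg_pool reference above uses the standard
# adaptive-pooling windows istart = floor(o * I / O), iend = ceil((o + 1) * I / O)
# for output index o over I input positions and O output positions. For example,
# I = 10 and O = 3 give the windows [0, 4), [3, 7) and [6, 10); adjacent windows
# may overlap, and each output value is the plain mean over its window.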
@with_seed()
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
data1 = np.zeros_like(x)
data2 = incoming_grads
batchsize = data1.shape[0]
channels = data1.shape[1]
height1 = data1.shape[2]
width1 = data1.shape[3]
height2 = data2.shape[2]
width2 = data2.shape[3]
rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
# special case: just copy
if height1 == height2 and width1 == width2:
data1 += data2
return [data1]
for h2 in range(0, height2):
for w2 in range(0, width2):
h1r = rheight * h2
h1 = int(h1r)
h1p = 1 if (h1 < height1 - 1) else 0
h1lambda = h1r - h1
h0lambda = 1 - h1lambda
#
w1r = rwidth * w2
w1 = int(w1r)
w1p = 1 if (w1 < width1 - 1) else 0
w1lambda = w1r - w1
w0lambda = 1 - w1lambda
#
for n in range(0, batchsize):
for c in range(0, channels):
d2val = data2[n][c][h2][w2]
data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
if mode == 'like':
return data1, np.zeros_like(incoming_grads)
return [data1]
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
def check_bilinear_resize_align_corners_op():
img_shape = [1, 1, 3, 2]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
# align_corners = False
expected_data[0] = [
64.000, 56.000, 40.000, 32.000, 56.000, 52.000, 44.000, 40.000, 40.000, 44.000, 52.000, 56.000,
36.500, 45.625, 63.875, 73.000, 45.500, 56.875, 79.625, 91.000, 50.000, 62.500, 87.500, 100.000
]
# align_corners = True
expected_data[1] = [
64.000, 53.333, 42.667, 32.000, 51.200, 49.067, 46.933, 44.800, 38.400, 44.800, 51.200, 57.600,
35.600, 47.467, 59.333, 71.200, 42.800, 57.067, 71.333, 85.600, 50.000, 66.667, 83.333, 100.000
]
x = np.array(data, dtype=np.float32).reshape(img_shape)
x_nd = mx.nd.array(x)
y0 = np.array(expected_data[0]).reshape((1, 1, target_height, target_width))
y0_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=False)
assert_almost_equal(y0, y0_nd.asnumpy(), atol=1e-3)
y1 = np.array(expected_data[1]).reshape((1, 1, target_height, target_width))
y1_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=True)
assert_almost_equal(y1, y1_nd.asnumpy(), atol=1e-3)
def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
x = mx.nd.random.uniform(shape=shape)
original_h = shape[2]
original_w = shape[3]
if mode == 'odd_scale':
assert scale_height is not None and scale_width is not None
new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
int((original_h - 1) * scale_height) + 1
new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
else int((original_w - 1) * scale_width) + 1
y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
scale_width=scale_width,
mode='odd_scale')
elif mode == 'to_even_down':
new_h = original_h if (original_h % 2) == 0 else original_h - 1
new_w = original_w if (original_w % 2) == 0 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
elif mode == 'to_even_up':
new_h = original_h if (original_h % 2) == 0 else original_h + 1
new_w = original_w if (original_w % 2) == 0 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
elif mode == 'to_odd_down':
new_h = original_h if (original_h % 2) == 1 else original_h - 1
new_w = original_w if (original_w % 2) == 1 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
elif mode == 'to_odd_up':
new_h = original_h if (original_h % 2) == 1 else original_h + 1
new_w = original_w if (original_w % 2) == 1 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
elif mode == 'like':
x_1 = mx.nd.random.uniform(shape=shape_1)
new_h = x_1.shape[2]
new_w = x_1.shape[3]
y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
new_shape_got = np.array(y.shape, dtype='int')
data_sym = mx.sym.var('data')
data_np = x.asnumpy()
expected = py_bilinear_resize(data_np, new_h, new_w)
out_grads = np.ones([shape[0], shape[1], new_h, new_w])
expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
if mode != 'like':
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
else:
data_sym_like = mx.sym.var('data_like')
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
date_np_like = x_1.asnumpy()
check_symbolic_forward(resize_sym, [data_np, date_np_like], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np, date_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np, date_np_like], rtol=1e-2, atol=1e-4)
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
shape = (2, 2, 20, 20)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape = (2, 2, 21, 21)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape_0 = (2, 2, 21, 21)
shape_1 = (2, 2, 10, 10)
check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
check_bilinear_resize_align_corners_op()
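# Added note: py_bilinear_resize uses corner-aligned scale factors
# (H_in - 1) / (H_out - 1) and (W_in - 1) / (W_out - 1); each output pixel is a
# weighted sum of its four integer neighbours with weights (1 - lh) * (1 - lw),
# (1 - lh) * lw, lh * (1 - lw) and lh * lw, where lh and lw are the fractional
# parts of the source coordinates, and the backward reference scatters the
# incoming gradient to the same four locations with the same weights. The
# align_corners=False behaviour is covered separately with hard-coded expected
# values in check_bilinear_resize_align_corners_op.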
def test_multi_proposal_op():
# parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
@with_seed()
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
tol = 1e-2 if dtype is np.float16 else 1e-5
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output, expected, rtol=tol, atol=tol)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected], rtol=tol, atol=tol)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected], rtol=tol, atol=tol)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
def allclose_function(contexts):
def getRandom(base, percent = 1.):
return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100)
title = 'exp'
for ctx in contexts:
title += ' cpu' if ctx == mx.cpu() else ' gpu'
title += ' nElem shape'
num_ctx = len(contexts)
result = [False, False]
for dtype in [np.float16, np.float32, np.float64]:
rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5)
atol = getRandom(1e-4 if dtype is np.float16 else 1e-7)
print('\nnumpy.{}: atol = {} rtol = {}'.format(dtype.__name__, atol, rtol))
print(title)
for ndim in range(1, 10):
shape = rand_shape_nd(ndim, 8)
a_np = np.random.randn(*shape).astype(dtype)
b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype)
expected = np.allclose(a_np, b_np, rtol, atol)
for n, ctx in enumerate(contexts):
a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx)
b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx)
output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol)
result[n] = output.asnumpy() == 1
if expected != result[n]:
# Report the elements that one side (CPU/GPU) flags as "not close" while the
# comparison side (Python/NumPy or the other device) still considers them "close"
v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU'
if expected:
v_cmp = 'Python'
a_b = a_ctx.asnumpy()
b_b = b_ctx.asnumpy()
a_g = np.asarray(a_np)
b_g = np.asarray(b_np)
else:
v_cmp = v_ctx
v_ctx = 'Python'
a_b = np.asarray(a_np)
b_b = np.asarray(b_np)
a_g = a_ctx.asnumpy()
b_g = b_ctx.asnumpy()
print('\n *** Violations found on %s, but not on %s side ***' % (v_ctx, v_cmp))
frmt = " a[{0:d}]: b[{0:d}]:" \
" abs(a[{0:d}]-b[{0:d}]) - atol + rtol*abs(b[{0:d}]):"
# Define the indices of all violations and corresponding values of coordinates
bad_indexes = np.abs(a_b - b_b) >= atol + rtol * abs(b_b)
a_values = [a_b[bad_indexes], a_g[bad_indexes]]
b_values = [b_b[bad_indexes], b_g[bad_indexes]]
idx = np.asarray(np.where(bad_indexes == True))
idx = idx.reshape(1, idx.size)
idx_flat = np.asarray(np.where(bad_indexes.flatten() == True)).flatten()
for i in range(len(a_values[0])):
flat_idx = idx_flat[i]
print('{}: index = {} flat_index = {}'.format('%4d'%i, idx[i], flat_idx))
print(frmt.format(flat_idx))
for j in range(2):
diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i])
print('{}: {} {} {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff))
if num_ctx == 1:
print(' {0:d} {1:d} {2:10d} {3:}'.format(expected, result[0], np.prod(shape), shape))
else:
print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape))
if expected != result[0] or num_ctx > 1 and expected != result[1]:
assert False
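# Added note: allclose_function compares mx.nd.contrib.allclose against
# np.allclose, which uses the element-wise criterion |a - b| <= atol + rtol * |b|;
# the diagnostic block above lists the elements that violate this criterion on
# one side (CPU/GPU or Python) while the other side still considers them close.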
@with_seed()
@pytest.mark.serial
def test_allclose_function():
allclose_function([default_context()])
@with_seed()
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1, np_bins1)
assert_almost_equal(mx_histo1, np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2, np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2, np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1._bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2._bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
@with_seed()
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@with_seed()
@pytest.mark.serial
def test_ravel():
# be aware that check_symbolic_forward will use float type internally
# for the arrays and that limits the representable flat index range.
# Taking dim==4 and a range of [0,..,100] for the data can already
# cause precision issues and break this test.
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
shape2 = shape
shape2 = (-1,)+shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
@with_seed()
def test_unravel_index():
unravel_shape = (2, 10)
unravel_size = np.prod(unravel_shape)
for shape in [(10,), (2, 10), (3, 4, 5)]:
a = np.random.randint(0, unravel_size, size=shape)
b = np.stack(np.unravel_index(a, shape=unravel_shape), 0)
a_mx = mx.nd.array(a)
b_mx = mx.nd.unravel_index(a_mx, shape=unravel_shape)
assert_array_equal(b, b_mx.asnumpy())
def test_context_num_gpus():
try:
# Note: the test runs on both GPU and CPU hosts, so we cannot assert
# a specific GPU count here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
# Note: on a CPU-only host, CUDA sometimes cannot determine the number
# of GPUs
if str(e).find("CUDA") == -1:
raise e
@with_seed()
@pytest.mark.serial
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output, real_output, atol=1e-3)
assert_almost_equal(data.grad, dx, atol=1e-3)
assert_almost_equal(rois.grad, drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
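# Added note: in the ROIAlign reference above each output bin averages
# roi_bin_grid_h * roi_bin_grid_w bilinearly interpolated samples taken on a
# regular grid inside the bin, and the backward pass routes dy * w / count to
# the four integer neighbours of every sample (w being the bilinear weight).
# In the position-sensitive variant the input channel for a bin is
# c_in = c * PH * PW + ph * PW + pw, so C must be a multiple of PH * PW.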
@with_seed()
def test_op_rroi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0)
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
return val
def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 6,\
ValueError(
'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
out = np.zeros((R, C, PH, PW), dtype=T)
for r in range(R):
batch_ind = int(rois[r, 0])
roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale)
roi_theta = T(rois[r,5] * np.pi / 180.0)
roi_w = T(max(roi_w, 1.0))
roi_h = T(max(roi_h, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
roi_start_h = T(-roi_h / 2.0)
roi_start_w = T(-roi_w / 2.0)
for c in range(C):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
for iy in range(roi_bin_grid_h):
yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w
y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h
v = bilinear_interpolate(
bdata[c], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out
def test_rroi_align_value(sampling_ratio=-1):
ctx = default_context()
if ctx.device_type == 'gpu':
print('skipped testing rroi align for gpu since it is not supported yet')
return
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy, wh, theta, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio)
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
test_rroi_align_value()
test_rroi_align_value(sampling_ratio=2)
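# Added note: for RROIAlign each roi row is (batch_index, cx, cy, w, h, theta)
# with theta in degrees; the reference rotates every sampling point from the
# roi-local frame into the image frame via
#   x = xx * cos(theta) + yy * sin(theta) + cx
#   y = yy * cos(theta) - xx * sin(theta) + cy
# before bilinear interpolation. Unlike ROIAlign above, only the forward pass is
# checked here.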
@with_seed()
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
for k in [0, 1, -1, np.random.randint(-min(h,w) + 1, min(h,w))]:
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r, np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=1, axis1=1, axis2=0))
# k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r, np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@with_seed()
@pytest.mark.serial
def test_depthtospace():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
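# Shape sketch for the reference above (comment only, sizes assumed): an
# input of shape (1, 8, 2, 3) with blocksize=2 is reshaped to
# (1, 2, 2, 2, 2, 3), transposed, and flattened to (1, 2, 4, 6), i.e.
# channels shrink by blocksize**2 while height and width grow by blocksize.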
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@with_seed()
@pytest.mark.serial
def test_spacetodepth():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
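# Comment only: this is the inverse of the depth_to_space reference above;
# e.g. an input of shape (1, 2, 4, 6) with blocksize=2 (sizes assumed) maps
# back to (1, 8, 2, 3).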
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
@with_seed()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
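# Comment only: with a one-hot label this reduces to -log(p[correct class])
# summed over the batch; the test below feeds np_softmax(np_data) as `data`,
# so f_sm_ce mirrors what softmax_cross_entropy computes end to end.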
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
@with_seed()
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
@with_seed()
def test_moments():
dim = random.randint(2, 5)
shape = rand_shape_nd(dim, dim=5)
axes = [i for i in range(dim)]
test_dims = random.sample(axes, random.randint(1, dim))
test_axes = tuple(sorted(test_dims))
np_a = np.random.uniform(-1.0, 1.0, shape)
a = mx.nd.array(np_a)
for keepdims in [True, False]:
eps = 1e-3
np_a[abs(np_a) < eps] = 2 * eps
np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
N = np_a.size / np_mean.size
mx_sym = mx.sym.Variable("data")
mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
if len(np_mean.shape) == 0:
np_mean = np_mean.reshape(mx_mean.shape)
np_var = np_var.reshape(mx_var.shape)
assert np_mean.shape == mx_mean.shape
assert np_var.shape == mx_var.shape
check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
@with_seed()
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
@with_seed()
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
@with_seed()
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
@with_seed()
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@with_seed()
@pytest.mark.serial
def test_image_normalize():
# Part 1 - Test 3D input with 3D mean/std
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
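# Comment only: chained basic slicing such as a[:][:][0] returns views, so
# it is equivalent to a[0]; with the channel-first (C, H, W) layout that
# selects channel 0, which is what the per-channel mean/std above target.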
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D input with 3D mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
# Part 3 - Test 3D input with scalar mean/std
shape_3d = (3, 28, 28)
mean = 1.0
std = 2.0
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 4 - Test 4D input with scalar mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[:][:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@with_seed()
@pytest.mark.serial
def test_index_array():
def test_index_array_default():
for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_dim():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(())
expected = np.zeros((0,))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones((0, 0, 0))
expected = np.zeros((0, 0, 0, 3))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
def test_index_array_select_axes():
shape = (5, 7, 11, 13, 17, 19)
for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=axes)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)[..., axes]
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_select_axes_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
input_array = np.ones((0, 0, 0, 0))
expected = np.zeros((0, 0, 2))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
test_index_array_default()
test_index_array_default_zero_dim()
test_index_array_default_zero_size()
test_index_array_select_axes()
test_index_array_select_axes_zero_size()
@with_seed()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_shape():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_shape():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_concat_with_zero_size_tensor():
with mx.np_shape():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
@with_seed()
def test_np_shape_decorator():
@mx.use_np_shape
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_shape(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_shape
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
for active in [True, False]:
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
@with_seed()
def test_add_n():
data_shape = (2, 2)
input_num = 5
data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)]
rslt = mx.nd.zeros(shape=data_shape)
for i in range(input_num):
rslt += data[i]
add_n_rslt = mx.nd.add_n(*data, out=data[0])
assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
ops = get_all_registered_operators()
assert isinstance(ops, list)
assert len(ops) > 0
assert 'Activation' in ops
def test_get_operator_arguments():
operator_arguments = get_operator_arguments('Activation')
assert isinstance(operator_arguments, OperatorArguments)
assert operator_arguments.names == ['data', 'act_type']
assert operator_arguments.types \
== ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"]
assert operator_arguments.narg == 2
def test_transpose_infer_shape_back():
o1 = mx.sym.ones(shape=[2,3])
o2 = mx.sym.ones(shape=[-1,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b._bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_transpose_infer_shape_mixed():
o1 = mx.sym.ones(shape=[2,-1])
o2 = mx.sym.ones(shape=[3,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b._bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
@with_seed()
def test_sample_normal_default_shape():
# Test case from https://github.com/apache/incubator-mxnet/issues/16135
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]))
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=())
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=1)
assert s.shape == (1, 1)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=(1,))
assert s.shape == (1, 1)
def test_large_tensor_disabled_err_msg():
LARGE_X = 4300000000
MEDIUM_X = 1000000000
SMALL_Y = 1
shape = (2, LARGE_X)
def check_nd_array():
x = np.arange(0, LARGE_X)
assertRaises(MXNetError, mx.nd.array, x)
def check_nd_ones():
assertRaises(MXNetError, mx.nd.ones, shape)
def check_nd_zeros():
assertRaises(MXNetError, mx.nd.zeros, shape)
def check_nd_full():
val = 1
assertRaises(Exception, mx.nd.full, shape, val)
def check_nd_arange():
start = 0
stop = LARGE_X
assertRaises(Exception, mx.nd.arange, start, stop)
def check_nd_random():
shape = (2, LARGE_X)
def check_random_exp():
lam = 4
assertRaises(MXNetError, mx.nd.random_exponential, lam, shape)
def check_random_gamma():
alpha = 9
beta = 0.5
assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape)
def check_random_normal():
loc = 0
scale = 1
assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape)
def check_random_poisson():
lam = 4
assertRaises(MXNetError, mx.nd.random_poisson, lam, shape)
def check_random_randint():
low = 0
high = 1000000
assertRaises(MXNetError, mx.nd.random_randint, low, high, shape)
def check_random_uniform():
low = 0
high = 1
assertRaises(MXNetError, mx.nd.random_uniform, low, high, shape)
def check_multihead_attention_selfatt(dtype):
def convert_weight(F, q_weight, k_weight, v_weight, num_heads):
q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True)
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, q_bias, k_bias, v_bias, num_heads):
q_bias = F.reshape(q_bias, shape=(num_heads, -1))
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(q_bias, k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
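# Comment only: reshaping per head and concatenating stacks the q/k/v
# projections head by head, producing the interleaved packed-qkv layout
# that interleaved_matmul_selfatt_qk/valatt expect from a single
# FullyConnected projection.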
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention head
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'qkv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
qkv_weight = convert_weight(mx.sym, q_weight, k_weight, v_weight, num_heads)
qkv_bias = convert_bias(mx.sym, q_bias, k_bias, v_bias, num_heads)
qkv = mx.sym.transpose(qkv, axes=(1, 0, 2))
qkv_proj = mx.sym.FullyConnected(qkv, weight=qkv_weight, bias=qkv_bias, flatten=False,
num_hidden=qkv_units * 3, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_selfatt_qk(
qkv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
qkv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
type_dict={'qkv': dtype,
'q_weight': dtype,
'k_weight': dtype,
'v_weight': dtype,
'q_bias': dtype,
'k_bias': dtype,
'v_bias': dtype,
'sonde': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(qkv, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(qkv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(qkv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
type_dict={'qkv': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@with_seed()
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_selfatt():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_selfatt(dtype=dtype)
def check_multihead_attention_encdec(dtype):
def convert_weight(F, k_weight, v_weight, num_heads):
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, k_bias, v_bias, num_heads):
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention head
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'q': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'kv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
kv_weight = convert_weight(mx.sym, k_weight, v_weight, num_heads)
kv_bias = convert_bias(mx.sym, k_bias, v_bias, num_heads)
kv = mx.sym.transpose(kv, axes=(1, 0, 2))
kv_proj = mx.sym.FullyConnected(kv, weight=kv_weight, bias=kv_bias, flatten=False,
num_hidden=qkv_units * 2, no_bias=False)
q = mx.sym.transpose(q, axes=(1, 0, 2))
q_proj = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_encdec_qk(
q_proj, kv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_encdec_valatt(
kv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
out_weight=(out_dim, qkv_units),
out_bias=(out_dim,),
type_dict={'q': dtype,
'kv': dtype,
'q_weight': dtype,
'q_bias': dtype,
'k_weight': dtype,
'k_bias': dtype,
'v_weight': dtype,
'v_bias': dtype,
'out_weight': dtype,
'out_bias': dtype,
},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(kv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(kv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
type_dict={'q': dtype,
'kv': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@with_seed()
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_encdec():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_encdec(dtype=dtype)
@with_seed()
@pytest.mark.serial
def test_im2col_col2im():
def compute_output_size(spatial, kernel, stride=1, dilate=1, pad=0):
pad_size = spatial + 2 * pad
dilated_kernel = dilate * (kernel - 1) + 1
return (pad_size - dilated_kernel) // stride + 1
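# Worked example (comment only, values assumed): spatial=30, kernel=3,
# stride=2, dilate=2, pad=1 gives pad_size=32, dilated_kernel=5 and an
# output size of (32 - 5) // 2 + 1 = 14, the usual convolution
# output-size formula exercised by the checks below.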
def build_kwargs(kernel, stride=1, dilate=1, pad=0):
return {'kernel': (kernel, kernel),
'stride': (stride, stride),
'dilate': (dilate, dilate),
'pad': (pad, pad)}
# use im2col to compute convolution
def test_conv_compute(input_shape, num_filter, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
w = mx.nd.uniform(shape=(num_filter, channel, kernel, kernel))
c1 = mx.nd.dot(col.transpose((0, 2, 1)), w.reshape(num_filter, -1).T).transpose((0, 2, 1))
hos = compute_output_size(input_shape[2], kernel, stride, dilate, pad)
wos = compute_output_size(input_shape[3], kernel, stride, dilate, pad)
c1 = c1.reshape((batch_size, num_filter, hos, wos))
c2 = mx.nd.Convolution(data, num_filter=num_filter, weight=w, no_bias=True, **kwargs)
assert_almost_equal(c1.asnumpy(), c2.asnumpy(), rtol=1e-5, atol=1e-5)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# use composite of im2col and col2im to reconstruct image
def test_reconstruct(input_shape, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
im1 = mx.nd.col2im(col, input_shape[2:], **kwargs)
im2 = mx.nd.col2im(mx.nd.ones_like(col), input_shape[2:], **kwargs) * data
assert_almost_equal(im1.asnumpy(), im2.asnumpy(), rtol=1e-5, atol=1e-5)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# test gradient
# the grad of im2col is col2im, and vice versa
def test_grad(input_shape, kernel, stride=1, dilate=1, pad=0):
# im2col
data = mx.sym.Variable('data')
kwargs = build_kwargs(kernel, stride, dilate, pad)
sym = mx.sym.im2col(data, **kwargs)
im = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(im, **kwargs)
col_shape = col.shape
expected = mx.nd.col2im(col, input_shape[2:], **kwargs)
check_symbolic_backward(sym, [im.asnumpy()], [col.asnumpy()], [expected.asnumpy()])
# col2im
data = mx.sym.Variable('data')
sym = mx.sym.col2im(data, input_shape[2:], **kwargs)
col = mx.nd.uniform(shape=col_shape)
im = mx.nd.col2im(col, input_shape[2:], **kwargs)
expected = mx.nd.im2col(im, **kwargs)
check_symbolic_backward(sym, [col.asnumpy()], [im.asnumpy()], [expected.asnumpy()])
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
def test_elemwise_sum_for_gradient_accumulation():
for nrepeat in range(1, 10):
stored_grad = dict()
for grad_req in ['write', 'add']:
a = mx.nd.array([1])
b = mx.nd.array([2])
if grad_req == 'write':
a.attach_grad(grad_req='write')
elif grad_req == 'add':
a.attach_grad(grad_req='add')
a.grad[:] = 0
with mx.autograd.record():
for _ in range(nrepeat):
b = b * a
b.backward()
stored_grad[grad_req] = a.grad.asscalar()
assert stored_grad['write'] == stored_grad['add']
assert stored_grad['write'] == 2 * nrepeat
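# Comment only: b starts at 2 and is multiplied by a nrepeat times, so
# b = 2 * a**nrepeat and db/da at a = 1 is 2 * nrepeat, which is the value
# both grad_req modes are checked against above.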
@with_seed()
def test_elementwise_ops_on_misaligned_input():
a = mx.nd.array([1,2,3,4], dtype='float16')
b = mx.nd.array([1,2,3,4], dtype='float16')
c = a[1:3]
d = b[1:3]
# Note: testing just elemwise_add since all elemwise_ops
# share the implementation
mx.nd.elemwise_add(c, d, out=c)
mx.nd.waitall()
a = mx.nd.array([1,2,3,4], dtype='float16')
b = mx.nd.array([1,2,3,4], dtype='float16')
c = a[0:3]
d = b[0:3]
mx.nd.elemwise_add(c, d, out=c)
mx.nd.waitall()
assert a[3].asscalar() == 4.0
@with_seed()
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64'])
@pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10])
@pytest.mark.parametrize('both_ways', [False, True])
def test_broadcast_ops_on_misaligned_input(dtype, lead_dim, both_ways):
shape = list(rand_shape_2d()) + [lead_dim]
small_shape = [shape[0], 1, lead_dim]
if both_ways:
# Broadcast in both ways [1, K, L] x [M, 1, L]
big_shape = [1, shape[1], lead_dim]
else:
big_shape = shape
size = np.product(shape)
small_size = np.product(small_shape)
big_size = np.product(big_shape)
a = mx.nd.arange(5000)
b = mx.nd.arange(5000)
e = mx.nd.arange(5000)
c = a[1:big_size + 1].reshape(big_shape)
d = b[1:small_size + 1].reshape(small_shape)
f = e[1:size + 1].reshape(shape)
mx.nd.broadcast_add(c, d, out=f)
expected = c.asnumpy() + d.asnumpy()
mx.nd.waitall()
assert_almost_equal(f, expected)
@with_seed()
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64'])
@pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10])
@pytest.mark.parametrize('both_ways', [False, True])
def test_broadcast_ops_on_misaligned_input_oneside(dtype, lead_dim, both_ways):
shape = list(rand_shape_2d()) + [lead_dim]
small_shape = [shape[0], shape[1], 1]
if both_ways:
# Broadcast in both ways [1, K, L] x [M, 1, 1]
big_shape = [1, shape[1], lead_dim]
else:
big_shape = shape
size = np.product(shape)
small_size = np.product(small_shape)
big_size = np.product(big_shape)
a = mx.nd.arange(5000)
b = mx.nd.arange(5000)
e = mx.nd.arange(5000)
c = a[1:big_size + 1].reshape(big_shape)
d = b[1:small_size + 1].reshape(small_shape)
f = e[1:size + 1].reshape(shape)
mx.nd.broadcast_add(c, d, out=f)
expected = c.asnumpy() + d.asnumpy()
mx.nd.waitall()
assert_almost_equal(f, expected)
client.py
import logging
try:
import queue
except ImportError: # pragma: no cover
import Queue as queue
import signal
import ssl
import threading
import time
import six
from six.moves import urllib
try:
import requests
except ImportError: # pragma: no cover
requests = None
try:
import websocket
except ImportError: # pragma: no cover
websocket = None
from . import exceptions
from . import packet
from . import payload
default_logger = logging.getLogger('engineio.client')
connected_clients = []
if six.PY2: # pragma: no cover
ConnectionError = OSError
def signal_handler(sig, frame):
"""SIGINT handler.
Disconnect all active clients and then invoke the original signal handler.
"""
for client in connected_clients[:]:
if client.is_asyncio_based():
client.start_background_task(client.disconnect, abort=True)
else:
client.disconnect(abort=True)
if callable(original_signal_handler):
return original_signal_handler(sig, frame)
else: # pragma: no cover
# Handle case where no original SIGINT handler was present.
return signal.default_int_handler(sig, frame)
original_signal_handler = None
class Client(object):
"""An Engine.IO client.
This class implements a fully compliant Engine.IO web client with support
for websocket and long-polling transports.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``. Note that fatal errors are logged even when
``logger`` is ``False``.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
:param request_timeout: A timeout in seconds for requests. The default is
5 seconds.
:param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
skip SSL certificate verification, allowing
connections to servers with self signed certificates.
The default is ``True``.
"""
event_names = ['connect', 'disconnect', 'message']
def __init__(self,
logger=False,
json=None,
request_timeout=5,
ssl_verify=True):
global original_signal_handler
if original_signal_handler is None and \
threading.current_thread() == threading.main_thread():
original_signal_handler = signal.signal(signal.SIGINT,
signal_handler)
self.handlers = {}
self.base_url = None
self.transports = None
self.current_transport = None
self.sid = None
self.upgrades = None
self.ping_interval = None
self.ping_timeout = None
self.pong_received = True
self.http = None
self.ws = None
self.read_loop_task = None
self.write_loop_task = None
self.ping_loop_task = None
self.ping_loop_event = None
self.queue = None
self.state = 'disconnected'
self.ssl_verify = ssl_verify
if json is not None:
packet.Packet.json = json
if not isinstance(logger, bool):
self.logger = logger
else:
self.logger = default_logger
if not logging.root.handlers and \
self.logger.level == logging.NOTSET:
if logger:
self.logger.setLevel(logging.INFO)
else:
self.logger.setLevel(logging.ERROR)
self.logger.addHandler(logging.StreamHandler())
self.request_timeout = request_timeout
def is_asyncio_based(self):
return False
def on(self, event, handler=None):
"""Register an event handler.
:param event: The event name. Can be ``'connect'``, ``'message'`` or
``'disconnect'``.
:param handler: The function that should be invoked to handle the
event. When this parameter is not given, the method
acts as a decorator for the handler function.
Example usage::
# as a decorator:
@eio.on('connect')
def connect_handler():
print('Connection request')
# as a method:
def message_handler(msg):
print('Received message: ', msg)
eio.send('response')
eio.on('message', message_handler)
"""
if event not in self.event_names:
raise ValueError('Invalid event')
def set_handler(handler):
self.handlers[event] = handler
return handler
if handler is None:
return set_handler
set_handler(handler)
def connect(self, url, headers={}, transports=None,
engineio_path='engine.io'):
"""Connect to an Engine.IO server.
:param url: The URL of the Engine.IO server. It can include custom
query string parameters if required by the server.
:param headers: A dictionary with custom headers to send with the
connection request.
:param transports: The list of allowed transports. Valid transports
are ``'polling'`` and ``'websocket'``. If not
given, the polling transport is connected first,
then an upgrade to websocket is attempted.
:param engineio_path: The endpoint where the Engine.IO server is
installed. The default value is appropriate for
most cases.
Example usage::
eio = engineio.Client()
eio.connect('http://localhost:5000')
"""
if self.state != 'disconnected':
raise ValueError('Client is not in a disconnected state')
valid_transports = ['polling', 'websocket']
if transports is not None:
if isinstance(transports, six.string_types):
transports = [transports]
transports = [transport for transport in transports
if transport in valid_transports]
if not transports:
raise ValueError('No valid transports provided')
self.transports = transports or valid_transports
self.queue = self.create_queue()
return getattr(self, '_connect_' + self.transports[0])(
url, headers, engineio_path)
def wait(self):
"""Wait until the connection with the server ends.
Client applications can use this function to block the main thread
during the life of the connection.
"""
if self.read_loop_task:
self.read_loop_task.join()
def send(self, data, binary=None):
"""Send a message to a client.
:param data: The data to send to the client. Data can be of type
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
or ``dict``, the data will be serialized as JSON.
:param binary: ``True`` to send packet as binary, ``False`` to send
as text. If not given, unicode (Python 2) and str
(Python 3) are sent as text, and str (Python 2) and
bytes (Python 3) are sent as binary.
"""
self._send_packet(packet.Packet(packet.MESSAGE, data=data,
binary=binary))
def disconnect(self, abort=False):
"""Disconnect from the server.
:param abort: If set to ``True``, do not wait for background tasks
associated with the connection to end.
"""
if self.state == 'connected':
self._send_packet(packet.Packet(packet.CLOSE))
self.queue.put(None)
self.state = 'disconnecting'
self._trigger_event('disconnect', run_async=False)
if self.current_transport == 'websocket':
self.ws.close()
if not abort:
self.read_loop_task.join()
self.state = 'disconnected'
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
def transport(self):
"""Return the name of the transport currently in use.
The possible values returned by this function are ``'polling'`` and
``'websocket'``.
"""
return self.current_transport
def start_background_task(self, target, *args, **kwargs):
"""Start a background task.
This is a utility function that applications can use to start a
background task.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object compatible with the `Thread` class in
the Python standard library. The `start()` method on this object is
already called by this function.
"""
th = threading.Thread(target=target, args=args, kwargs=kwargs)
th.start()
return th
def sleep(self, seconds=0):
"""Sleep for the requested amount of time."""
return time.sleep(seconds)
def create_queue(self, *args, **kwargs):
"""Create a queue object."""
q = queue.Queue(*args, **kwargs)
q.Empty = queue.Empty
return q
def create_event(self, *args, **kwargs):
"""Create an event object."""
return threading.Event(*args, **kwargs)
def _reset(self):
self.state = 'disconnected'
self.sid = None
def _connect_polling(self, url, headers, engineio_path):
"""Establish a long-polling connection to the Engine.IO server."""
if requests is None: # pragma: no cover
# not installed
self.logger.error('requests package is not installed -- cannot '
'send HTTP requests!')
return
self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
self.logger.info('Attempting polling connection to ' + self.base_url)
r = self._send_request(
'GET', self.base_url + self._get_url_timestamp(), headers=headers,
timeout=self.request_timeout)
if r is None:
self._reset()
raise exceptions.ConnectionError(
'Connection refused by the server')
if r.status_code < 200 or r.status_code >= 300:
self._reset()
raise exceptions.ConnectionError(
'Unexpected status code {} in server response'.format(
r.status_code), r.json())
try:
p = payload.Payload(encoded_payload=r.content)
except ValueError:
six.raise_from(exceptions.ConnectionError(
'Unexpected response from server'), None)
open_packet = p.packets[0]
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError(
'OPEN packet not returned by server')
self.logger.info(
'Polling connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = open_packet.data['pingInterval'] / 1000.0
self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
self.current_transport = 'polling'
self.base_url += '&sid=' + self.sid
self.state = 'connected'
connected_clients.append(self)
self._trigger_event('connect', run_async=False)
for pkt in p.packets[1:]:
self._receive_packet(pkt)
if 'websocket' in self.upgrades and 'websocket' in self.transports:
# attempt to upgrade to websocket
if self._connect_websocket(url, headers, engineio_path):
# upgrade to websocket succeeded, we're done here
return
# start background tasks associated with this client
self.ping_loop_task = self.start_background_task(self._ping_loop)
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_polling)
def _connect_websocket(self, url, headers, engineio_path):
"""Establish or upgrade to a WebSocket connection with the server."""
if websocket is None: # pragma: no cover
# not installed
self.logger.warning('websocket-client package not installed, only '
'polling transport is available')
return False
websocket_url = self._get_engineio_url(url, engineio_path, 'websocket')
if self.sid:
self.logger.info(
'Attempting WebSocket upgrade to ' + websocket_url)
upgrade = True
websocket_url += '&sid=' + self.sid
else:
upgrade = False
self.base_url = websocket_url
self.logger.info(
'Attempting WebSocket connection to ' + websocket_url)
# get the cookies from the long-polling connection so that they can
# also be sent to the WebSocket route
cookies = None
if self.http:
cookies = '; '.join(["{}={}".format(cookie.name, cookie.value)
for cookie in self.http.cookies])
for header, value in headers.items():
if header.lower() == 'cookie':
if cookies:
cookies += '; '
cookies += value
del headers[header]
break
extra_options = {}
if not self.ssl_verify:
extra_options['sslopt'] = {"cert_reqs": ssl.CERT_NONE}
try:
ws = websocket.create_connection(
websocket_url + self._get_url_timestamp(), header=headers,
cookie=cookies, enable_multithread=True, **extra_options)
except (ConnectionError, IOError, websocket.WebSocketException):
if upgrade:
self.logger.warning(
'WebSocket upgrade failed: connection error')
return False
else:
raise exceptions.ConnectionError('Connection error')
if upgrade:
p = packet.Packet(packet.PING,
data=six.text_type('probe')).encode()
try:
ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
try:
p = ws.recv()
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected recv exception: %s',
str(e))
return False
pkt = packet.Packet(encoded_packet=p)
if pkt.packet_type != packet.PONG or pkt.data != 'probe':
self.logger.warning(
'WebSocket upgrade failed: no PONG packet')
return False
p = packet.Packet(packet.UPGRADE).encode()
try:
ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
self.current_transport = 'websocket'
self.logger.info('WebSocket upgrade was successful')
else:
try:
p = ws.recv()
except Exception as e: # pragma: no cover
raise exceptions.ConnectionError(
'Unexpected recv exception: ' + str(e))
open_packet = packet.Packet(encoded_packet=p)
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError('no OPEN packet')
self.logger.info(
'WebSocket connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = open_packet.data['pingInterval'] / 1000.0
self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
self.current_transport = 'websocket'
self.state = 'connected'
connected_clients.append(self)
self._trigger_event('connect', run_async=False)
self.ws = ws
# start background tasks associated with this client
self.ping_loop_task = self.start_background_task(self._ping_loop)
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_websocket)
return True
def _receive_packet(self, pkt):
"""Handle incoming packets from the server."""
packet_name = packet.packet_names[pkt.packet_type] \
if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
self.logger.info(
'Received packet %s data %s', packet_name,
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
if pkt.packet_type == packet.MESSAGE:
self._trigger_event('message', pkt.data, run_async=True)
elif pkt.packet_type == packet.PONG:
self.pong_received = True
elif pkt.packet_type == packet.CLOSE:
self.disconnect(abort=True)
elif pkt.packet_type == packet.NOOP:
pass
else:
self.logger.error('Received unexpected packet of type %s',
pkt.packet_type)
def _send_packet(self, pkt):
"""Queue a packet to be sent to the server."""
if self.state != 'connected':
return
self.queue.put(pkt)
self.logger.info(
'Sending packet %s data %s',
packet.packet_names[pkt.packet_type],
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
def _send_request(
self, method, url, headers=None, body=None,
timeout=None): # pragma: no cover
if self.http is None:
self.http = requests.Session()
try:
return self.http.request(method, url, headers=headers, data=body,
timeout=timeout, verify=self.ssl_verify)
except requests.exceptions.RequestException as exc:
self.logger.info('HTTP %s request to %s failed with error %s.',
method, url, exc)
def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
if event in self.handlers:
if run_async:
return self.start_background_task(self.handlers[event], *args)
else:
try:
return self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
def _get_engineio_url(self, url, engineio_path, transport):
"""Generate the Engine.IO connection URL."""
engineio_path = engineio_path.strip('/')
parsed_url = urllib.parse.urlparse(url)
if transport == 'polling':
scheme = 'http'
elif transport == 'websocket':
scheme = 'ws'
else: # pragma: no cover
raise ValueError('invalid transport')
if parsed_url.scheme in ['https', 'wss']:
scheme += 's'
return ('{scheme}://{netloc}/{path}/?{query}'
'{sep}transport={transport}&EIO=3').format(
scheme=scheme, netloc=parsed_url.netloc,
path=engineio_path, query=parsed_url.query,
sep='&' if parsed_url.query else '',
transport=transport)
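# For illustration (comment only, example values assumed):
# _get_engineio_url('https://example.com/chat?x=1', 'engine.io', 'polling')
# yields 'https://example.com/engine.io/?x=1&transport=polling&EIO=3'; the
# original path is replaced by engineio_path and the scheme is mapped to
# http(s)/ws(s) depending on the transport.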
def _get_url_timestamp(self):
"""Generate the Engine.IO query string timestamp."""
return '&t=' + str(time.time())
def _ping_loop(self):
"""This background task sends a PING to the server at the requested
interval.
"""
self.pong_received = True
if self.ping_loop_event is None:
self.ping_loop_event = self.create_event()
else:
self.ping_loop_event.clear()
time_without_pong = 0
while self.state == 'connected':
if not self.pong_received:
time_without_pong += self.ping_interval
if time_without_pong >= self.ping_timeout:
self.logger.info(
'PONG response has not been received, aborting')
if self.ws:
self.ws.close(timeout=0)
self.queue.put(None)
break
else:
time_without_pong = 0
self.pong_received = False
self._send_packet(packet.Packet(packet.PING))
self.ping_loop_event.wait(timeout=self.ping_interval)
self.logger.info('Exiting ping task')
def _read_loop_polling(self):
"""Read packets by polling the Engine.IO server."""
while self.state == 'connected':
self.logger.info(
'Sending polling GET request to ' + self.base_url)
r = self._send_request(
'GET', self.base_url + self._get_url_timestamp(),
timeout=max(self.ping_interval, self.ping_timeout) + 5)
if r is None:
self.logger.warning(
'Connection refused by the server, aborting')
self.queue.put(None)
break
if r.status_code < 200 or r.status_code >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status_code)
self.queue.put(None)
break
try:
p = payload.Payload(encoded_payload=r.content)
except ValueError:
self.logger.warning(
'Unexpected packet from server, aborting')
self.queue.put(None)
break
for pkt in p.packets:
self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
self.write_loop_task.join()
self.logger.info('Waiting for ping loop task to end')
if self.ping_loop_event: # pragma: no cover
self.ping_loop_event.set()
self.ping_loop_task.join()
if self.state == 'connected':
self._trigger_event('disconnect', run_async=False)
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
def _read_loop_websocket(self):
"""Read packets from the Engine.IO WebSocket connection."""
while self.state == 'connected':
p = None
try:
p = self.ws.recv()
except websocket.WebSocketConnectionClosedException:
self.logger.warning(
'WebSocket connection was closed, aborting')
self.queue.put(None)
break
except Exception as e:
self.logger.info(
'Unexpected error receiving packet: "%s", aborting',
str(e))
self.queue.put(None)
break
if isinstance(p, six.text_type): # pragma: no cover
p = p.encode('utf-8')
try:
pkt = packet.Packet(encoded_packet=p)
except Exception as e: # pragma: no cover
self.logger.info(
'Unexpected error decoding packet: "%s", aborting', str(e))
self.queue.put(None)
break
self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
self.write_loop_task.join()
self.logger.info('Waiting for ping loop task to end')
if self.ping_loop_event: # pragma: no cover
self.ping_loop_event.set()
self.ping_loop_task.join()
if self.state == 'connected':
self._trigger_event('disconnect', run_async=False)
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
def _write_loop(self):
"""This background task sends packages to the server as they are
pushed to the send queue.
"""
while self.state == 'connected':
# to simplify the timeout handling, use the maximum of the
# ping interval and ping timeout as timeout, with an extra 5
# seconds grace period
timeout = max(self.ping_interval, self.ping_timeout) + 5
packets = None
try:
packets = [self.queue.get(timeout=timeout)]
except self.queue.Empty:
self.logger.error('packet queue is empty, aborting')
break
if packets == [None]:
self.queue.task_done()
packets = []
else:
while True:
try:
packets.append(self.queue.get(block=False))
except self.queue.Empty:
break
if packets[-1] is None:
packets = packets[:-1]
self.queue.task_done()
break
if not packets:
# empty packet list returned -> connection closed
break
if self.current_transport == 'polling':
p = payload.Payload(packets=packets)
r = self._send_request(
'POST', self.base_url, body=p.encode(),
headers={'Content-Type': 'application/octet-stream'},
timeout=self.request_timeout)
for pkt in packets:
self.queue.task_done()
if r is None:
self.logger.warning(
'Connection refused by the server, aborting')
break
if r.status_code < 200 or r.status_code >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status_code)
self._reset()
break
else:
# websocket
try:
for pkt in packets:
encoded_packet = pkt.encode(always_bytes=False)
if pkt.binary:
self.ws.send_binary(encoded_packet)
else:
self.ws.send(encoded_packet)
self.queue.task_done()
except websocket.WebSocketConnectionClosedException:
self.logger.warning(
'WebSocket connection was closed, aborting')
break
self.logger.info('Exiting write loop task')
|
threaded_video_stream.py
|
import cv2
from threading import Thread
class threadedVideoStream(object):
    def __init__(self, src=0, resolution=None, fps=None):
        self.stream = cv2.VideoCapture(src)
        if resolution is not None:
            self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0])
            self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1])
        if fps is not None:
            self.stream.set(cv2.CAP_PROP_FPS, fps)
        (self.grabbed, self.frame) = self.stream.read()
        # thread stop flag
        self.stopped = False
        # start the thread to read frames from the video stream
        self.thread = Thread(target=self.update, name="VideoStream", args=())
        self.thread.daemon = True
        self.thread.start()
    def update(self):
        # keep grabbing the most recent frame until asked to stop
        while not self.stopped:
            (self.grabbed, self.frame) = self.stream.read()
    def read(self):
        # return the most recently grabbed frame and its status flag
        return (self.grabbed, self.frame)
    def release(self):
        # stop the capture thread before releasing the underlying stream,
        # so a read() in update() cannot race against release()
        self.stopped = True
        self.thread.join()
        self.stream.release()
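# ----------------------------------------------------------------------
# Usage sketch (editorial addition, not part of the original module):
# grabs a handful of frames from the default camera and then shuts the
# stream down. Assumes a camera is available at index 0; any display or
# processing of the frames is left out to keep the example headless.
# ----------------------------------------------------------------------
if __name__ == "__main__":
    stream = threadedVideoStream(src=0, resolution=(640, 480), fps=30)
    for _ in range(100):
        grabbed, frame = stream.read()
        if grabbed:
            # frame is a BGR numpy array; process or display it here
            pass
    stream.release()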
|
camera_stream.py
|
##########################################################################
# threaded frame capture from camera to avoid camera frame buffering delays
# (always delivers the latest frame from the camera)
# Copyright (c) 2018-2021 Toby Breckon, Durham University, UK
# Copyright (c) 2015-2016 Adrian Rosebrock, http://www.pyimagesearch.com
# MIT License (MIT)
# based on code from this tutorial, with changes to make object method call
# compatible with cv2.VideoCapture(src) as far as possible, optional OpenCV
# Transparent API support (disabled by default) and improved thread management:
# https://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/
##########################################################################
# suggested usage - as per example in canny.py found at:
# https://github.com/tobybreckon/python-examples-cv/blob/master/canny.py
# try:
# import camera_stream
# cap = camera_stream.CameraVideoStream()
# print("INFO: using CameraVideoStream() threaded capture")
# except BaseException:
# print("INFO: CameraVideoStream() module not found")
# cap = cv2.VideoCapture()
# in the above example this makes use of the CameraVideoStream if it is
# available (i.e. camera_stream.py is in the module search path) and
# falls back to using cv2.VideoCapture otherwise
##########################################################################
# import the necessary packages
from threading import Thread
import cv2
import sys
import atexit
##########################################################################
# handle older versions of OpenCV (which appear to have had a different
# constructor prototype for cv2.VideoCapture()) semi-gracefully
(majorCV, minorCV) = cv2.__version__.split(".")[0:2]
if (int(majorCV), int(minorCV)) < (3, 4):
    raise NameError('OpenCV version < 3.4,'
                    + ' not compatible with CameraVideoStream()')
##########################################################################
# set up global variables and atexit() function to facilitate safe thread exit
# without a segfault from the VideoCapture object as experienced on some
# platforms
# (as __del__ and __exit__ are not called outside a 'with' construct)
exitingNow = False # global flag for program exit
threadList = [] # list of current threads (i.e. multi-camera/thread safe)
###########################
def closeDownAllThreadsCleanly():
global exitingNow
global threadList
# set exit flag to cause each thread to exit
exitingNow = True
# for each thread wait for it to exit
for thread in threadList:
thread.join()
###########################
atexit.register(closeDownAllThreadsCleanly)
##########################################################################
class CameraVideoStream:
def __init__(self, src=None, backend=None,
name="CameraVideoStream", use_tapi=False):
# initialize the thread name
self.name = name
# initialize the variables used to indicate if the thread should
# be stopped or suspended
self.stopped = False
self.suspend = False
# set these to null values initially
self.grabbed = 0
self.frame = None
# set OpenCV Transparent API usage
self.tapi = use_tapi
# set some sensible backends for real-time video capture from
# directly connected hardware on a per-OS basis,
        # which can be overridden via the open() method
if sys.platform.startswith('linux'): # all Linux
self.backend_default = cv2.CAP_V4L
elif sys.platform.startswith('win'): # MS Windows
self.backend_default = cv2.CAP_DSHOW
elif sys.platform.startswith('darwin'): # macOS
self.backend_default = cv2.CAP_QT
else:
self.backend_default = cv2.CAP_ANY # auto-detect via OpenCV
# if a source was specified at init, proceed to open device
if not(src is None):
self.open(src, backend)
def open(self, src=0, backend=None):
        # determine the backend specified by the user
if (backend is None):
backend = self.backend_default
        # check if already opened via the init method
if (self.grabbed > 0):
return True
# initialize the video camera stream and read the first frame
# from the stream
self.camera = cv2.VideoCapture(src, backend)
(self.grabbed, self.frame) = self.camera.read()
# only start the thread if in-fact the camera read was successful
if (self.grabbed):
# create the thread to read frames from the video stream
thread = Thread(target=self.update, name=self.name, args=())
# append thread to global array of threads
threadList.append(thread)
# get thread id we will use to address thread on list
self.threadID = len(threadList) - 1
# start thread and set it to run in background
threadList[self.threadID].daemon = True
threadList[self.threadID].start()
return (self.grabbed > 0)
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set or exiting, stop the
# thread
if self.stopped or exitingNow:
self.grabbed = 0 # set flag to ensure isOpen() returns False
self.camera.release() # cleanly release camera hardware
return
# otherwise, read the next frame from the stream
# provided we are not suspended
if not(self.suspend):
(self.grabbed, self.frame) = self.camera.read()
def grab(self):
# return status of most recent grab by the thread
return self.grabbed
def retrieve(self):
# same as read() in the context of threaded capture
return self.read()
def read(self):
# return the frame most recently read
if (self.tapi):
# return OpenCV Transparent API UMat frame for H/W acceleration
return (self.grabbed, cv2.UMat(self.frame))
# return standard numpy frame
return (self.grabbed, self.frame)
def isOpened(self):
# indicate that the camera is open successfully
return (self.grabbed > 0)
def release(self):
# indicate that the thread should be stopped
self.stopped = True
def set(self, property_name, property_value):
# set a video capture property (behavior as per OpenCV manual for
# VideoCapture)
# first suspend thread
self.suspend = True
# set value - wrapping it in grabs() so it takes effect
self.camera.grab()
ret_val = self.camera.set(property_name, property_value)
self.camera.grab()
# whilst we are still suspended flush the frame buffer held inside
# the object by reading a new frame with new settings otherwise a race
# condition will exist between the thread's next call to update() after
# it un-suspends and the next call to read() by the object user
(self.grabbed, self.frame) = self.camera.read()
# restart thread by unsuspending it
self.suspend = False
return ret_val
def get(self, property_name):
# get a video capture property (behavior as per OpenCV manual for
# VideoCapture)
return self.camera.get(property_name)
def getBackendName(self):
# get a video capture backend (behavior as per OpenCV manual for
# VideoCapture)
return self.camera.getBackendName()
def getExceptionMode(self):
# get a video capture exception mode (behavior as per OpenCV manual for
# VideoCapture)
return self.camera.getExceptionMode()
def setExceptionMode(self, enable):
        # set the video capture exception mode (behavior as per OpenCV manual for
# VideoCapture)
return self.camera.setExceptionMode(enable)
def __del__(self):
self.stopped = True
self.suspend = True
def __exit__(self, exec_type, exc_value, traceback):
self.stopped = True
self.suspend = True
##########################################################################
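# ----------------------------------------------------------------------
# Additional usage sketch (editorial addition, not part of the original
# module): changing capture properties on the threaded stream. set()
# suspends the reader thread, applies the property and refreshes the
# buffered frame before resuming, as described above. Assumes a camera
# is available at index 0.
# ----------------------------------------------------------------------
if __name__ == "__main__":
    cap = CameraVideoStream()
    if cap.open(src=0):
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
        grabbed, frame = cap.read()
        if grabbed:
            print("captured frame of size", frame.shape)
        cap.release()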
|
broker.py
|
# -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for controlling instances of cloud-testenv-broker processes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import errno
import json
import os
import os.path
import socket
import subprocess
import threading
import time
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.command_lib.emulators import util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import http
from googlecloudsdk.core import log
from googlecloudsdk.core.util import platforms
import httplib2
import six.moves.http_client
import six.moves.urllib.error
import six.moves.urllib.parse
import six.moves.urllib.request
class BrokerError(exceptions.Error):
"""All errors raised by this module subclass BrokerError."""
pass
class BrokerNotRunningError(BrokerError):
pass
class RequestError(BrokerError):
"""Errors associated with failed HTTP requests subclass RequestError."""
pass
class RequestTimeoutError(RequestError):
pass
class RequestSocketError(RequestError):
"""A socket error. Check the errno field."""
def __init__(self, *args, **kwargs):
super(RequestSocketError, self).__init__(*args)
self.errno = None
def SocketConnResetErrno():
"""The errno value for a socket connection reset error."""
current_os = platforms.OperatingSystem.Current()
if current_os == platforms.OperatingSystem.WINDOWS:
return errno.WSAECONNRESET # pytype: disable=module-attr
return errno.ECONNRESET
def SocketConnRefusedErrno():
"""The errno value for a socket connection refused error."""
current_os = platforms.OperatingSystem.Current()
if current_os == platforms.OperatingSystem.WINDOWS:
return errno.WSAECONNREFUSED # pytype: disable=module-attr
return errno.ECONNREFUSED
def _Await(fn, timeout_secs):
"""Waits up to timeout_secs for fn() to return True."""
deadline = time.time() + timeout_secs
while time.time() < deadline:
if fn():
return True
time.sleep(0.2)
return False
def _EmulatorPath(emulator_id=None, verb=None):
"""Builds a broker request path for operating on the specified emulator."""
path = '/v1/emulators'
if emulator_id:
path += '/' + six.moves.urllib.parse.quote(emulator_id)
if verb:
path += ':' + six.moves.urllib.parse.quote(verb) # pytype: disable=wrong-arg-types
return path
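# Illustrative examples (editorial comment, derived from the path-building
# logic above; 'datastore' is a hypothetical emulator id):
#   _EmulatorPath()                          -> '/v1/emulators'
#   _EmulatorPath('datastore')               -> '/v1/emulators/datastore'
#   _EmulatorPath('datastore', verb='start') -> '/v1/emulators/datastore:start'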
class Broker(object):
"""Broker manages a single instance of a broker process.
The broker process may be started through an instance of this class. An
already-running process can be manipulated through an instance of this class
as well.
"""
def __init__(self, address, config_file=None, broker_dir=None):
"""Constructor.
Args:
address: (str) The host or host-port of the broker server. The server may
already be running.
config_file: (str) The full path to the broker config file.
broker_dir: (str) A custom path to the broker directory.
"""
if config_file is not None:
assert os.path.isabs(config_file)
self._address = address
self._config_file = config_file
if broker_dir:
self._broker_dir = broker_dir
else:
self._broker_dir = os.path.join(util.GetCloudSDKRoot(), 'bin', 'broker')
self._host_port = arg_parsers.HostPort.Parse(address)
self._current_platform = platforms.Platform.Current()
self._process = None
self._comm_thread = None
def Start(self, redirect_output=False, logtostderr=False, wait_secs=10):
"""Starts the broker server, optionally with output redirection.
Args:
redirect_output: (bool) Whether to merge the stdout and stderr of the
broker server with the current process' output.
logtostderr: (bool) Whether the broker should log to stderr instead of
to a log file.
wait_secs: (float) The maximum time to wait for the broker to start
serving.
Raises:
BrokerError: If start failed.
"""
if self._process or self.IsRunning():
# Already started, possibly by another process.
return
args = [self._BrokerBinary()]
if self._host_port.host:
args.append('--host={0}'.format(self._host_port.host))
if self._host_port.port:
args.append('--port={0}'.format(self._host_port.port))
if self._config_file:
args.append('--config_file={0}'.format(self._config_file))
if logtostderr:
args.append('--logtostderr') # Disables file logging.
# The broker is run as a detached (daemon) process.
popen_args = self._current_platform.AsyncPopenArgs()
log.info('Starting broker: %r', args)
if redirect_output:
# Pipe the broker's output to our own, communicating on another thread
# to avoid blocking the current thread.
self._process = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**popen_args)
# pytype: disable=wrong-arg-types
self._comm_thread = threading.Thread(target=self._process.communicate)
# pytype: enable=wrong-arg-types
self._comm_thread.start()
else:
self._process = subprocess.Popen(args, **popen_args)
if not _Await(self.IsRunning, wait_secs):
log.warning('Broker did not start within {0}s'.format(wait_secs))
try:
# Clean up.
self.Shutdown()
except BrokerError:
pass
raise BrokerError('Broker failed to start')
log.info('Started broker: %s' % self._address)
def IsRunning(self):
"""Returns True iff the broker is known to be running."""
# We issue an RPC to check if the broker is running.
try:
response, _ = self._SendJsonRequest('GET', _EmulatorPath(),
timeout_secs=1.0)
return response.status == six.moves.http_client.OK
except RequestError:
return False
def Shutdown(self, wait_secs=10):
"""Shuts down the broker server.
Args:
wait_secs: (float) The maximum time to wait for the broker to shutdown.
Raises:
BrokerError: If shutdown failed.
"""
if self._process:
try:
execution_utils.KillSubprocess(self._process)
self._process = None
if self._comm_thread:
self._comm_thread.join()
self._comm_thread = None
except RuntimeError as e:
log.warning('Failed to shutdown broker: %s' % e)
raise BrokerError('Broker failed to shutdown: %s' % e)
else:
# Invoke the /shutdown handler.
try:
self._SendJsonRequest('POST', '/shutdown')
except RequestSocketError as e:
if e.errno not in (SocketConnRefusedErrno(), SocketConnResetErrno()):
raise
# We may get an exception reading the response to the shutdown
# request, because the shutdown may preempt the response.
if not _Await(lambda: not self.IsRunning(), wait_secs):
log.warning('Failed to shutdown broker: still running after {0}s'.format(
wait_secs))
raise BrokerError('Broker failed to shutdown: timed-out')
log.info('Shutdown broker.')
def CreateEmulator(self,
emulator_id,
path,
args,
target_patterns,
resolved_host=None):
"""Creates a new emulator entry.
Args:
emulator_id: (str) The emulator id
path: (str) The path to the emulator binary.
args: (list of str) The command line arguments to the emulator.
target_patterns: (list or str) The regular expressions used to match
input targets for the emulator.
resolved_host: (str) The address to use when resolving the new emulator.
Only specified if the lifetime of this emulator is not managed by
this broker.
Raises:
BrokerNotRunningError: If the broker is not running.
BrokerError: If the creation failed.
"""
if not self.IsRunning():
raise BrokerNotRunningError('Failed to create emulator')
emulator = {
'emulator_id': emulator_id,
'start_command': {
'path': path,
'args': args,
},
'rule': {
'rule_id': emulator_id,
'target_patterns': target_patterns,
}
}
if resolved_host:
emulator['rule']['resolved_host'] = resolved_host
url = _EmulatorPath()
body = json.dumps(emulator)
response, data = self._SendJsonRequest('POST', url, body=body)
if response.status != six.moves.http_client.OK:
log.warning('Failed to create emulator: {0} ({1})'.format(
response.reason, response.status))
raise BrokerError('Failed to create emulator: %s' % data)
def GetEmulator(self, emulator_id):
"""Returns emulator entry (Json dict).
Args:
emulator_id: (str) The id of the emulator to get.
Returns:
A Json dict representation of a google.emulators.Emulator proto message.
Raises:
BrokerNotRunningError: If the broker is not running.
BrokerError: If the get failed.
"""
if not self.IsRunning():
raise BrokerNotRunningError('Failed to get emulator: %s' % emulator_id)
response, data = self._SendJsonRequest('GET', _EmulatorPath(emulator_id))
if response.status != six.moves.http_client.OK:
raise BrokerError('Failed to get emulator: %s' % data)
return json.loads(data)
def ListEmulators(self):
"""Returns the list of emulators, or None.
Returns:
A list of Json dicts representing google.emulators.Emulator proto
messages, or None if the list operation fails.
Raises:
BrokerNotRunningError: If the broker is not running.
"""
if not self.IsRunning():
raise BrokerNotRunningError('Failed to list emulators')
try:
response, data = self._SendJsonRequest('GET', _EmulatorPath())
if response.status != six.moves.http_client.OK:
log.warning('Failed to list emulators: {0} ({1})'
.format(response.reason, response.status))
return
except RequestError:
return
list_response = json.loads(data)
try:
return list_response['emulators']
except KeyError:
# The expected values were not present.
return
def StartEmulator(self, emulator_id):
"""Starts the specified emulator via the broker, which must be running.
Args:
emulator_id: (str) The id of the emulator to start.
Returns:
True if the emulator is started. False if it was already running, cannot
be started, or is unknown.
Raises:
BrokerNotRunningError: If the broker is not running.
BrokerError: If the emulator could not be started for another reason.
"""
if not self.IsRunning():
raise BrokerNotRunningError('Failed to start emulator: %s' % emulator_id)
url = _EmulatorPath(emulator_id, verb='start')
response, data = self._SendJsonRequest('POST', url)
if response.status != six.moves.http_client.OK:
log.warning('Failed to start emulator {0}: {1} ({2})'.format(
emulator_id, response.reason, response.status))
raise BrokerError('Failed to start emulator: %s' % data)
def StopEmulator(self, emulator_id):
"""Stops the specified emulator via the broker, which must be running.
Args:
emulator_id: (str) The id of the emulator to stop.
Returns:
True if the emulator is stopped or wasn't running to begin with. False
if the emulator could not be stopped or is unknown.
Raises:
BrokerNotRunningError: If the broker is not running.
BrokerError: If the emulator could not be stopped for another reason.
"""
if not self.IsRunning():
raise BrokerNotRunningError('Failed to stop emulator: %s' % emulator_id)
url = _EmulatorPath(emulator_id, verb='stop')
response, data = self._SendJsonRequest('POST', url)
if response.status != six.moves.http_client.OK:
log.warning('Failed to stop emulator {0}: {1} ({2})'.format(
emulator_id, response.reason, response.status))
raise BrokerError('Failed to stop emulator: %s' % data)
def _BrokerBinary(self):
"""Returns the path to the broker binary."""
return '{0}/broker'.format(self._broker_dir)
def _SendJsonRequest(self, method, path, body=None, timeout_secs=300):
"""Sends a request to the broker.
Args:
method: (str) The HTTP method.
path: (str) The URI path.
body: (str) The request body.
timeout_secs: (float) The request timeout, in seconds.
Returns:
(HTTPResponse, str) or (None, None).
Raises:
RequestTimeoutError: The request timed-out.
RequestSocketError: The request failed due to a socket error.
RequestError: The request errored out in some other way.
"""
uri = 'http://{0}{1}'.format(self._address, path)
http_client = http.HttpClient(timeout=timeout_secs)
try:
http_response, body = http_client.request(
uri=uri,
method=method,
headers={'Content-Type': 'application/json; charset=UTF-8'},
body=body)
return http_response, body.decode('utf-8')
except socket.error as e:
if isinstance(e, socket.timeout):
raise RequestTimeoutError(e)
error = RequestSocketError(e)
if e.errno:
error.errno = e.errno
raise error
except six.moves.http_client.HTTPException as e:
if isinstance(e, six.moves.http_client.ResponseNotReady):
raise RequestTimeoutError(e)
raise RequestError(e)
except httplib2.HttpLib2Error as e:
raise RequestError(e)
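# ----------------------------------------------------------------------
# Usage sketch (editorial addition, not part of the original module):
# drives a broker listening on localhost and registers a hypothetical
# emulator binary. The address, binary path, arguments and target
# patterns below are placeholders, not real values.
# ----------------------------------------------------------------------
def _ExampleUsage():
  broker = Broker('localhost:8080')
  broker.Start(logtostderr=True)
  try:
    broker.CreateEmulator(
        emulator_id='my-emulator',
        path='/path/to/emulator-binary',
        args=['--port=9000'],
        target_patterns=[r'my\.service\..*'])
    broker.StartEmulator('my-emulator')
    log.info('Known emulators: %s', broker.ListEmulators())
    broker.StopEmulator('my-emulator')
  finally:
    broker.Shutdown()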
|
benchmarker.py
|
from setup.linux.installer import Installer
from benchmark import framework_test
import os
import json
import subprocess
import time
import textwrap
import pprint
import csv
import sys
import logging
import socket
from multiprocessing import Process
from datetime import datetime
class Benchmarker:
##########################################################################################
# Public methods
##########################################################################################
############################################################
# Prints all the available tests
############################################################
def run_list_tests(self):
all_tests = self.__gather_tests
for test in all_tests:
print test.name
self.__finish()
############################################################
# End run_list_tests
############################################################
############################################################
# Prints the metadata for all the available tests
############################################################
def run_list_test_metadata(self):
all_tests = self.__gather_tests
all_tests_json = json.dumps(map(lambda test: {
"name": test.name,
"approach": test.approach,
"classification": test.classification,
"database": test.database,
"framework": test.framework,
"language": test.language,
"orm": test.orm,
"platform": test.platform,
"webserver": test.webserver,
"os": test.os,
"database_os": test.database_os,
"display_name": test.display_name,
"notes": test.notes,
"versus": test.versus
}, all_tests))
with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
f.write(all_tests_json)
self.__finish()
############################################################
# End run_list_test_metadata
############################################################
############################################################
# parse_timestamp
# Re-parses the raw data for a given timestamp
############################################################
def parse_timestamp(self):
all_tests = self.__gather_tests
for test in all_tests:
test.parse_all()
self.__parse_results(all_tests)
self.__finish()
############################################################
# End parse_timestamp
############################################################
############################################################
# Run the tests:
# This process involves setting up the client/server machines
# with any necessary change. Then going through each test,
# running their setup script, verifying the URLs, and
# running benchmarks against them.
############################################################
def run(self):
##########################
# Get a list of all known
# tests that we can run.
##########################
all_tests = self.__gather_tests
##########################
# Setup client/server
##########################
print textwrap.dedent("""
=====================================================
Preparing Server, Database, and Client ...
=====================================================
""")
self.__setup_server()
self.__setup_database()
self.__setup_client()
##########################
# Run tests
##########################
self.__run_tests(all_tests)
##########################
# Parse results
##########################
if self.mode == "benchmark":
print textwrap.dedent("""
=====================================================
Parsing Results ...
=====================================================
""")
self.__parse_results(all_tests)
self.__finish()
############################################################
# End run
############################################################
############################################################
# database_sftp_string(batch_file)
# generates a fully qualified URL for sftp to database
############################################################
def database_sftp_string(self, batch_file):
sftp_string = "sftp -oStrictHostKeyChecking=no "
if batch_file != None: sftp_string += " -b " + batch_file + " "
if self.database_identity_file != None:
sftp_string += " -i " + self.database_identity_file + " "
return sftp_string + self.database_user + "@" + self.database_host
############################################################
# End database_sftp_string
############################################################
############################################################
# client_sftp_string(batch_file)
# generates a fully qualified URL for sftp to client
############################################################
def client_sftp_string(self, batch_file):
sftp_string = "sftp -oStrictHostKeyChecking=no "
if batch_file != None: sftp_string += " -b " + batch_file + " "
if self.client_identity_file != None:
sftp_string += " -i " + self.client_identity_file + " "
return sftp_string + self.client_user + "@" + self.client_host
############################################################
# End client_sftp_string
############################################################
############################################################
# generate_url(url, port)
# generates a fully qualified URL for accessing a test url
############################################################
def generate_url(self, url, port):
return self.server_host + ":" + str(port) + url
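  # e.g. (editorial comment, hypothetical values) with server_host "10.0.0.1",
  # generate_url("/json", 8080) returns "10.0.0.1:8080/json"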
############################################################
# End generate_url
############################################################
############################################################
# output_file(test_name, test_type)
# returns the output file for this test_name and test_type
# timestamp/test_type/test_name/raw
############################################################
def output_file(self, test_name, test_type):
path = os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return path
############################################################
# End output_file
############################################################
############################################################
# full_results_directory
############################################################
def full_results_directory(self):
path = os.path.join(self.result_directory, self.timestamp)
try:
os.makedirs(path)
except OSError:
pass
return path
############################################################
  # End full_results_directory
############################################################
############################################################
  # Latest intermediate results directory
############################################################
def latest_results_directory(self):
path = os.path.join(self.result_directory,"latest")
try:
os.makedirs(path)
except OSError:
pass
return path
############################################################
# report_results
############################################################
def report_results(self, framework, test, results):
if test not in self.results['rawData'].keys():
self.results['rawData'][test] = dict()
self.results['rawData'][test][framework.name] = results
############################################################
# End report_results
############################################################
##########################################################################################
# Private methods
##########################################################################################
############################################################
# Gathers all the tests
############################################################
@property
def __gather_tests(self):
tests = []
# Loop through each directory (we assume we're being run from the benchmarking root)
# and look for the files that signify a benchmark test
for dirname, dirnames, filenames in os.walk('.'):
# Look for the benchmark_config file, this will set up our tests.
# Its format looks like this:
#
# {
# "framework": "nodejs",
# "tests": [{
# "default": {
# "setup_file": "setup",
# "json_url": "/json"
# },
# "mysql": {
# "setup_file": "setup",
# "db_url": "/mysql",
# "query_url": "/mysql?queries="
# },
# ...
# }]
# }
if 'benchmark_config' in filenames:
config = None
config_file_name = os.path.join(dirname, 'benchmark_config')
with open(config_file_name, 'r') as config_file:
# Load json file into config object
try:
config = json.load(config_file)
except:
print("Error loading '%s'." % config_file_name)
raise
if config == None:
continue
tests = tests + framework_test.parse_config(config, dirname[2:], self)
tests.sort(key=lambda x: x.name)
return tests
############################################################
# End __gather_tests
############################################################
############################################################
# Gathers all the frameworks
############################################################
def __gather_frameworks(self):
frameworks = []
# Loop through each directory (we assume we're being run from the benchmarking root)
for dirname, dirnames, filenames in os.walk('.'):
# Look for the benchmark_config file, this will contain our framework name
      # Its format looks like this:
#
# {
# "framework": "nodejs",
# "tests": [{
# "default": {
# "setup_file": "setup",
# "json_url": "/json"
# },
# "mysql": {
# "setup_file": "setup",
# "db_url": "/mysql",
# "query_url": "/mysql?queries="
# },
# ...
# }]
# }
if 'benchmark_config' in filenames:
config = None
with open(os.path.join(dirname, 'benchmark_config'), 'r') as config_file:
# Load json file into config object
config = json.load(config_file)
if config == None:
continue
frameworks.append(str(config['framework']))
return frameworks
############################################################
# End __gather_frameworks
############################################################
############################################################
  # Makes any necessary changes to the server that should be
  # made before running the tests. This involves setting kernel
  # parameters to allow for more connections and more open file
  # descriptors.
#
# http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
############################################################
def __setup_server(self):
try:
if os.name == 'nt':
return True
subprocess.check_call(["sudo","bash","-c","cd /sys/devices/system/cpu; ls -d cpu*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
subprocess.check_call("sudo -s ulimit -n 65535".rsplit(" "))
subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
except subprocess.CalledProcessError:
return False
############################################################
# End __setup_server
############################################################
############################################################
# Makes any necessary changes to the database machine that
# should be made before running the tests. Is very similar
# to the server setup, but may also include database specific
# changes.
############################################################
def __setup_database(self):
p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
p.communicate("""
sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
sudo sysctl -w net.core.somaxconn=65535
sudo -s ulimit -n 65535
sudo sysctl net.ipv4.tcp_tw_reuse=1
sudo sysctl net.ipv4.tcp_tw_recycle=1
sudo sysctl -w kernel.shmmax=2147483648
sudo sysctl -w kernel.shmall=2097152
""")
############################################################
# End __setup_database
############################################################
############################################################
# Makes any necessary changes to the client machine that
# should be made before running the tests. Is very similar
# to the server setup, but may also include client specific
# changes.
############################################################
def __setup_client(self):
p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
p.communicate("""
sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
sudo sysctl -w net.core.somaxconn=65535
sudo -s ulimit -n 65535
sudo sysctl net.ipv4.tcp_tw_reuse=1
sudo sysctl net.ipv4.tcp_tw_recycle=1
sudo sysctl -w kernel.shmmax=2147483648
sudo sysctl -w kernel.shmall=2097152
""")
############################################################
# End __setup_client
############################################################
############################################################
# __run_tests
#
  # 2013-10-02 ASB  Calls each test passed in tests to
  #                 __run_test in a separate process. Each
  #                 test is given a set amount of time; if that
  #                 time is exceeded, the child process (and
  #                 subsequently all of its child processes) is
  #                 killed. Uses the multiprocessing module.
############################################################
def __run_tests(self, tests):
logging.debug("Start __run_tests.")
logging.debug("__name__ = %s",__name__)
if self.os.lower() == 'windows':
logging.debug("Executing __run_tests on Windows")
for test in tests:
self.__run_test(test)
else:
logging.debug("Executing __run_tests on Linux")
# These features do not work on Windows
for test in tests:
if __name__ == 'benchmark.benchmarker':
test_process = Process(target=self.__run_test, args=(test,))
test_process.start()
test_process.join(self.run_test_timeout_seconds)
          if test_process.is_alive():
logging.debug("Child process for %s is still alive. Terminating.",test.name)
self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
test_process.terminate()
logging.debug("End __run_tests.")
############################################################
# End __run_tests
############################################################
############################################################
# __run_test
# 2013-10-02 ASB Previously __run_tests. This code now only
# processes a single test.
#
  # Runs a single test: restarts the databases, starts the test
  # application, verifies its URLs, optionally benchmarks it, and
  # then stops it, recording intermediate results along the way.
############################################################
def __run_test(self, test):
# If the user specified which tests to run, then
# we can skip over tests that are not in that list
if self.test != None and test.name not in self.test:
return
if hasattr(test, 'skip'):
if test.skip.lower() == "true":
logging.info("Test %s benchmark_config specifies to skip this test. Skipping.", test.name)
return
if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
# the operating system requirements of this test for the
# application server or the database server don't match
# our current environment
logging.info("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.")
return
# If the test is in the excludes list, we skip it
if self.exclude != None and test.name in self.exclude:
logging.info("Test %s has been added to the excludes list. Skipping.", test.name)
return
# If the test does not contain an implementation of the current test-type, skip it
if self.type != 'all' and not test.contains_type(self.type):
logging.info("Test type %s does not contain an implementation of the current test-type. Skipping", self.type)
return
logging.debug("test.os.lower() = %s test.database_os.lower() = %s",test.os.lower(),test.database_os.lower())
logging.debug("self.results['frameworks'] != None: " + str(self.results['frameworks'] != None))
logging.debug("test.name: " + str(test.name))
logging.debug("self.results['completed']: " + str(self.results['completed']))
if self.results['frameworks'] != None and test.name in self.results['completed']:
logging.info('Framework %s found in latest saved data. Skipping.',str(test.name))
return
print textwrap.dedent("""
=====================================================
Beginning {name}
-----------------------------------------------------
""".format(name=test.name))
##########################
# Start this test
##########################
print textwrap.dedent("""
-----------------------------------------------------
Starting {name}
-----------------------------------------------------
""".format(name=test.name))
try:
p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
p.communicate("""
sudo restart mysql
sudo restart mongodb
sudo /etc/init.d/postgresql restart
""")
time.sleep(10)
if self.__is_port_bound(test.port):
self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
print textwrap.dedent("""
---------------------------------------------------------
Error: Port {port} is not available before start {name}
---------------------------------------------------------
""".format(name=test.name, port=str(test.port)))
return
result = test.start()
if result != 0:
test.stop()
time.sleep(5)
print "ERROR: Problem starting " + test.name
print textwrap.dedent("""
-----------------------------------------------------
Stopped {name}
-----------------------------------------------------
""".format(name=test.name))
self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
return
time.sleep(self.sleep)
##########################
# Verify URLs
##########################
print textwrap.dedent("""
-----------------------------------------------------
Verifying URLs for {name}
-----------------------------------------------------
""".format(name=test.name))
test.verify_urls()
##########################
# Benchmark this test
##########################
if self.mode == "benchmark":
print textwrap.dedent("""
-----------------------------------------------------
Benchmarking {name} ...
-----------------------------------------------------
""".format(name=test.name))
test.benchmark()
##########################
# Stop this test
##########################
test.stop()
time.sleep(5)
if self.__is_port_bound(test.port):
self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
print textwrap.dedent("""
-----------------------------------------------------
Error: Port {port} was not released by stop {name}
-----------------------------------------------------
""".format(name=test.name, port=str(test.port)))
return
print textwrap.dedent("""
-----------------------------------------------------
Stopped {name}
-----------------------------------------------------
""".format(name=test.name))
time.sleep(5)
##########################################################
# Save results thus far into toolset/benchmark/latest.json
##########################################################
print textwrap.dedent("""
----------------------------------------------------
Saving results through {name}
----------------------------------------------------
""".format(name=test.name))
self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
except (OSError, IOError, subprocess.CalledProcessError):
self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
print textwrap.dedent("""
-----------------------------------------------------
Subprocess Error {name}
-----------------------------------------------------
""".format(name=test.name))
try:
test.stop()
except (subprocess.CalledProcessError):
self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
print textwrap.dedent("""
-----------------------------------------------------
Subprocess Error: Test .stop() raised exception {name}
-----------------------------------------------------
""".format(name=test.name))
except (KeyboardInterrupt, SystemExit):
test.stop()
print """
-----------------------------------------------------
Cleaning up....
-----------------------------------------------------
"""
self.__finish()
sys.exit()
############################################################
  # End __run_test
############################################################
############################################################
# __is_port_bound
# Check if the requested port is available. If it
# isn't available, then a previous test probably didn't
# shutdown properly.
############################################################
def __is_port_bound(self, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
      # Try to bind to this port on all IP addresses
s.bind(("", port))
# If we get here, we were able to bind successfully,
# which means the port is free.
except:
# If we get an exception, it might be because the port is still bound
# which would be bad, or maybe it is a privileged port (<1024) and we
# are not running as root, or maybe the server is gone, but sockets are
# still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
# connect.
try:
s.connect(("127.0.0.1", port))
# If we get here, we were able to connect to something, which means
# that the port is still bound.
return True
except:
# An exception means that we couldn't connect, so a server probably
# isn't still running on the port.
pass
finally:
s.close()
return False
############################################################
# End __is_port_bound
############################################################
############################################################
# __parse_results
  # Aggregates the parsed results from each test, adds commit and
  # source-line counts for each framework, and writes the combined
  # results.json file.
############################################################
def __parse_results(self, tests):
    # Run the method to get the commit count of each framework.
self.__count_commits()
# Call the method which counts the sloc for each framework
self.__count_sloc()
# Time to create parsed files
# Aggregate JSON file
with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
f.write(json.dumps(self.results))
############################################################
# End __parse_results
############################################################
#############################################################
# __count_sloc
# This is assumed to be run from the benchmark root directory
#############################################################
def __count_sloc(self):
all_frameworks = self.__gather_frameworks()
jsonResult = {}
for framework in all_frameworks:
try:
command = "cloc --list-file=" + framework['directory'] + "/source_code --yaml"
lineCount = subprocess.check_output(command, shell=True)
# Find the last instance of the word 'code' in the yaml output. This should
# be the line count for the sum of all listed files or just the line count
# for the last file in the case where there's only one file listed.
lineCount = lineCount[lineCount.rfind('code'):len(lineCount)]
lineCount = lineCount.strip('code: ')
lineCount = lineCount[0:lineCount.rfind('comment')]
        jsonResult[framework] = int(lineCount)
except:
continue
self.results['rawData']['slocCounts'] = jsonResult
############################################################
# End __count_sloc
############################################################
############################################################
# __count_commits
############################################################
def __count_commits(self):
all_frameworks = self.__gather_frameworks()
jsonResult = {}
for framework in all_frameworks:
try:
command = "git rev-list HEAD -- " + framework + " | sort -u | wc -l"
commitCount = subprocess.check_output(command, shell=True)
jsonResult[framework] = int(commitCount)
except:
continue
self.results['rawData']['commitCounts'] = jsonResult
self.commits = jsonResult
############################################################
# End __count_commits
############################################################
############################################################
# __write_intermediate_results
############################################################
def __write_intermediate_results(self,test_name,status_message):
try:
self.results["completed"][test_name] = status_message
with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
f.write(json.dumps(self.results))
except (IOError):
logging.error("Error writing results.json")
############################################################
# End __write_intermediate_results
############################################################
############################################################
# __finish
############################################################
def __finish(self):
print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
############################################################
# End __finish
############################################################
##########################################################################################
# Constructor
##########################################################################################
############################################################
# Initialize the benchmarker. The args are the arguments
# parsed via argparser.
############################################################
def __init__(self, args):
self.__dict__.update(args)
self.start_time = time.time()
self.run_test_timeout_seconds = 3600
# setup logging
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
# setup some additional variables
if self.database_user == None: self.database_user = self.client_user
if self.database_host == None: self.database_host = self.client_host
if self.database_identity_file == None: self.database_identity_file = self.client_identity_file
# setup results and latest_results directories
self.result_directory = os.path.join("results", self.name)
self.latest_results_directory = self.latest_results_directory()
if self.parse != None:
self.timestamp = self.parse
else:
self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
# Setup the concurrency levels array. This array goes from
# starting_concurrency to max concurrency, doubling each time
self.concurrency_levels = []
concurrency = self.starting_concurrency
while concurrency <= self.max_concurrency:
self.concurrency_levels.append(concurrency)
concurrency = concurrency * 2
# Setup query interval array
# starts at 1, and goes up to max_queries, using the query_interval
self.query_intervals = []
queries = 1
while queries <= self.max_queries:
self.query_intervals.append(queries)
if queries == 1:
queries = 0
queries = queries + self.query_interval
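    # Worked example (editorial comment, hypothetical values): with
    # starting_concurrency=8 and max_concurrency=256 the levels are
    # [8, 16, 32, 64, 128, 256]; with query_interval=5 and max_queries=20
    # the query intervals are [1, 5, 10, 15, 20].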
# Load the latest data
#self.latest = None
#try:
# with open('toolset/benchmark/latest.json', 'r') as f:
# # Load json file into config object
# self.latest = json.load(f)
# logging.info("toolset/benchmark/latest.json loaded to self.latest")
# logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
#except IOError:
# logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
#
#self.results = None
#try:
# if self.latest != None and self.name in self.latest.keys():
# with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
# # Load json file into config object
# self.results = json.load(f)
#except IOError:
# pass
self.results = None
try:
with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
#Load json file into results object
self.results = json.load(f)
except IOError:
logging.warn("results.json for test %s not found.",self.name)
if self.results == None:
self.results = dict()
self.results['name'] = self.name
self.results['concurrencyLevels'] = self.concurrency_levels
self.results['queryIntervals'] = self.query_intervals
self.results['frameworks'] = [t.name for t in self.__gather_tests]
self.results['duration'] = self.duration
self.results['rawData'] = dict()
self.results['rawData']['json'] = dict()
self.results['rawData']['db'] = dict()
self.results['rawData']['query'] = dict()
self.results['rawData']['fortune'] = dict()
self.results['rawData']['update'] = dict()
self.results['rawData']['plaintext'] = dict()
self.results['completed'] = dict()
else:
#for x in self.__gather_tests():
# if x.name not in self.results['frameworks']:
# self.results['frameworks'] = self.results['frameworks'] + [x.name]
# Always overwrite framework list
self.results['frameworks'] = [t.name for t in self.__gather_tests]
# Setup the ssh command string
self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
if self.database_identity_file != None:
self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
if self.client_identity_file != None:
self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
if self.install_software:
install = Installer(self)
install.install_software()
############################################################
# End __init__
############################################################
|
test_tcp.py
|
import sys
import time
import logging
import threading
from poap.strategy import FixedSampleStrategy
from poap.tcpserve import ThreadedTCPServer
from poap.tcpserve import SocketWorker
# Default delay used to simulate a slow objective function
# (overridden by the first command-line argument)
TIMEOUT = 0
def f(x):
logging.info("Request for {0}".format(x))
if TIMEOUT > 0:
time.sleep(TIMEOUT)
logging.info("OK, done")
return (x-1.23)*(x-1.23)
def worker_main(name):
logging.info("Launching worker on port {0}".format(name[1]))
SocketWorker(sockname=name, retries=1).run()
def main():
logging.basicConfig(format="%(name)-18s: %(levelname)-8s %(message)s",
level=logging.INFO)
# Launch controller
strategy = FixedSampleStrategy([1, 2, 3, 4, 5])
server = ThreadedTCPServer(strategy=strategy)
cthread = threading.Thread(target=server.run)
cthread.start()
# Get controller port
name = server.sockname
logging.info("Launch controller at {0}".format(name))
# Launch workers
wthreads = []
for k in range(2):
wthread = threading.Thread(target=worker_main, args=(name,))
wthread.start()
wthreads.append(wthread)
# Wait on controller and workers
cthread.join()
for t in wthreads:
t.join()
result = server.controller.best_point()
print("Final: {0:.3e} @ {1}".format(result.value, result.params))
if __name__ == '__main__':
if len(sys.argv) > 1:
TIMEOUT = float(sys.argv[1])
main()
|
test_throttle.py
|
# -*- coding: utf-8 -*-
import time
import threading
import pytest
from xTool.utils.throttle import (
BoundedEmptySemaphore,
GlobalThrottle,
LocalThrottle,
throttle,
)
def test_BoundedEmptySemaphore():
max_unused = 2
semaphore = BoundedEmptySemaphore(max_unused)
semaphore.release()
semaphore.release()
with pytest.raises(ValueError):
semaphore.release()
def test_GlobalThrottle():
@GlobalThrottle(1, 2)
def f():
pass
time.sleep(1)
begin_time = time.time()
t_list = []
for _ in range(5):
t = threading.Thread(target=f)
t.start()
t_list.append(t)
for t in t_list:
t.join()
end_time = time.time()
duration = end_time - begin_time
assert duration >= 4 and duration < 5
def test_LocalThrottle():
def f():
@LocalThrottle(1)
def f1():
pass
f1()
f1()
f1()
begin_time = time.time()
t = threading.Thread(target=f)
t.start()
t.join()
end_time = time.time()
duration = end_time - begin_time
assert duration >= 2 and duration < 3
def test_throttle():
start = time.time()
with throttle(1):
pass
assert 1 <= time.time() - start <= 1.1
@throttle(1)
def f():
pass
start = time.time()
f()
assert 1 <= time.time() - start <= 1.1
start = time.time()
with throttle(1):
time.sleep(2)
assert 2 <= time.time() - start <= 2.1
@throttle(1)
def f():
time.sleep(2)
start = time.time()
f()
assert 2 <= time.time() - start <= 2.1
start = time.time()
try:
with throttle(1):
raise ValueError("foo")
except ValueError:
end = time.time()
assert 0 <= end - start <= 0.1
@throttle(1)
def f():
raise ValueError("foo")
start = time.time()
try:
f()
except ValueError:
end = time.time()
assert 0 <= end - start <= 0.1
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test amerox shutdown."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
Thread(target=test_long_call, args=(node,)).start()
# wait 1 second to ensure event loop waits for current connections to close
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'BTC':8, 'mBTC':5, 'uBTC':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
class NotEnoughFunds(Exception): pass
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
# only prints with --verbose flag
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_stderr(self, *msg):
print_stderr("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
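# Illustrative sketch (not part of the original file): decorating a function
# with @profiler above; the timing line is emitted through print_error, so it
# only shows up when verbosity is enabled. The function name is made up.
@profiler
def _example_profiled_sum(n):
    return sum(range(n))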
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electrum.electrum'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
cast string to bytes() like object, but for python2 support it's bytearray copy
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum")
else:
#raise Exception("No home directory found in environment variables.")
return
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, is_diff=False, num_zeros = 0, decimal_point = 8, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
x = int(x) # Some callers pass Decimal
    scale_factor = pow(10, decimal_point)
integer_part = "{:n}".format(int(abs(x) / scale_factor))
if x < 0:
integer_part = '-' + integer_part
elif is_diff:
integer_part = '+' + integer_part
dp = localeconv()['decimal_point']
fract_part = ("{:0" + str(decimal_point) + "}").format(abs(x) % scale_factor)
fract_part = fract_part.rstrip('0')
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
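# Examples for the helpers above, assuming a locale whose decimal point is '.':
#   format_satoshis_plain(150000000)      -> '1.5'
#   format_satoshis(1234500000)           -> '12.345'
#   format_satoshis(1000, is_diff=True)   -> '+0.00001'
#   format_satoshis(None)                 -> 'unknown'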
def timestamp_to_datetime(timestamp):
try:
return datetime.fromtimestamp(timestamp)
except:
return None
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'Biteasy.com': ('https://www.biteasy.com/blockchain',
{'tx': 'transactions', 'addr': 'addresses'}),
'Bitflyer.jp': ('https://chainflyer.bitflyer.jp',
{'tx': 'Transaction', 'addr': 'Address'}),
'Blockchain.info': ('https://blockchain.info',
{'tx': 'tx', 'addr': 'address'}),
'blockchainbdgpzk.onion': ('https://blockchainbdgpzk.onion',
{'tx': 'tx', 'addr': 'address'}),
'Blockr.io': ('https://btc.blockr.io',
{'tx': 'tx/info', 'addr': 'address/info'}),
'Blocktrail.com': ('https://www.blocktrail.com/BTC',
{'tx': 'tx', 'addr': 'address'}),
'BTC.com': ('https://chain.btc.com',
{'tx': 'tx', 'addr': 'address'}),
'Chain.so': ('https://www.chain.so',
{'tx': 'tx/BTC', 'addr': 'address/BTC'}),
'Insight.is': ('https://insight.bitpay.com',
{'tx': 'tx', 'addr': 'address'}),
'TradeBlock.com': ('https://tradeblock.com/blockchain',
{'tx': 'tx', 'addr': 'address'}),
'BlockCypher.com': ('https://live.blockcypher.com/btc',
{'tx': 'tx', 'addr': 'address'}),
'Blockchair.com': ('https://blockchair.com/bitcoin',
{'tx': 'transaction', 'addr': 'address'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'}),
}
testnet_block_explorers = {
'Blocktrail.com': ('https://www.blocktrail.com/tBTC',
{'tx': 'tx', 'addr': 'address'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'}),
}
def block_explorer_info():
from . import bitcoin
return testnet_block_explorers if bitcoin.NetworkConstants.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'Blocktrail.com')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return "/".join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise BaseException("Not a bitcoin address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'bitcoin':
raise BaseException("Not a bitcoin URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise BaseException("Invalid bitcoin address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
    if 'message' in out:
        out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.setDaemon(True)
t.start()
return out
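# Example (ADDR standing in for a valid mainnet address):
#   parse_URI('bitcoin:ADDR?amount=0.01&message=donation')
# returns {'address': 'ADDR', 'amount': 1000000,
#          'message': 'donation', 'memo': 'donation'},
# with the amount converted to satoshis.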
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='bitcoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
except OSError as e:
print_error("OSError", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
def setup_thread_excepthook():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
|
__init__.py
|
import subprocess as sp
import multiprocessing as mp
from finntk.omor.anlys import pairs_to_dict
def analysis_to_pairs(ana):
for bit in ana.split("]|["):
k, v = bit.strip("[]").split("=", 1)
yield k, v
def parse_finnpos_line(line):
surf, _, lemma, feats, _ = line.split("\t")
return surf, lemma, feats
def analysis_to_dict(ana):
return pairs_to_dict(analysis_to_pairs(ana))
def batch_finnpos(source_iter, *args, maxsize=0, **kwargs):
"""
Shovel sentences through FinnPOS. A typical use case for this would be when
you want to tag a bunch of sentences with FinnPOS at the same time as doing
some other kind of transformation.
`source_iter` should be an iterator returning pairs `(sent, extra)` where
sent is a list of tokens and `extra` is any sentence identifier you would
like to pass through. It will be run in a new process. If you would like to
pass arguments to it, pass them in as extra arguments to `batch_finnpos`.
"""
    # use a plain value as the sentinel: an object() instance would lose its
    # identity when pickled through the multiprocessing queue
    done_sentinel = "__batch_finnpos_done__"
def source_func(finnpos, queue, *args, **kwargs):
for sent, extra in source_iter(*args, **kwargs):
finnpos.feed_sent(sent)
queue.put(extra)
queue.put(done_sentinel)
with FinnPOSCtx() as finnpos:
ctx = mp.get_context("fork")
        id_queue = ctx.Queue(maxsize=maxsize)
source_proc = ctx.Process(
target=source_func, args=(finnpos, id_queue) + args, kwargs=kwargs
)
source_proc.start()
while 1:
extra = id_queue.get()
            if extra == done_sentinel:
break
tagged_sent = finnpos.get_analys()
yield tagged_sent, extra
source_proc.join()
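# Illustrative sketch (not part of the original module): feeding batch_finnpos
# with a generator of (tokens, id) pairs. Assumes the `ftb-label` binary is on
# PATH and a platform where the "fork" start method is available; the helper
# names below are made up.
def _example_batch_finnpos(sentences):
    def source(sents):
        for idx, tokens in enumerate(sents):
            yield tokens, idx
    # Each result pairs the tagged sentence with the id passed through.
    for tagged_sent, idx in batch_finnpos(source, sentences):
        print(idx, tagged_sent)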
class FinnPOS():
def __init__(self):
self.proc = sp.Popen(
["ftb-label"], stdin=sp.PIPE, stdout=sp.PIPE, universal_newlines=True
)
def cleanup(self):
if not self.proc.stdin.closed:
self.proc.stdin.close()
def feed_sent(self, sent):
for token in sent:
self.proc.stdin.write(token)
self.proc.stdin.write("\n")
self.proc.stdin.write("\n")
self.proc.stdin.flush()
def get_analys(self):
tagged_sent = []
for line in self.proc.stdout:
if line == "\n":
break
surf, lemma, feats_str = parse_finnpos_line(line[:-1])
feats = analysis_to_dict(feats_str)
tagged_sent.append((surf, lemma, feats))
return tagged_sent
def __del__(self):
self.cleanup()
def __call__(self, sent):
"""
Transform a single sentence with FinnPOS.
Note that using this repeatedly serialises your processing pipeline
sentence-by-sentence. If performance is a concern, consider using
`batch_finnpos` if possible in this situation.
"""
self.feed_sent(sent)
return self.get_analys()
class FinnPOSCtx():
"""
This helper lets you get an instance of `FinnPOS` and ensures it is
correctly cleaned up. Usually you should use this instead of instantiating
FinnPOS directly.
"""
    def __enter__(self):
        self.finnpos = FinnPOS()
        return self.finnpos
    def __exit__(self, exc_type, exc_value, exc_tb):
        self.finnpos.cleanup()
        del self.finnpos
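# Illustrative sketch (not part of the original module): tagging one sentence
# through the context manager; assumes `ftb-label` is on PATH and uses a
# made-up Finnish sentence.
def _example_finnpos_ctx():
    with FinnPOSCtx() as finnpos:
        for surf, lemma, feats in finnpos(["Tämä", "on", "esimerkki", "."]):
            print(surf, lemma, feats)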
_global_finnpos = None
def sent_finnpos(sent):
    """
    Tag a single sentence with FinnPOS (see `FinnPOS.__call__`).
    This function will keep a single global copy of FinnPOS running.
    Note that this function is not thread safe and is a convenience for
    exploratory programming only. The recommended method is to use FinnPOSCtx.
    """
global _global_finnpos
if _global_finnpos is None:
_global_finnpos = FinnPOS()
return _global_finnpos(sent)
def cleanup():
    """
    Cleanup the global FinnPOS instance kept by `sent_finnpos`. If you want to
    use this, you should consider using `FinnPOSCtx` instead.
    """
    global _global_finnpos
    if _global_finnpos is not None:
        _global_finnpos.cleanup()
    _global_finnpos = None
|
test_browser_credential.py
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import functools
import random
import socket
import threading
import time
from azure.core.exceptions import ClientAuthenticationError
from azure.core.pipeline.policies import SansIOHTTPPolicy
from azure.identity import AuthenticationRequiredError, CredentialUnavailableError, InteractiveBrowserCredential
from azure.identity._internal import AuthCodeRedirectServer
from azure.identity._internal.user_agent import USER_AGENT
from msal import TokenCache
import pytest
from six.moves import urllib, urllib_parse
from helpers import (
build_aad_response,
build_id_token,
get_discovery_response,
mock_response,
Request,
validating_transport,
)
try:
from unittest.mock import Mock, patch
except ImportError: # python < 3.3
from mock import Mock, patch # type: ignore
WEBBROWSER_OPEN = InteractiveBrowserCredential.__module__ + ".webbrowser.open"
def test_no_scopes():
"""The credential should raise when get_token is called with no scopes"""
with pytest.raises(ValueError):
InteractiveBrowserCredential().get_token()
def test_authenticate():
client_id = "client-id"
environment = "localhost"
issuer = "https://" + environment
tenant_id = "some-tenant"
authority = issuer + "/" + tenant_id
access_token = "***"
scope = "scope"
# mock AAD response with id token
object_id = "object-id"
home_tenant = "home-tenant-id"
username = "me@work.com"
id_token = build_id_token(aud=client_id, iss=issuer, object_id=object_id, tenant_id=home_tenant, username=username)
auth_response = build_aad_response(
uid=object_id, utid=home_tenant, access_token=access_token, refresh_token="**", id_token=id_token
)
transport = validating_transport(
requests=[Request(url_substring=issuer)] * 3,
responses=[get_discovery_response(authority)] * 2 + [mock_response(json_payload=auth_response)],
)
# mock local server fakes successful authentication by immediately returning a well-formed response
oauth_state = "state"
auth_code_response = {"code": "authorization-code", "state": [oauth_state]}
server_class = Mock(return_value=Mock(wait_for_redirect=lambda: auth_code_response))
with patch(InteractiveBrowserCredential.__module__ + ".uuid.uuid4", lambda: oauth_state):
with patch(WEBBROWSER_OPEN, lambda _: True):
credential = InteractiveBrowserCredential(
_cache=TokenCache(),
authority=environment,
client_id=client_id,
server_class=server_class,
tenant_id=tenant_id,
transport=transport,
)
record = credential.authenticate(scopes=(scope,))
assert record.authority == environment
assert record.home_account_id == object_id + "." + home_tenant
assert record.tenant_id == home_tenant
assert record.username == username
# credential should have a cached access token for the scope used in authenticate
with patch(WEBBROWSER_OPEN, Mock(side_effect=Exception("credential should authenticate silently"))):
token = credential.get_token(scope)
assert token.token == access_token
def test_disable_automatic_authentication():
"""When configured for strict silent auth, the credential should raise when silent auth fails"""
empty_cache = TokenCache() # empty cache makes silent auth impossible
transport = Mock(send=Mock(side_effect=Exception("no request should be sent")))
credential = InteractiveBrowserCredential(
disable_automatic_authentication=True, transport=transport, _cache=empty_cache
)
with patch(WEBBROWSER_OPEN, Mock(side_effect=Exception("credential shouldn't try interactive authentication"))):
with pytest.raises(AuthenticationRequiredError):
credential.get_token("scope")
@patch("azure.identity._credentials.browser.webbrowser.open", lambda _: True)
def test_policies_configurable():
policy = Mock(spec_set=SansIOHTTPPolicy, on_request=Mock())
transport = validating_transport(
requests=[Request()] * 2,
responses=[get_discovery_response(), mock_response(json_payload=build_aad_response(access_token="**"))],
)
# mock local server fakes successful authentication by immediately returning a well-formed response
oauth_state = "oauth-state"
auth_code_response = {"code": "authorization-code", "state": [oauth_state]}
server_class = Mock(return_value=Mock(wait_for_redirect=lambda: auth_code_response))
credential = InteractiveBrowserCredential(
policies=[policy], transport=transport, server_class=server_class, _cache=TokenCache()
)
with patch("azure.identity._credentials.browser.uuid.uuid4", lambda: oauth_state):
credential.get_token("scope")
assert policy.on_request.called
@patch("azure.identity._credentials.browser.webbrowser.open", lambda _: True)
def test_user_agent():
transport = validating_transport(
requests=[Request(), Request(required_headers={"User-Agent": USER_AGENT})],
responses=[get_discovery_response(), mock_response(json_payload=build_aad_response(access_token="**"))],
)
# mock local server fakes successful authentication by immediately returning a well-formed response
oauth_state = "oauth-state"
auth_code_response = {"code": "authorization-code", "state": [oauth_state]}
server_class = Mock(return_value=Mock(wait_for_redirect=lambda: auth_code_response))
credential = InteractiveBrowserCredential(transport=transport, server_class=server_class, _cache=TokenCache())
with patch("azure.identity._credentials.browser.uuid.uuid4", lambda: oauth_state):
credential.get_token("scope")
@patch("azure.identity._credentials.browser.webbrowser.open")
def test_interactive_credential(mock_open):
mock_open.side_effect = _validate_auth_request_url
oauth_state = "state"
client_id = "client-id"
expected_refresh_token = "refresh-token"
expected_token = "access-token"
expires_in = 3600
authority = "authority"
tenant_id = "tenant_id"
endpoint = "https://{}/{}".format(authority, tenant_id)
discovery_response = get_discovery_response(endpoint=endpoint)
transport = validating_transport(
requests=[Request(url_substring=endpoint)] * 3
+ [
Request(
authority=authority, url_substring=endpoint, required_data={"refresh_token": expected_refresh_token}
)
],
responses=[
discovery_response, # instance discovery
discovery_response, # tenant discovery
mock_response(
json_payload=build_aad_response(
access_token=expected_token,
expires_in=expires_in,
refresh_token=expected_refresh_token,
uid="uid",
utid=tenant_id,
id_token=build_id_token(aud=client_id, object_id="uid", tenant_id=tenant_id, iss=endpoint),
token_type="Bearer",
)
),
mock_response(
json_payload=build_aad_response(access_token=expected_token, expires_in=expires_in, token_type="Bearer")
),
],
)
# mock local server fakes successful authentication by immediately returning a well-formed response
auth_code_response = {"code": "authorization-code", "state": [oauth_state]}
server_class = Mock(return_value=Mock(wait_for_redirect=lambda: auth_code_response))
credential = InteractiveBrowserCredential(
authority=authority,
tenant_id=tenant_id,
client_id=client_id,
server_class=server_class,
transport=transport,
instance_discovery=False,
validate_authority=False,
_cache=TokenCache(),
)
# The credential's auth code request includes a uuid which must be included in the redirect. Patching to
# set the uuid requires less code here than a proper mock server.
with patch("azure.identity._credentials.browser.uuid.uuid4", lambda: oauth_state):
token = credential.get_token("scope")
assert token.token == expected_token
assert mock_open.call_count == 1
# token should be cached, get_token shouldn't prompt again
token = credential.get_token("scope")
assert token.token == expected_token
assert mock_open.call_count == 1
# As of MSAL 1.0.0, applications build a new client every time they redeem a refresh token.
# Here we patch the private method they use for the sake of test coverage.
# TODO: this will probably break when this MSAL behavior changes
app = credential._get_app()
app._build_client = lambda *_: app.client # pylint:disable=protected-access
now = time.time()
# expired access token -> credential should use refresh token instead of prompting again
with patch("time.time", lambda: now + expires_in):
token = credential.get_token("scope")
assert token.token == expected_token
assert mock_open.call_count == 1
# ensure all expected requests were sent
assert transport.send.call_count == 4
@patch("azure.identity._credentials.browser.webbrowser.open", lambda _: True)
def test_interactive_credential_timeout():
# mock transport handles MSAL's tenant discovery
transport = Mock(
send=lambda _, **__: mock_response(
json_payload={"authorization_endpoint": "https://a/b", "token_endpoint": "https://a/b"}
)
)
# mock local server blocks long enough to exceed the timeout
timeout = 0.01
server_instance = Mock(wait_for_redirect=functools.partial(time.sleep, timeout + 0.01))
server_class = Mock(return_value=server_instance)
credential = InteractiveBrowserCredential(
client_id="guid",
server_class=server_class,
timeout=timeout,
transport=transport,
instance_discovery=False, # kwargs are passed to MSAL; this one prevents an AAD verification request
_cache=TokenCache(),
)
with pytest.raises(ClientAuthenticationError) as ex:
credential.get_token("scope")
assert "timed out" in ex.value.message.lower()
def test_redirect_server():
# binding a random port prevents races when running the test in parallel
server = None
for _ in range(4):
try:
port = random.randint(1024, 65535)
server = AuthCodeRedirectServer(port, timeout=10)
break
except socket.error:
continue # keep looking for an open port
assert server, "failed to start redirect server"
expected_param = "expected-param"
expected_value = "expected-value"
# the server's wait is blocking, so we do it on another thread
thread = threading.Thread(target=server.wait_for_redirect)
thread.daemon = True
thread.start()
# send a request, verify the server exposes the query
url = "http://127.0.0.1:{}/?{}={}".format(port, expected_param, expected_value) # nosec
response = urllib.request.urlopen(url) # nosec
assert response.code == 200
assert server.query_params[expected_param] == [expected_value]
@patch("azure.identity._credentials.browser.webbrowser.open", lambda _: False)
def test_no_browser():
transport = validating_transport(requests=[Request()] * 2, responses=[get_discovery_response()] * 2)
credential = InteractiveBrowserCredential(
client_id="client-id", server_class=Mock(), transport=transport, _cache=TokenCache()
)
with pytest.raises(ClientAuthenticationError, match=r".*browser.*"):
credential.get_token("scope")
def test_cannot_bind_port():
"""get_token should raise CredentialUnavailableError when the redirect listener can't bind a port"""
credential = InteractiveBrowserCredential(server_class=Mock(side_effect=socket.error))
with pytest.raises(CredentialUnavailableError):
credential.get_token("scope")
def _validate_auth_request_url(url):
parsed_url = urllib_parse.urlparse(url)
params = urllib_parse.parse_qs(parsed_url.query)
assert params.get("prompt") == ["select_account"], "Auth code request doesn't specify 'prompt=select_account'."
# when used as a Mock's side_effect, this method's return value is the Mock's return value
# (the real webbrowser.open returns a bool)
return True
|
threading-worker.py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import threading
import logging
import time
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] (%(threadName)-10s) %(message)s')
def worker(_id):
logging.debug("Worker: %d" % i)
time.sleep(1)
return
[threading.Thread(name="thread-%03d" % i, target=worker, args=(i,)).start() for i in range(6)]
|
3.ThreadLocal1.py
|
from multiprocessing.dummy import threading
global_local = threading.local()
def show_name():
print(f"[{threading.current_thread().name}]{global_local.name}")
def task1():
global_local.name = "小明"
show_name()
def task2():
global_local.name = "小张"
show_name()
def main():
t1 = threading.Thread(target=task1)
t2 = threading.Thread(target=task2)
t1.start()
t2.start()
t1.join()
t2.join()
if __name__ == '__main__':
main()
|
elg_demo.py
|
#!/usr/bin/env python3
"""Main script for gaze direction inference from webcam feed."""
import argparse
import os
import queue
import threading
import time
import coloredlogs
import cv2 as cv
import numpy as np
import tensorflow as tf
from datasources import Video, Webcam
from models import ELG
import util.gaze
if __name__ == '__main__':
# Set global log level
parser = argparse.ArgumentParser(description='Demonstration of landmarks localization.')
parser.add_argument('-v', type=str, help='logging level', default='info',
choices=['debug', 'info', 'warning', 'error', 'critical'])
parser.add_argument('--from_video', type=str, help='Use this video path instead of webcam')
parser.add_argument('--record_video', type=str, help='Output path of video of demonstration.')
parser.add_argument('--fullscreen', action='store_true')
parser.add_argument('--headless', action='store_true')
parser.add_argument('--fps', type=int, default=60, help='Desired sampling rate of webcam')
parser.add_argument('--camera_id', type=int, default=0, help='ID of webcam to use')
args = parser.parse_args()
coloredlogs.install(
datefmt='%d/%m %H:%M',
fmt='%(asctime)s %(levelname)s %(message)s',
level=args.v.upper(),
)
# Check if GPU is available
from tensorflow.python.client import device_lib
session_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
gpu_available = False
try:
        gpus = [d for d in device_lib.list_local_devices(session_config=session_config)
if d.device_type == 'GPU']
gpu_available = len(gpus) > 0
except:
pass
# Initialize Tensorflow session
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Session(config=session_config) as session:
# Declare some parameters
batch_size = 2
# Define webcam stream data source
# Change data_format='NHWC' if not using CUDA
if args.from_video:
assert os.path.isfile(args.from_video)
data_source = Video(args.from_video,
tensorflow_session=session, batch_size=batch_size,
data_format='NCHW' if gpu_available else 'NHWC',
eye_image_shape=(108, 180))
else:
data_source = Webcam(tensorflow_session=session, batch_size=batch_size,
camera_id=args.camera_id, fps=args.fps,
data_format='NCHW' if gpu_available else 'NHWC',
eye_image_shape=(36, 60))
# Define model
if args.from_video:
model = ELG(
session, train_data={'videostream': data_source},
first_layer_stride=3,
num_modules=3,
num_feature_maps=64,
learning_schedule=[
{
'loss_terms_to_optimize': {'dummy': ['hourglass', 'radius']},
},
],
)
else:
model = ELG(
session, train_data={'videostream': data_source},
first_layer_stride=1,
num_modules=2,
num_feature_maps=32,
learning_schedule=[
{
'loss_terms_to_optimize': {'dummy': ['hourglass', 'radius']},
},
],
)
# Record output frames to file if requested
if args.record_video:
video_out = None
video_out_queue = queue.Queue()
video_out_should_stop = False
video_out_done = threading.Condition()
def _record_frame():
global video_out
last_frame_time = None
out_fps = 30
out_frame_interval = 1.0 / out_fps
while not video_out_should_stop:
frame_index = video_out_queue.get()
if frame_index is None:
break
assert frame_index in data_source._frames
frame = data_source._frames[frame_index]['bgr']
h, w, _ = frame.shape
if video_out is None:
video_out = cv.VideoWriter(
args.record_video, cv.VideoWriter_fourcc(*'H264'),
out_fps, (w, h),
)
now_time = time.time()
if last_frame_time is not None:
time_diff = now_time - last_frame_time
while time_diff > 0.0:
video_out.write(frame)
time_diff -= out_frame_interval
last_frame_time = now_time
video_out.release()
with video_out_done:
video_out_done.notify_all()
record_thread = threading.Thread(target=_record_frame, name='record')
record_thread.daemon = True
record_thread.start()
# Begin visualization thread
inferred_stuff_queue = queue.Queue()
def _visualize_output():
last_frame_index = 0
last_frame_time = time.time()
fps_history = []
all_gaze_histories = []
if args.fullscreen:
cv.namedWindow('vis', cv.WND_PROP_FULLSCREEN)
cv.setWindowProperty('vis', cv.WND_PROP_FULLSCREEN, cv.WINDOW_FULLSCREEN)
while True:
# If no output to visualize, show unannotated frame
if inferred_stuff_queue.empty():
next_frame_index = last_frame_index + 1
if next_frame_index in data_source._frames:
next_frame = data_source._frames[next_frame_index]
if 'faces' in next_frame and len(next_frame['faces']) == 0:
if not args.headless:
cv.imshow('vis', next_frame['bgr'])
if args.record_video:
video_out_queue.put_nowait(next_frame_index)
last_frame_index = next_frame_index
if cv.waitKey(1) & 0xFF == ord('q'):
return
continue
# Get output from neural network and visualize
output = inferred_stuff_queue.get()
bgr = None
for j in range(batch_size):
frame_index = output['frame_index'][j]
if frame_index not in data_source._frames:
continue
frame = data_source._frames[frame_index]
# Decide which landmarks are usable
heatmaps_amax = np.amax(output['heatmaps'][j, :].reshape(-1, 18), axis=0)
can_use_eye = np.all(heatmaps_amax > 0.7)
can_use_eyelid = np.all(heatmaps_amax[0:8] > 0.75)
can_use_iris = np.all(heatmaps_amax[8:16] > 0.8)
start_time = time.time()
eye_index = output['eye_index'][j]
bgr = frame['bgr']
eye = frame['eyes'][eye_index]
eye_image = eye['image']
eye_side = eye['side']
eye_landmarks = output['landmarks'][j, :]
eye_radius = output['radius'][j][0]
if eye_side == 'left':
eye_landmarks[:, 0] = eye_image.shape[1] - eye_landmarks[:, 0]
eye_image = np.fliplr(eye_image)
# Embed eye image and annotate for picture-in-picture
eye_upscale = 2
eye_image_raw = cv.cvtColor(cv.equalizeHist(eye_image), cv.COLOR_GRAY2BGR)
eye_image_raw = cv.resize(eye_image_raw, (0, 0), fx=eye_upscale, fy=eye_upscale)
eye_image_annotated = np.copy(eye_image_raw)
if can_use_eyelid:
cv.polylines(
eye_image_annotated,
[np.round(eye_upscale*eye_landmarks[0:8]).astype(np.int32)
.reshape(-1, 1, 2)],
isClosed=True, color=(255, 255, 0), thickness=1, lineType=cv.LINE_AA,
)
if can_use_iris:
cv.polylines(
eye_image_annotated,
[np.round(eye_upscale*eye_landmarks[8:16]).astype(np.int32)
.reshape(-1, 1, 2)],
isClosed=True, color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
cv.drawMarker(
eye_image_annotated,
tuple(np.round(eye_upscale*eye_landmarks[16, :]).astype(np.int32)),
color=(0, 255, 255), markerType=cv.MARKER_CROSS, markerSize=4,
thickness=1, line_type=cv.LINE_AA,
)
face_index = int(eye_index / 2)
eh, ew, _ = eye_image_raw.shape
v0 = face_index * 2 * eh
v1 = v0 + eh
v2 = v1 + eh
u0 = 0 if eye_side == 'left' else ew
u1 = u0 + ew
bgr[v0:v1, u0:u1] = eye_image_raw
bgr[v1:v2, u0:u1] = eye_image_annotated
# Visualize preprocessing results
frame_landmarks = (frame['smoothed_landmarks']
if 'smoothed_landmarks' in frame
else frame['landmarks'])
for f, face in enumerate(frame['faces']):
for landmark in frame_landmarks[f][:-1]:
cv.drawMarker(bgr, tuple(np.round(landmark).astype(np.int32)),
color=(0, 0, 255), markerType=cv.MARKER_STAR,
markerSize=2, thickness=1, line_type=cv.LINE_AA)
cv.rectangle(
bgr, tuple(np.round(face[:2]).astype(np.int32)),
tuple(np.round(np.add(face[:2], face[2:])).astype(np.int32)),
color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
# Transform predictions
eye_landmarks = np.concatenate([eye_landmarks,
[[eye_landmarks[-1, 0] + eye_radius,
eye_landmarks[-1, 1]]]])
eye_landmarks = np.asmatrix(np.pad(eye_landmarks, ((0, 0), (0, 1)),
'constant', constant_values=1.0))
eye_landmarks = (eye_landmarks *
eye['inv_landmarks_transform_mat'].T)[:, :2]
eye_landmarks = np.asarray(eye_landmarks)
eyelid_landmarks = eye_landmarks[0:8, :]
iris_landmarks = eye_landmarks[8:16, :]
iris_centre = eye_landmarks[16, :]
eyeball_centre = eye_landmarks[17, :]
eyeball_radius = np.linalg.norm(eye_landmarks[18, :] -
eye_landmarks[17, :])
# Smooth and visualize gaze direction
num_total_eyes_in_frame = len(frame['eyes'])
if len(all_gaze_histories) != num_total_eyes_in_frame:
all_gaze_histories = [list() for _ in range(num_total_eyes_in_frame)]
gaze_history = all_gaze_histories[eye_index]
if can_use_eye:
# Visualize landmarks
cv.drawMarker( # Eyeball centre
bgr, tuple(np.round(eyeball_centre).astype(np.int32)),
color=(0, 255, 0), markerType=cv.MARKER_CROSS, markerSize=4,
thickness=1, line_type=cv.LINE_AA,
)
# cv.circle( # Eyeball outline
# bgr, tuple(np.round(eyeball_centre).astype(np.int32)),
# int(np.round(eyeball_radius)), color=(0, 255, 0),
# thickness=1, lineType=cv.LINE_AA,
# )
# Draw "gaze"
# from models.elg import estimate_gaze_from_landmarks
# current_gaze = estimate_gaze_from_landmarks(
# iris_landmarks, iris_centre, eyeball_centre, eyeball_radius)
i_x0, i_y0 = iris_centre
e_x0, e_y0 = eyeball_centre
theta = -np.arcsin(np.clip((i_y0 - e_y0) / eyeball_radius, -1.0, 1.0))
phi = np.arcsin(np.clip((i_x0 - e_x0) / (eyeball_radius * -np.cos(theta)),
-1.0, 1.0))
current_gaze = np.array([theta, phi])
gaze_history.append(current_gaze)
gaze_history_max_len = 10
if len(gaze_history) > gaze_history_max_len:
gaze_history = gaze_history[-gaze_history_max_len:]
util.gaze.draw_gaze(bgr, iris_centre, np.mean(gaze_history, axis=0),
length=120.0, thickness=1)
else:
gaze_history.clear()
if can_use_eyelid:
cv.polylines(
bgr, [np.round(eyelid_landmarks).astype(np.int32).reshape(-1, 1, 2)],
isClosed=True, color=(255, 255, 0), thickness=1, lineType=cv.LINE_AA,
)
if can_use_iris:
cv.polylines(
bgr, [np.round(iris_landmarks).astype(np.int32).reshape(-1, 1, 2)],
isClosed=True, color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
cv.drawMarker(
bgr, tuple(np.round(iris_centre).astype(np.int32)),
color=(0, 255, 255), markerType=cv.MARKER_CROSS, markerSize=4,
thickness=1, line_type=cv.LINE_AA,
)
dtime = 1e3*(time.time() - start_time)
if 'visualization' not in frame['time']:
frame['time']['visualization'] = dtime
else:
frame['time']['visualization'] += dtime
def _dtime(before_id, after_id):
return int(1e3 * (frame['time'][after_id] - frame['time'][before_id]))
def _dstr(title, before_id, after_id):
return '%s: %dms' % (title, _dtime(before_id, after_id))
if eye_index == len(frame['eyes']) - 1:
# Calculate timings
frame['time']['after_visualization'] = time.time()
fps = int(np.round(1.0 / (time.time() - last_frame_time)))
fps_history.append(fps)
if len(fps_history) > 60:
fps_history = fps_history[-60:]
fps_str = '%d FPS' % np.mean(fps_history)
last_frame_time = time.time()
fh, fw, _ = bgr.shape
cv.putText(bgr, fps_str, org=(fw - 110, fh - 20),
fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=0.8,
color=(0, 0, 0), thickness=1, lineType=cv.LINE_AA)
cv.putText(bgr, fps_str, org=(fw - 111, fh - 21),
fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=0.79,
color=(255, 255, 255), thickness=1, lineType=cv.LINE_AA)
if not args.headless:
cv.imshow('vis', bgr)
last_frame_index = frame_index
# Record frame?
if args.record_video:
video_out_queue.put_nowait(frame_index)
# Quit?
if cv.waitKey(1) & 0xFF == ord('q'):
return
# Print timings
if frame_index % 60 == 0:
latency = _dtime('before_frame_read', 'after_visualization')
processing = _dtime('after_frame_read', 'after_visualization')
timing_string = ', '.join([
_dstr('read', 'before_frame_read', 'after_frame_read'),
_dstr('preproc', 'after_frame_read', 'after_preprocessing'),
'infer: %dms' % int(frame['time']['inference']),
'vis: %dms' % int(frame['time']['visualization']),
'proc: %dms' % processing,
'latency: %dms' % latency,
])
print('%08d [%s] %s' % (frame_index, fps_str, timing_string))
visualize_thread = threading.Thread(target=_visualize_output, name='visualization')
visualize_thread.daemon = True
visualize_thread.start()
# Do inference forever
infer = model.inference_generator()
while True:
output = next(infer)
for frame_index in np.unique(output['frame_index']):
if frame_index not in data_source._frames:
continue
frame = data_source._frames[frame_index]
if 'inference' in frame['time']:
frame['time']['inference'] += output['inference_time']
else:
frame['time']['inference'] = output['inference_time']
inferred_stuff_queue.put_nowait(output)
            if not visualize_thread.is_alive():
break
if not data_source._open:
break
# Close video recording
if args.record_video and video_out is not None:
video_out_should_stop = True
video_out_queue.put_nowait(None)
with video_out_done:
video_out_done.wait()
|
multiprocessing.py
|
#
# Copyright 2021-2022 Johannes Laurin Hörmann
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""Multiprocessing utils."""
# NOTE: depending on platform, we may have to experiment with the forking methods,
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
import asyncio
import logging
import multiprocessing # run task as child process to avoid side effects
import queue
import traceback # forward exception from child process to parent process
logger = logging.getLogger(__name__)
def process_initializer():
"""Initialize process pool workers."""
# Avoids warning
# PyGIWarning: Gtk was imported without specifying a version first.
# Use gi.require_version('Gtk', '3.0') before import to ensure that the right version gets loaded.
# when launching process pool. Would forking / spawning global process pool
# before Gtk initialization be another option?
import gi
gi.require_version('Gtk', '3.0')
# inspired by
# https://stackoverflow.com/questions/19924104/python-multiprocessing-handling-child-errors-in-parent
class Process(multiprocessing.Process):
"""
Class which returns child Exceptions to Parent.
https://stackoverflow.com/a/33599967/4992248
"""
def __init__(self, *args, **kwargs):
multiprocessing.Process.__init__(self, *args, **kwargs)
self._parent_conn, self._child_conn = multiprocessing.Pipe()
self._exception = None
def run(self):
try:
super().run()
self._child_conn.send(None)
except Exception as e:
tb = traceback.format_exc()
self._child_conn.send((e, tb))
raise e # You can still rise this exception if you need to
@property
def exception(self):
if self._parent_conn.poll():
self._exception = self._parent_conn.recv()
return self._exception
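# Illustrative sketch (not part of the original module): how the Process
# subclass above surfaces a child exception in the parent. The helper names
# are made up for the example.
def _example_failing_child():
    raise RuntimeError("boom")
def _example_check_child_exception():
    p = Process(target=_example_failing_child)
    p.start()
    p.join()
    if p.exception:
        exc, tb = p.exception
        logger.error("child process failed:\n%s", tb)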
class TargetWrapper:
def __init__(self, target):
self._target = target
def __call__(self, return_value_queue, status_progress_queue, *args):
class StatusReportClass:
def update(status_report):
logger.debug(f"Child process queues status report {status_report}")
status_progress_queue.put(status_report)
return_value_queue.put(self._target(*args, status_report_callback=StatusReportClass))
class StatusReportingChildProcessBuilder:
"""Outsource serial functions with status report handlers.
The status report handler is expected to conform to the
click.ProgressBar interface. In particular, it must exhibit an
update(val) method.
For any function that runs serial and reports status via such a callback,
this wrapper can run them in a non-blocking forked process and forward the
status reports via queue to the callback.
The function must have the signature
func(*args, status_report_callback=None)
"""
def __init__(self, target, status_report_callback):
self._target_wrapper = TargetWrapper(target)
self._status_report_handler = status_report_callback
async def __call__(self, *args):
"""Spawn child process to assure my environment stays untouched."""
return_value_queue = multiprocessing.Queue()
status_progress_queue = multiprocessing.Queue()
process = Process(target=self._target_wrapper, args=[return_value_queue, status_progress_queue, *args])
process.start()
# wait for child to queue its return value and
# check whether child raises exception
while return_value_queue.empty():
# if child raises exception, then it has terminated
# before queueing any return value
if process.exception:
error, p_traceback = process.exception
raise ChildProcessError(p_traceback)
try:
status_report = status_progress_queue.get_nowait()
except queue.Empty:
pass
else:
logger.debug(f"Parent process received status report {status_report}")
self._status_report_handler.update(status_report)
await asyncio.sleep(0.1)
return_value = return_value_queue.get()
        # a child that never raises an exception and never queues a return
        # value would leave the waiting loop above spinning forever
process.join()
return return_value
def test_function(steps, status_report_callback):
for n in range(steps):
print(f"Child process step {n}")
status_report_callback.update(n)
return True
class test_handler:
def update(n):
print(f"Test callback received report for step {n}")
async def test_run():
test_process = StatusReportingChildProcessBuilder(test_function, test_handler)
return_value = await test_process(10)
print(f"Child process returned {return_value}.")
|
test_rsocket.py
|
import py, errno, sys
from rpython.rlib import rsocket
from rpython.rlib.rsocket import *
import socket as cpy_socket
from rpython.translator.c.test.test_genc import compile
def setup_module(mod):
rsocket_startup()
def test_ipv4_addr():
a = INETAddress("localhost", 4000)
assert a.get_host() == "127.0.0.1"
assert a.get_port() == 4000
a = INETAddress("", 4001)
assert a.get_host() == "0.0.0.0"
assert a.get_port() == 4001
a = INETAddress("<broadcast>", 47002)
assert a.get_host() == "255.255.255.255"
assert a.get_port() == 47002
py.test.raises(GAIError, INETAddress, "no such host exists", 47003)
res = repr(a)
assert res == "<INETAddress 255.255.255.255:47002>"
def test_unix_addr():
if getattr(rsocket, 'AF_UNIX', None) is None:
py.test.skip('AF_UNIX not supported.')
a = UNIXAddress("/tmp/socketname")
assert a.get_path() == "/tmp/socketname"
def test_netlink_addr():
if getattr(rsocket, 'AF_NETLINK', None) is None:
py.test.skip('AF_NETLINK not supported.')
pid = 1
group_mask = 64 + 32
a = NETLINKAddress(pid, group_mask)
assert a.get_pid() == pid
assert a.get_groups() == group_mask
def test_gethostname():
s = gethostname()
assert isinstance(s, str)
def test_gethostbyname():
for host in ["localhost", "127.0.0.1"]:
a = gethostbyname(host)
assert isinstance(a, INETAddress)
assert a.get_host() == "127.0.0.1"
def test_gethostbyname_ex():
for host in ["localhost", "127.0.0.1"]:
name, aliases, address_list = gethostbyname_ex(host)
allnames = [name] + aliases
for n in allnames:
assert isinstance(n, str)
if sys.platform != 'win32':
assert host in allnames
for a in address_list:
if isinstance(a, INETAddress) and a.get_host() == "127.0.0.1":
break # ok
# no IPV6, should always return IPV4
else:
py.test.fail("could not find the localhost address in %r"
% (address_list,))
def test_gethostbyaddr():
try:
cpy_socket.gethostbyaddr("::1")
except cpy_socket.herror:
ipv6 = HSocketError
except cpy_socket.gaierror:
ipv6 = GAIError
else:
ipv6 = None
for host in ["localhost", "127.0.0.1", "::1"]:
if host == "::1" and ipv6:
with py.test.raises(ipv6):
gethostbyaddr(host)
continue
name, aliases, address_list = gethostbyaddr(host)
allnames = [name] + aliases
for n in allnames:
assert isinstance(n, str)
if sys.platform != 'win32':
assert 'localhost' in allnames or 'ip6-localhost' in allnames
for a in address_list:
if isinstance(a, INETAddress) and a.get_host() == "127.0.0.1":
break # ok
if host != '127.0.0.1': # name lookup might return IPV6
if isinstance(a, INET6Address) and a.get_host() == "::1":
break # ok
else:
py.test.fail("could not find the localhost address in %r"
% (address_list,))
def test_getservbyname():
assert getservbyname('http') == 80
assert getservbyname('http', 'tcp') == 80
def test_getservbyport():
assert getservbyport(80) == cpy_socket.getservbyport(80)
assert getservbyport(80, 'tcp') == cpy_socket.getservbyport(80)
def test_getprotobyname():
assert getprotobyname('tcp') == IPPROTO_TCP
assert getprotobyname('udp') == IPPROTO_UDP
def test_socketpair():
if sys.platform == "win32":
py.test.skip('No socketpair on Windows')
s1, s2 = socketpair()
s1.sendall('?')
buf = s2.recv(100)
assert buf == '?'
count = s2.send('x'*99)
assert 1 <= count <= 99
buf = s1.recv(100)
assert buf == 'x'*count
s1.close()
s2.close()
def test_socketpair_inheritable():
if sys.platform == "win32":
py.test.skip('No socketpair on Windows')
for inh in [False, True]:
s1, s2 = socketpair(inheritable=inh)
assert sock_get_inheritable(s1.fd) == inh
assert sock_get_inheritable(s2.fd) == inh
s1.close()
s2.close()
def test_socketpair_recvinto_1():
class Buffer:
def setslice(self, start, string):
self.x = string
def get_raw_address(self):
raise ValueError
if sys.platform == "win32":
py.test.skip('No socketpair on Windows')
s1, s2 = socketpair()
buf = Buffer()
s1.sendall('?')
n = s2.recvinto(buf, 1)
assert n == 1
assert buf.x == '?'
count = s2.send('x'*99)
assert 1 <= count <= 99
n = s1.recvinto(buf, 100)
assert n == count
assert buf.x == 'x'*count
s1.close()
s2.close()
def test_socketpair_recvinto_2():
class Buffer:
def __init__(self):
self._p = lltype.malloc(rffi.CCHARP.TO, 100, flavor='raw',
track_allocation=False)
def _as_str(self, count):
return rffi.charpsize2str(self._p, count)
def get_raw_address(self):
return self._p
if sys.platform == "win32":
py.test.skip('No socketpair on Windows')
s1, s2 = socketpair()
buf = Buffer()
s1.sendall('?')
n = s2.recvinto(buf, 1)
assert n == 1
assert buf._as_str(1) == '?'
count = s2.send('x'*99)
assert 1 <= count <= 99
n = s1.recvinto(buf, 100)
assert n == count
assert buf._as_str(n) == 'x'*count
s1.close()
s2.close()
def test_socketpair_recvfrom_into_1():
class Buffer:
def setslice(self, start, string):
self.x = string
def get_raw_address(self):
raise ValueError
if sys.platform == "win32":
py.test.skip('No socketpair on Windows')
s1, s2 = socketpair()
buf = Buffer()
s1.sendall('?')
n, addr = s2.recvfrom_into(buf, 1)
assert n == 1
assert addr is None
assert buf.x == '?'
count = s2.send('x'*99)
assert 1 <= count <= 99
n, addr = s1.recvfrom_into(buf, 100)
assert n == count
assert addr is None
assert buf.x == 'x'*count
s1.close()
s2.close()
def test_socketpair_recvfrom_into_2():
class Buffer:
def __init__(self):
self._p = lltype.malloc(rffi.CCHARP.TO, 100, flavor='raw',
track_allocation=False)
def _as_str(self, count):
return rffi.charpsize2str(self._p, count)
def get_raw_address(self):
return self._p
if sys.platform == "win32":
py.test.skip('No socketpair on Windows')
s1, s2 = socketpair()
buf = Buffer()
s1.sendall('?')
n, addr = s2.recvfrom_into(buf, 1)
assert n == 1
assert addr is None
assert buf._as_str(1) == '?'
count = s2.send('x'*99)
assert 1 <= count <= 99
n, addr = s1.recvfrom_into(buf, 100)
assert n == count
assert addr is None
assert buf._as_str(n) == 'x'*count
s1.close()
s2.close()
def test_simple_tcp():
from rpython.rlib import rthread
sock = RSocket()
try_ports = [1023] + range(20000, 30000, 437)
for port in try_ports:
print 'binding to port %d:' % (port,),
try:
sock.bind(INETAddress('127.0.0.1', port))
print 'works'
break
except SocketError as e: # should get a "Permission denied"
print e
else:
raise e
addr = INETAddress('127.0.0.1', port)
assert addr.eq(sock.getsockname())
sock.listen(1)
s2 = RSocket(AF_INET, SOCK_STREAM)
s2.settimeout(10.0) # test one side with timeouts so select is used, shouldn't affect test
connected = [False] #thread-mutable list
def connecting():
try:
s2.connect(addr)
connected[0] = True
finally:
lock.release()
lock = rthread.allocate_lock()
lock.acquire(True)
rthread.start_new_thread(connecting, ())
print 'waiting for connection'
fd1, addr2 = sock.accept()
s1 = RSocket(fd=fd1)
print 'connection accepted'
lock.acquire(True)
assert connected[0]
print 'connecting side knows that the connection was accepted too'
assert addr.eq(s2.getpeername())
#assert addr2.eq(s2.getsockname())
assert addr2.eq(s1.getpeername())
s1.send('?')
print 'sent one character'
buf = s2.recv(100)
assert buf == '?'
print 'received ok'
def sendstuff():
s2.sendall('x'*50000)
rthread.start_new_thread(sendstuff, ())
buf = ''
while len(buf) < 50000:
data = s1.recv(50100)
print 'recv returned %d bytes' % (len(data,))
assert data
buf += data
assert buf == 'x'*50000
print 'data received ok'
s1.shutdown(SHUT_RDWR)
s1.close()
s2.close()
def test_simple_udp():
s1 = RSocket(AF_INET, SOCK_DGRAM)
try_ports = [1023] + range(20000, 30000, 437)
for port in try_ports:
print 'binding to port %d:' % (port,),
try:
s1.bind(INETAddress('127.0.0.1', port))
print 'works'
break
except SocketError as e: # should get a "Permission denied"
print e
else:
raise e
addr = INETAddress('127.0.0.1', port)
assert addr.eq(s1.getsockname())
s2 = RSocket(AF_INET, SOCK_DGRAM)
s2.settimeout(10.0) # test one side with timeouts so select is used, shouldn't affect test
s2.bind(INETAddress('127.0.0.1', INADDR_ANY))
addr2 = s2.getsockname()
s1.sendto('?', 1, 0, addr2)
buf = s2.recv(100)
assert buf == '?'
s2.connect(addr)
count = s2.send('x'*99)
assert 1 <= count <= 99
buf, addr3 = s1.recvfrom(100)
assert buf == 'x'*count
print addr2, addr3
assert addr2.get_port() == addr3.get_port()
s1.close()
s2.close()
def test_nonblocking():
sock = RSocket()
sock.setblocking(False)
try_ports = [1023] + range(20000, 30000, 437)
for port in try_ports:
print 'binding to port %d:' % (port,),
try:
sock.bind(INETAddress('127.0.0.1', port))
print 'works'
break
except SocketError as e: # should get a "Permission denied"
print e
else:
raise e
addr = INETAddress('127.0.0.1', port)
assert addr.eq(sock.getsockname())
sock.listen(1)
err = py.test.raises(CSocketError, sock.accept)
assert err.value.errno in (errno.EAGAIN, errno.EWOULDBLOCK)
s2 = RSocket(AF_INET, SOCK_STREAM)
s2.setblocking(False)
err = py.test.raises(CSocketError, s2.connect, addr)
assert err.value.errno in (errno.EINPROGRESS, errno.EWOULDBLOCK)
fd1, addr2 = sock.accept()
s1 = RSocket(fd=fd1)
s1.setblocking(False)
assert addr.eq(s2.getpeername())
assert addr2.get_port() == s2.getsockname().get_port()
assert addr2.eq(s1.getpeername())
err = s2.connect_ex(addr) # should now work
assert err in (0, errno.EISCONN)
s1.send('?')
import time
time.sleep(0.01) # Windows needs some time to transfer data
buf = s2.recv(100)
assert buf == '?'
err = py.test.raises(CSocketError, s1.recv, 5000)
assert err.value.errno in (errno.EAGAIN, errno.EWOULDBLOCK)
count = s2.send('x'*50000)
assert 1 <= count <= 50000
while count: # Recv may return less than requested
buf = s1.recv(count + 100)
assert len(buf) <= count
assert buf.count('x') == len(buf)
count -= len(buf)
# Check that everything has been read
err = py.test.raises(CSocketError, s1.recv, 5000)
s1.close()
s2.close()
def test_inheritable():
for inh in [False, True]:
s1 = RSocket(inheritable=inh)
assert sock_get_inheritable(s1.fd) == inh
s1.close()
def test_getaddrinfo_http():
lst = getaddrinfo('localhost', 'http')
assert isinstance(lst, list)
found = False
for family, socktype, protocol, canonname, addr in lst:
if (family == AF_INET and
socktype == SOCK_STREAM and
addr.get_host() == '127.0.0.1' and
addr.get_port() == 80):
found = True
assert found, lst
# The following might fail if the DNS redirects failed requests to a
# catch-all address (i.e. opendns).
e = py.test.raises(GAIError, getaddrinfo, 'www.very-invalidaddress.com', None)
assert isinstance(e.value.get_msg(), str)
assert isinstance(e.value.get_msg_unicode(), unicode)
def getaddrinfo_pydotorg(i, result):
lst = getaddrinfo('python.org', None)
assert isinstance(lst, list)
found = False
for family, socktype, protocol, canonname, addr in lst:
if addr.get_host() in ('104.130.43.121', '23.253.135.79', '45.55.99.72'):
found = True
elif family == AF_INET:
print 'pydotorg changed to', addr.get_host()
result[i] += found
def test_getaddrinfo_pydotorg():
result = [0,]
getaddrinfo_pydotorg(0, result)
assert result[0] == 1
def test_getaddrinfo_no_reverse_lookup():
# It seems that getaddrinfo never runs a reverse lookup on Linux.
# Python2.3 on Windows returns the hostname.
lst = getaddrinfo('82.94.164.162', None, flags=AI_NUMERICHOST)
assert isinstance(lst, list)
found = False
print lst
for family, socktype, protocol, canonname, addr in lst:
assert 'python.org' not in canonname
if addr.get_host() == '82.94.164.162':
found = True
assert found, lst
def test_getaddrinfo_osx_crash():
# see CPython issue17269
for port in [None, '0', '00']:
getaddrinfo('localhost', port, 0, 0, 0, AI_NUMERICSERV)
def test_connect_ex():
s = RSocket()
err = s.connect_ex(INETAddress('0.0.0.0', 0)) # should not work
assert err in (errno.ECONNREFUSED, errno.EADDRNOTAVAIL)
s.close()
def test_connect_with_timeout_fail():
s = RSocket()
s.settimeout(0.1)
with py.test.raises(SocketTimeout):
s.connect(INETAddress('172.30.172.30', 12345))
s.close()
def test_connect_with_timeout_succeed():
s = RSocket()
s.settimeout(10.0)
s.connect(INETAddress('python.org', 80))
s.close()
def test_connect_with_default_timeout_fail():
rsocket.setdefaulttimeout(0.1)
s = RSocket()
rsocket.setdefaulttimeout(None)
assert s.gettimeout() == 0.1
with py.test.raises(SocketTimeout):
s.connect(INETAddress('172.30.172.30', 12345))
s.close()
def test_getsetsockopt():
import struct
assert struct.calcsize("i") == rffi.sizeof(rffi.INT)
    # A socket should start with reuse == 0
s = RSocket(AF_INET, SOCK_STREAM)
reuse = s.getsockopt_int(SOL_SOCKET, SO_REUSEADDR)
assert reuse == 0
s.setsockopt_int(SOL_SOCKET, SO_REUSEADDR, 1)
reuse = s.getsockopt_int(SOL_SOCKET, SO_REUSEADDR)
assert reuse != 0
# Test string case
s = RSocket(AF_INET, SOCK_STREAM)
reusestr = s.getsockopt(SOL_SOCKET, SO_REUSEADDR, rffi.sizeof(rffi.INT))
value, = struct.unpack("i", reusestr)
assert value == 0
optstr = struct.pack("i", 1)
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, optstr)
reusestr = s.getsockopt(SOL_SOCKET, SO_REUSEADDR, rffi.sizeof(rffi.INT))
value, = struct.unpack("i", reusestr)
assert value != 0
def test_getsetsockopt_global():
    # A socket should start with reuse == 0
s = RSocket(AF_INET, SOCK_STREAM)
fd = s.fd
reuse = getsockopt_int(fd, SOL_SOCKET, SO_REUSEADDR)
assert reuse == 0
s.setsockopt_int(SOL_SOCKET, SO_REUSEADDR, 1)
reuse = getsockopt_int(fd, SOL_SOCKET, SO_REUSEADDR)
assert reuse != 0
def test_get_socket_family():
s = RSocket(AF_INET, SOCK_STREAM)
fd = s.fd
assert get_socket_family(fd) == AF_INET
if getattr(rsocket, 'AF_UNIX', None) is not None:
s = RSocket(AF_UNIX)
fd = s.fd
assert get_socket_family(fd) == AF_UNIX
def test_dup():
s = RSocket(AF_INET, SOCK_STREAM)
try:
s.bind(INETAddress('localhost', 50007))
if sys.platform == "win32":
assert not hasattr(s, 'dup')
return
s2 = s.dup()
try:
assert s.fd != s2.fd
assert s.getsockname().eq(s2.getsockname())
finally:
s2.close()
finally:
s.close()
def test_c_dup():
# rsocket.dup() duplicates fd, it also works on Windows
# (but only on socket handles!)
s = RSocket(AF_INET, SOCK_STREAM)
try:
s.bind(INETAddress('localhost', 50007))
s2 = RSocket(fd=dup(s.fd))
try:
assert s.fd != s2.fd
assert s.getsockname().eq(s2.getsockname())
finally:
s2.close()
finally:
s.close()
def test_inet_aton():
assert inet_aton('1.2.3.4') == '\x01\x02\x03\x04'
assert inet_aton('127.0.0.1') == '\x7f\x00\x00\x01'
tests = ["127.0.0.256", "127.0.0.255555555555555555", "127.2b.0.0",
"127.2.0.0.1", "127.2.0."]
for ip in tests:
py.test.raises(SocketError, inet_aton, ip)
# Windows 2000: missing numbers are replaced by 0
for ip, aton in [("11..22.33", '\x0b\x00\x16\x21'),
(".11.22.33", '\x00\x0b\x16\x21')]:
try:
assert inet_aton(ip) == aton
except SocketError:
pass
def test_inet_ntoa():
assert inet_ntoa('\x01\x02\x03\x04') == '1.2.3.4'
def test_inet_pton():
if not hasattr(rsocket, 'inet_pton'):
py.test.skip("no inet_pton()")
assert inet_pton(AF_INET, '1.2.3.5') == '\x01\x02\x03\x05'
py.test.raises(SocketError, inet_pton, AF_INET, '127.0.0.256')
def test_inet_ntop():
if not hasattr(rsocket, 'inet_ntop'):
py.test.skip("no inet_ntop()")
assert inet_ntop(AF_INET, '\x01\x02\x03\x05') == '1.2.3.5'
def test_unix_socket_connect():
if getattr(rsocket, 'AF_UNIX', None) is None:
py.test.skip('AF_UNIX not supported.')
from rpython.tool.udir import udir
sockpath = str(udir.join('test_unix_socket_connect'))
a = UNIXAddress(sockpath)
serversock = RSocket(AF_UNIX)
serversock.bind(a)
serversock.listen(1)
clientsock = RSocket(AF_UNIX)
clientsock.connect(a)
fd, addr = serversock.accept()
s = RSocket(AF_UNIX, fd=fd)
s.send('X')
data = clientsock.recv(100)
assert data == 'X'
clientsock.send('Y')
data = s.recv(100)
assert data == 'Y'
clientsock.close()
s.close()
class TestTCP:
PORT = 50007
HOST = 'localhost'
def setup_method(self, method):
self.serv = RSocket(AF_INET, SOCK_STREAM)
self.serv.bind(INETAddress(self.HOST, self.PORT))
self.serv.listen(1)
def teardown_method(self, method):
self.serv.close()
self.serv = None
def test_timeout(self):
def raise_timeout():
self.serv.settimeout(1.0)
self.serv.accept()
py.test.raises(SocketTimeout, raise_timeout)
def test_timeout_zero(self):
def raise_error():
self.serv.settimeout(0.0)
foo = self.serv.accept()
py.test.raises(SocketError, raise_error)
def _test_cond_include(cond):
# Test that _rsocket_rffi is importable even on platforms where
# AF_PACKET or AF_NETLINK is not defined.
import re
from rpython.rlib import _rsocket_rffi
srcfile = _rsocket_rffi.__file__
if srcfile.lower().endswith('c') or srcfile.lower().endswith('o'):
srcfile = srcfile[:-1] # .pyc => .py
assert srcfile.lower().endswith('.py')
sourcelines = open(srcfile, 'rb').read().splitlines()
found = False
for i, line in enumerate(sourcelines):
line2 = re.sub(r"(\s*COND_HEADER\s*=)",
r"\1'#undef %s\\n'+" % cond,
line)
if line2 != line:
found = True
sourcelines[i] = line2
assert found
d = {}
sourcelines.append('')
exec '\n'.join(sourcelines) in d
def test_no_AF_PACKET():
_test_cond_include('AF_PACKET')
def test_no_AF_NETLINK():
_test_cond_include('AF_NETLINK')
def test_thread_safe_gethostbyaddr():
py.test.skip("hits non-thread-safe issues with ll2ctypes")
import threading
nthreads = 10
ip = '8.8.8.8'
domain = gethostbyaddr(ip)[0]
result = [0] * nthreads
threads = [None] * nthreads
lock = threading.Lock()
def lookup_addr(ip, i):
name, aliases, address_list = gethostbyaddr(ip, lock)
if name == domain:
result[i] += 1
for i in range(nthreads):
threads[i] = threading.Thread(target = lookup_addr, args=[ip, i])
threads[i].start()
for i in range(nthreads):
threads[i].join()
assert sum(result) == nthreads
def test_thread_safe_gethostbyname_ex():
py.test.skip("hits non-thread-safe issues with ll2ctypes")
import threading
nthreads = 10
domain = 'google.com'
result = [0] * nthreads
threads = [None] * nthreads
lock = threading.Lock()
def lookup_name(i):
name, aliases, address_list = gethostbyname_ex(domain, lock)
if name == domain:
result[i] += 1
for i in range(nthreads):
threads[i] = threading.Thread(target = lookup_name, args=[i])
threads[i].start()
for i in range(nthreads):
threads[i].join()
assert sum(result) == nthreads
def test_getaddrinfo_pydotorg_threadsafe():
py.test.skip("hits non-thread-safe issues with ll2ctypes")
import threading
nthreads = 10
result = [0] * nthreads
threads = [None] * nthreads
for i in range(nthreads):
threads[i] = threading.Thread(target = getaddrinfo_pydotorg, args=[i, result])
threads[i].start()
for i in range(nthreads):
threads[i].join()
assert sum(result) == nthreads
def test_translate_netdb_lock():
def f():
rsocket_startup()
gethostbyaddr("localhost")
return 0
fc = compile(f, [])
assert fc() == 0
def test_translate_netdb_lock_thread():
def f():
rsocket_startup()
gethostbyaddr("localhost")
return 0
fc = compile(f, [], thread=True)
assert fc() == 0
def test_socket_saves_errno(tmpdir):
# ensure errno is set to a known value...
unconnected_sock = RSocket()
e = py.test.raises(CSocketError, unconnected_sock.recv, 1024)
# ...which is ENOTCONN
assert e.value.errno == errno.ENOTCONN
e = py.test.raises(CSocketError,
RSocket,
family=AF_INET, type=SOCK_STREAM, proto=SOL_UDP)
assert e.value.errno in (errno.EPROTOTYPE, errno.EPROTONOSUPPORT)
def test_socket_init_non_blocking():
import fcntl, os
s = RSocket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK)
assert s.type == SOCK_STREAM
assert s.gettimeout() == 0.0
assert fcntl.fcntl(s.fd, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK
|
main.py
|
from schema import Schema
from connection import OracleConnection,MongoConnection
import redis
from log import main_logger
import datetime
import config
import threading
import uuid
# try to connect to redis server
try:
r6 = redis.StrictRedis(host="localhost",port=6379,db=6)
r6.ping()
r6.set("start",str(datetime.datetime.now()))
except Exception as e:
print("redis - connection refused")
main_logger.error("redis - connection refused")
exit()
# CAUTION:
# each thread gets an 8MB stack of your memory to address
# if you want to use more threads be careful
# HINT: you can use pthread_attr_setstacksize() to reduce the size of thread stacks (threading.stack_size())
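# A minimal sketch (illustrative assumption, not part of the original flow): the
# per-thread stack size can be lowered before any Thread objects are created, e.g.
#
#   import threading
#   threading.stack_size(512 * 1024)  # request 512 KiB stacks instead of the default
#
# threading.stack_size() requires at least 32 KiB and, on some platforms, a value
# that is a multiple of the system page size.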
MAX_THREAD = config.max_threads
INSERT_COUNT = config.insert_count
oc = OracleConnection(autocommit=True)
cursor = oc.get_cursor()
cursor.execute("select column_name, data_type from all_tab_columns where table_name = 'CONNECTION_LOG'")
with open('evaluate.txt','w') as f:
for column,data_type in cursor.fetchall():
if data_type.startswith('NUMBER'):
data_type = 'int'
elif data_type.startswith('VARCHAR'):
data_type = 'str'
elif data_type.startswith('DATE'):
data_type = 'datetime'
f.write("{} {}\n".format(column,data_type))
cursor.execute("select column_name from all_tab_columns where table_name = 'CONNECTION_LOG' and nullable = 'N' ")
mandatory_fields = []
for column in cursor.fetchall():
    mandatory_fields.append(*column)
def check_threads(threads):
current_threads = len(threads)
if current_threads >= MAX_THREAD :
main_logger.debug("number of threads exceeded")
        main_logger.debug("waiting for some threads to be released...")
        release_threads = int(current_threads / 2)
        for _ in range(release_threads):
            th = threads.pop(0)
            # never join the current thread itself
            if th is threading.current_thread():
                continue
            th.join()
            main_logger.debug(f"thread {th} released successfully")
schema = Schema("schema.txt", "evaluate.txt", mandatory_fields)
mongo = MongoConnection()
main_logger.info("database drivers initiated successfully ...")
docs = mongo.get_docs()
threads = []
oracle_rows = []
docs_count = docs.count()
for doc in docs:
data = schema.pre_processing(doc)
if data:
oracle_rows.append(schema.checkout(data))
if len(oracle_rows) % INSERT_COUNT == 0:
check_threads(threads)
t = threading.Thread(target=oc.insert_many,args=(oracle_rows,),name=str(uuid.uuid1()))
threads.append(t)
t.start()
oracle_rows = []
if len(oracle_rows) > 0 :
t = threading.Thread(target=oc.insert_many,args=(oracle_rows,),name=str(uuid.uuid1()))
threads.append(t)
t.start()
#TODO: Insert connection log ids to disk
# make sure all of the threads are done before closing the connections
for th in threads:
th.join()
del oc
mongo.__del__()
r6.set("stop",str(datetime.datetime.now()))
|
data_collector.py
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import time
from datetime import datetime
from threading import Event
# ============= enthought library imports =======================
from apptools.preferences.preference_binding import bind_preference
from traits.api import Any, List, CInt, Int, Bool, Enum, Str, Instance
from pychron.envisage.consoleable import Consoleable
from pychron.pychron_constants import AR_AR, SIGNAL, BASELINE, WHIFF, SNIFF
class DataCollector(Consoleable):
"""
Base class for ``Collector`` objects. Provides logic for iterative measurement.
"""
measurement_script = Any
automated_run = Instance('pychron.experiment.automated_run.automated_run.AutomatedRun')
measurement_result = Str
detectors = List
check_conditionals = Bool(True)
ncounts = CInt
is_baseline = Bool(False)
for_peak_hop = Bool(False)
fits = List
series_idx = Int
fit_series_idx = Int
canceled = False
terminated = False
_truncate_signal = False
starttime = None
starttime_abs = None
_alive = False
_evt = None
_warned_no_fit = None
_warned_no_det = None
collection_kind = Enum((SNIFF, WHIFF, BASELINE, SIGNAL))
refresh_age = False
_data = None
_temp_conds = None
_result = None
_queue = None
err_message = Str
no_intensity_threshold = 100
not_intensity_count = 0
trigger = None
plot_panel_update_period = Int(1)
def __init__(self, *args, **kw):
super(DataCollector, self).__init__(*args, **kw)
bind_preference(self, 'plot_panel_update_period', 'pychron.experiment.plot_panel_update_period')
# def wait(self):
# st = time.time()
# self.debug('wait started')
# while 1:
# if self._evt and self._evt.set():
# break
# self.debug('wait complete {:0.1f}s'.format(time.time() - st))
def set_truncated(self):
self._truncate_signal = True
def stop(self):
self._alive = False
if self._evt:
self._evt.set()
def set_starttime(self, s):
self.starttime = s
# convert s (result of time.time()) to a datetime object
self.starttime_abs = datetime.fromtimestamp(s)
def measure(self):
if self.canceled:
return
self.measurement_result = ''
self.terminated = False
self._truncate_signal = False
self._warned_no_fit = []
self._warned_no_det = []
if self.starttime is None:
self.starttime = time.time()
self.starttime_abs = datetime.now()
et = self.ncounts * self.period_ms * 0.001
self._alive = True
self._measure()
tt = time.time() - self.starttime
        self.debug('estimated time: {:0.3f} actual time: {:0.3f}'.format(et, tt))
# def plot_data(self, *args, **kw):
# from pychron.core.ui.gui import invoke_in_main_thread
# invoke_in_main_thread(self._plot_data, *args, **kw)
def set_temporary_conditionals(self, cd):
self._temp_conds = cd
def clear_temporary_conditionals(self):
self._temp_conds = None
# private
def _measure(self):
self.debug('starting measurement')
self._evt = evt = Event()
# self._queue = q = Queue()
# def writefunc():
# writer = self.data_writer
# while not q.empty() or not evt.wait(10):
# dets = self.detectors
# while not q.empty():
# x, keys, signals = q.get()
# writer(dets, x, keys, signals)
#
# # only write to file every 10 seconds and not on main thread
# t = Thread(target=writefunc)
# # t.setDaemon(True)
# t.start()
self.debug('measurement period (ms) = {}'.format(self.period_ms))
period = self.period_ms * 0.001
i = 1
while not evt.is_set():
result = self._check_iteration(i)
if not result:
if not self._pre_trigger_hook():
break
if self.trigger:
self.trigger()
evt.wait(period)
self.automated_run.plot_panel.counts = i
if not self._iter_hook(i):
break
self._post_iter_hook(i)
i += 1
else:
if result == 'cancel':
self.canceled = True
elif result == 'terminate':
self.terminated = True
break
evt.set()
# self.debug('waiting for write to finish')
# t.join()
self.debug('measurement finished')
def _pre_trigger_hook(self):
return True
def _post_iter_hook(self, i):
if self.experiment_type == AR_AR and self.refresh_age and not i % 5:
self.isotope_group.calculate_age(force=True)
def _iter_hook(self, i):
return self._iteration(i)
def _iteration(self, i, detectors=None):
try:
data = self._get_data(detectors)
if not data:
return
k, s, t = data
except (AttributeError, TypeError, ValueError) as e:
self.debug('failed getting data {}'.format(e))
return
if k is not None and s is not None:
x = self._get_time(t)
self._save_data(x, k, s)
self._plot_data(i, x, k, s)
return True
def _get_time(self, t):
if t is None:
t = time.time()
r = t - self.starttime
else:
# t is provided by the spectrometer. t should be a python datetime object
# since t is in absolute time use self.starttime_abs
r = t-self.starttime_abs
# convert to seconds
r = r.total_seconds()
return r
def _get_data(self, detectors=None):
try:
data = next(self.data_generator)
except StopIteration:
self.debug('data generator stopped')
return
if data:
keys, signals, ct = data
if detectors:
# data = list(zip(*(d for d in zip(*data) if d[0] in detectors)))
nkeys, nsignals = [], []
for k, s in zip(keys, signals):
if k in detectors:
nkeys.append(k)
nsignals.append(s)
data = (nkeys, nsignals, ct)
self._data = (nkeys, nsignals)
else:
self._data = (keys, signals)
return data
def _save_data(self, x, keys, signals):
# self._queue.put((x, keys, signals))
self.data_writer(self.detectors, x, keys, signals)
# update arar_age
if self.is_baseline and self.for_peak_hop:
self._update_baseline_peak_hop(x, keys, signals)
else:
self._update_isotopes(x, keys, signals)
def _update_baseline_peak_hop(self, x, keys, signals):
ig = self.isotope_group
for iso in ig.itervalues():
signal = self._get_signal(keys, signals, iso.detector)
if signal is not None:
if not ig.append_data(iso.name, iso.detector, x, signal, 'baseline'):
self.debug('baselines - failed appending data for {}. '
'not a current isotope {}'.format(iso, ig.isotope_keys))
def _update_isotopes(self, x, keys, signals):
a = self.isotope_group
kind = self.collection_kind
for dn in keys:
dn = self._get_detector(dn)
if dn:
iso = dn.isotope
signal = self._get_signal(keys, signals, dn.name)
if signal is not None:
if not a.append_data(iso, dn.name, x, signal, kind):
self.debug('{} - failed appending data for {}. not a current isotope {}'.format(kind, iso,
a.isotope_keys))
def _get_signal(self, keys, signals, det):
try:
return signals[keys.index(det)]
except ValueError:
if det not in self._warned_no_det:
self.warning('Detector {} is not available'.format(det))
self._warned_no_det.append(det)
self.canceled = True
self.stop()
def _get_detector(self, d):
if isinstance(d, str):
d = next((di for di in self.detectors
if di.name == d), None)
return d
def _plot_data(self, cnt, x, keys, signals):
for dn, signal in zip(keys, signals):
det = self._get_detector(dn)
if det:
self._set_plot_data(cnt, det, x, signal)
if not cnt % self.plot_panel_update_period:
self.plot_panel.update()
def _set_plot_data(self, cnt, det, x, signal):
iso = det.isotope
detname = det.name
ypadding = det.ypadding
if self.collection_kind == SNIFF:
gs = [(self.plot_panel.sniff_graph, iso, None, 0, 0),
(self.plot_panel.isotope_graph, iso, None, 0, 0)]
elif self.collection_kind == BASELINE:
iso = self.isotope_group.get_isotope(detector=detname, kind='baseline')
if iso is not None:
fit = iso.get_fit(cnt)
else:
fit = 'average'
gs = [(self.plot_panel.baseline_graph, detname, fit, 0, 0)]
else:
title = self.isotope_group.get_isotope_title(name=iso, detector=detname)
iso = self.isotope_group.get_isotope(name=iso, detector=detname)
fit = iso.get_fit(cnt)
gs = [(self.plot_panel.isotope_graph, title, fit, self.series_idx, self.fit_series_idx)]
for g, name, fit, series, fit_series in gs:
pid = g.get_plotid_by_ytitle(name)
g.add_datum((x, signal),
series=series,
plotid=pid,
update_y_limits=True,
ypadding=ypadding)
if fit:
g.set_fit(fit, plotid=pid, series=fit_series)
# ===============================================================================
#
# ===============================================================================
# ===============================================================================
# checks
# ===============================================================================
# def _check_modification_conditionals(self, cnt):
# tripped = self._check_conditionals(self.modification_conditionals, cnt)
# if tripped:
# queue = self.automated_run.experiment_executor.experiment_queue
# tripped.do_modifications(queue, self.automated_run)
# if tripped.use_truncation:
# return self._set_run_truncated()
def _check_conditionals(self, conditionals, cnt):
self.err_message = ''
for ti in conditionals:
if ti.check(self.automated_run, self._data, cnt):
m = 'Conditional tripped: {}'.format(ti.to_string())
self.info(m)
self.err_message = m
return ti
def _modification_func(self, tr):
queue = self.automated_run.experiment_executor.experiment_queue
tr.do_modifications(queue, self.automated_run)
self.measurement_script.abbreviated_count_ratio = tr.abbreviated_count_ratio
if tr.use_truncation:
return self._set_truncated()
elif tr.use_termination:
return 'terminate'
def _truncation_func(self, tr):
self.measurement_script.abbreviated_count_ratio = tr.abbreviated_count_ratio
return self._set_truncated()
def _action_func(self, tr):
tr.perform(self.measurement_script)
if not tr.resume:
return 'break'
def _set_truncated(self):
self.state = 'truncated'
self.automated_run.truncated = True
self.automated_run.spec.state = 'truncated'
return 'break'
def _check_iteration(self, i):
if self._temp_conds:
ti = self._check_conditionals(self._temp_conds, i)
if ti:
self.measurement_result = ti.action
return 'break'
j = i - 1
user_counts = 0 if self.plot_panel is None else self.plot_panel.ncounts
script_counts = 0 if self.measurement_script is None else self.measurement_script.ncounts
original_counts = self.ncounts
count_args = (j, original_counts)
# self.debug('user_counts={}, script_counts={}, original_counts={}'.format(user_counts,
# script_counts,
# original_counts))
if not self._alive:
self.info('measurement iteration executed {}/{} counts'.format(*count_args))
return 'cancel'
if user_counts != original_counts:
if i > user_counts:
self.info('user termination. measurement iteration executed {}/{} counts'.format(*count_args))
self.plot_panel.total_counts -= (original_counts - i)
return self._set_truncated()
elif script_counts != original_counts:
if i > script_counts:
self.info('script termination. measurement iteration executed {}/{} counts'.format(*count_args))
return self._set_truncated()
elif i > original_counts:
return 'break'
if self._truncate_signal:
self.info('measurement iteration executed {}/{} counts'.format(*count_args))
self._truncate_signal = False
return self._set_truncated()
if self.check_conditionals:
for tag, func, conditionals in (('modification', self._modification_func, self.modification_conditionals),
('truncation', self._truncation_func, self.truncation_conditionals),
('action', self._action_func, self.action_conditionals),
('termination', lambda x: 'terminate', self.termination_conditionals),
('cancelation', lambda x: 'cancel', self.cancelation_conditionals)):
tripped = self._check_conditionals(conditionals, i)
if tripped:
self.info('{} conditional {}. measurement iteration executed {}/{} counts'.format(tag,
tripped.message,
j,
original_counts),
color='red')
self.automated_run.show_conditionals(tripped=tripped)
return func(tripped)
@property
def isotope_group(self):
if self.automated_run:
return self.automated_run.isotope_group
@property
def plot_panel(self):
if self.automated_run:
return self.automated_run.plot_panel
@property
def modification_conditionals(self):
if self.automated_run:
return self.automated_run.modification_conditionals
@property
def truncation_conditionals(self):
if self.automated_run:
return self.automated_run.truncation_conditionals
@property
def termination_conditionals(self):
if self.automated_run:
return self.automated_run.termination_conditionals
@property
def action_conditionals(self):
if self.automated_run:
return self.automated_run.action_conditionals
@property
def cancelation_conditionals(self):
if self.automated_run:
return self.automated_run.cancelation_conditionals
# ============= EOF =============================================
|
QueueRunner.py
|
"""
QueueRunner plugin
##################
QueueRunner plugin implements a simple queue for task execution instead of starting
new threads for each batch of ongoing tasks.
For example, if the number of threads is 10 but a task needs to be executed on 20
hosts, the threaded runner starts 10 threads to run the task for the first 10 hosts
and, once they finish, starts another 10 threads for the remaining 10 hosts.
The above process works well for the majority of cases, but QueueRunner might be
beneficial in certain situations. QueueRunner pros:
- worker threads are started only once, saving some (negligible) CPU cycles
- even if one of the hosts takes a long time to complete the task, the other threads
  do not stay idle and keep serving the remaining hosts, which might yield better
  overall execution time
QueueRunner Architecture
========================
.. image:: ../_images/QueueRunner_v0.png
QueueRunner Sample Usage
========================
Need to instruct Nornir to use QueueRunner on instantiation::
from nornir import InitNornir
NornirObj = InitNornir(
runner={
"plugin": "QueueRunner",
"options": {
"num_workers": 100
}
}
)
QueueRunner Reference
=====================
.. autoclass:: nornir_salt.plugins.runners.QueueRunner.QueueRunner
"""
import threading
import queue
from typing import List
from nornir.core.task import AggregatedResult, Task
from nornir.core.inventory import Host
LOCK = threading.Lock()
class QueueRunner:
"""
    QueueRunner runs the task for each host using a work queue together
    with worker threads consuming work from that queue.
    Instead of firing up num_workers threads for each batch of hosts,
    QueueRunner starts num_workers threads once and uses the queue to
    submit tasks and obtain results.
Arguments:
num_workers: number of threads to use
"""
def __init__(self, num_workers: int = 20) -> None:
self.num_workers = num_workers
def worker(self, work_q):
while True:
work_to_do = work_q.get()
if work_to_do is None:
break
task, host, result = work_to_do
work_result = task.copy().start(host)
with LOCK:
result[host.name] = work_result
work_q.task_done()
def run(self, task: Task, hosts: List[Host]) -> AggregatedResult:
work_q = queue.Queue()
result = AggregatedResult(task.name)
# enqueue hosts in work queue
for host in hosts:
work_q.put((task, host, result))
# start threads
threads = []
for i in range(self.num_workers):
t = threading.Thread(target=self.worker, args=(work_q,), daemon=True)
t.start()
threads.append(t)
# block until all tasks are done
work_q.join()
# stop workers:
for i in range(self.num_workers):
work_q.put(None)
for t in threads:
t.join()
return result
|
runKeywordAsync.py
|
import sys
import os
import time
from robot.libraries.BuiltIn import BuiltIn
from robot.output.logger import LOGGER
class runKeywordAsync:
def __init__(self):
self._thread_pool = {}
self._last_thread_handle = 1
#self._robot_log_level = BuiltIn().get_variable_value("${LOG_LEVEL}")
def run_method_async(self, keyword, *args, **kwargs):
#BuiltIn().set_log_level("NONE")
handle = self._last_thread_handle
thread = self._threaded_method(keyword, *args, **kwargs)
thread.start()
self._thread_pool[handle] = thread
self._last_thread_handle += 1
return handle
def run_keyword_async(self, keyword, *args):
#BuiltIn().set_log_level("NONE")
handle = self._last_thread_handle
thread = self._threaded(keyword, *args)
thread.start()
self._thread_pool[handle] = thread
self._last_thread_handle += 1
return handle
def wait_async_all(self, timeout=60):
timeout = int(timeout)
results = []
for thread in self._thread_pool:
try:
result = self._thread_pool[thread].result_queue.get(True, timeout)
results.append(result)
except:
#BuiltIn().set_log_level(self._robot_log_level)
for thread in self._thread_pool:
self._thread_pool[thread].terminate()
raise Exception("Process " + str(thread) + " Failed")
#BuiltIn().set_log_level(self._robot_log_level)
self._thread_pool = {}
self._last_thread_handle = 1
return results
def get_async_return(self, handle, timeout=60):
timeout = int(timeout)
if handle in self._thread_pool:
try:
result = self._thread_pool[handle].result_queue.get(True, timeout)
del self._thread_pool[handle]
                #BuiltIn().set_log_level(self._robot_log_level)
return result
except:
raise Exception("Process " + str(handle) + " Failed")
else:
raise Exception("Passed Process id " + str(handle) + " is not a valid id")
def _threaded_method(self, keyword, *args, **kwargs):
from multiprocessing import Queue
from multiprocessing import Process
def wrapped_f(q, *args, **kwargs):
            ''' Calls the keyword method in the child process and puts the result in a queue '''
ret = BuiltIn().call_method(keyword, *args, **kwargs)
q.put(ret)
q = Queue()
th = Process(target=wrapped_f, args=(q,)+args, kwargs=kwargs)
th.result_queue = q
return th
def _threaded(self, keyword, *args):
from multiprocessing import Queue
from multiprocessing import Process
def wrapped_f(q, *args):
            ''' Runs the keyword in the child process and puts the result in a queue '''
LOGGER.unregister_xml_logger()
ret = BuiltIn().run_keyword(keyword, *args)
q.put(ret)
q = Queue()
th = Process(target=wrapped_f, args=(q,)+args)
th.result_queue = q
return th
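# Minimal usage sketch (an illustrative assumption, only meaningful inside a Robot
# Framework run where BuiltIn() has an execution context):
#
#   lib = runKeywordAsync()
#   handle = lib.run_keyword_async("Log", "hello from a child process")
#   result = lib.get_async_return(handle, timeout=30)
#
# Each handle maps to a multiprocessing.Process whose return value is read back
# from its result_queue by wait_async_all() or get_async_return().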
|
run_unittests.py
|
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import stat
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
import urllib.error
import urllib.request
import zipfile
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.compilers
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows,
is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos,
windows_proof_rmtree, python_command, version_compare, split_args,
quote_arg
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
import mesonbuild.dependencies.base
from mesonbuild.build import Target
import mesonbuild.modules.pkgconfig
from mesonbuild.mtest import TAPParser, TestResult
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
URLOPEN_TIMEOUT = 5
def get_dynamic_section_entry(fname, entry):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
pattern = re.compile(entry + r': \[(.*?)\]')
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return m.group(1)
return None # The file did not contain the specified entry.
def get_soname(fname):
return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname):
return get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
def is_tarball():
if not os.path.isdir('docs'):
return True
return False
def is_ci():
if 'CI' in os.environ:
return True
return False
def is_pull():
# Travis
if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
return True
# Azure
if 'SYSTEM_PULLREQUEST_ISFORK' in os.environ:
return True
return False
def _git_init(project_dir):
subprocess.check_call(['git', 'init'], cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', 'teh_coderz@example.com'], cwd=project_dir)
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
@functools.lru_cache()
def is_real_gnu_compiler(path):
'''
Check if the gcc we have is a real gcc and not a macOS wrapper around clang
'''
if not path:
return False
out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT)
return 'Free Software Foundation' in out
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
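# Illustrative use of the decorator above (hypothetical test method, not part of
# this suite):
#
#   @skipIfNoExecutable('valgrind')
#   def test_needs_valgrind(self):
#       ...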
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
pkg-config installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_no_cmake(f):
'''
Skip this test if no cmake is found, unless we're on CI.
This allows users to run our test suite without having
cmake installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('cmake') is None:
raise unittest.SkipTest('cmake not found')
return f(*args, **kwargs)
return wrapped
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env()
f = getattr(env, 'detect_{}_compiler'.format(lang))
f(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('No {} compiler found.'.format(lang))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_set(key):
'''
Skip a test if a particular env is set, except when running under CI
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
old = None
if key in os.environ:
if not is_ci():
raise unittest.SkipTest('Env var {!r} set, skipping'.format(key))
old = os.environ.pop(key)
try:
return func(*args, **kwargs)
finally:
if old is not None:
os.environ[key] = old
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
    For example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if feature not in cc.base_options:
raise unittest.SkipTest(
'{} not available with {}'.format(feature, cc.id))
return f(*args, **kwargs)
return wrapped
return actual
@contextmanager
def temp_filename():
'''A context manager which provides a filename to an empty temporary file.
On exit the file will be deleted.
'''
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
yield filename
finally:
try:
os.remove(filename)
except OSError:
pass
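# Illustrative use (hypothetical, not taken from this suite):
#
#   with temp_filename() as fname:
#       with open(fname, 'w') as f:
#           f.write('scratch data')
#   # the file is removed when the block exits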
@contextmanager
def no_pkgconfig():
'''
A context manager that overrides shutil.which and ExternalProgram to force
them to return None for pkg-config to simulate it not existing.
'''
old_which = shutil.which
old_search = ExternalProgram._search
def new_search(self, name, search_dir):
if name == 'pkg-config':
return [None]
return old_search(self, name, search_dir)
def new_which(cmd, *kwargs):
if cmd == 'pkg-config':
return None
return old_which(cmd, *kwargs)
shutil.which = new_which
ExternalProgram._search = new_search
try:
yield
finally:
shutil.which = old_which
ExternalProgram._search = old_search
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), 'unknown version')
self.assertEqual(searchfunc('2016.10.128'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
cc = mesonbuild.compilers.CCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock())
# Test that empty initialization works
a = cargsfunc(cc)
self.assertEqual(a, [])
# Test that list initialization works
a = cargsfunc(cc, ['-I.', '-I..'])
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cargsfunc(cc, ['-I.', '-I.']), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not pre-pended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
## Test --start/end-group
linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
# -Wl,-lfoo is detected as a library and gets added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_compiler_args_remove_system(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
## Test --start/end-group
linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
## Test that to_native removes all system includes
l += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include']
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1'])
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Too many inputs
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
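# Sketch of what the code below does: first run need_exe_wrapper() against a
# cross file that does not set the property, to get the auto-detected value;
# then write a second cross file that forces the opposite value via
# [properties] and check that the explicit setting wins.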
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
# Cannot be used as a context manager because we need to
# open it a second time, and that is not possible on
# Windows.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
def test_listify(self):
listify = mesonbuild.mesonlib.listify
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test flattening and unholdering
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
self.assertEqual([1], listify(holder1, unholder=True))
self.assertEqual([1], listify([holder1], unholder=True))
self.assertEqual([1, 2], listify([holder1, 2], unholder=True))
self.assertEqual([1, 2, 3], listify([holder1, 2, [holder3]], unholder=True))
# Unholding doesn't work recursively when not flattening
self.assertEqual([1, [2], [holder3]], listify([holder1, [2], [holder3]], unholder=True, flatten=False))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Test unholding
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True))
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True, pop=True))
self.assertEqual(kwargs, {})
# Test listification
kwargs = {'sources': [1, 2, 3], 'pch_sources': [4, 5, 6]}
self.assertEqual([[1, 2, 3], [4, 5, 6]], extract(kwargs, 'sources', 'pch_sources'))
def test_pkgconfig_module(self):
class Mock:
pass
mock = Mock()
mock.pcdep = Mock()
mock.pcdep.name = "some_name"
mock.version_reqs = []
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper("thislib")
deps.add_pub_libs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper("thislib")
deps.add_pub_reqs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
shrstc = shr + tuple([x for x in stc if x not in shr])
stcshr = stc + tuple([x for x in shr if x not in stc])
p = cc.get_library_naming(env, LibType.SHARED)
self.assertEqual(p, shr)
p = cc.get_library_naming(env, LibType.STATIC)
self.assertEqual(p, stc)
p = cc.get_library_naming(env, LibType.PREFER_STATIC)
self.assertEqual(p, stcshr)
p = cc.get_library_naming(env, LibType.PREFER_SHARED)
self.assertEqual(p, shrstc)
# Test find library by mocking up openbsd
if platform != 'openbsd':
return
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
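# Of the mocked libfoo files above, the highest cleanly-versioned one
# (libfoo.so.54.0) is expected to win; oddly-named entries such as
# libfoo.so.66a.0b should not be picked.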
found = cc.find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
# This is the priority list of pattern matching for library searching
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
elif is_openbsd():
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'linux')
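# The real host only exercises one branch above, so fake the host system
# field to cover the remaining platforms as well. MSVC naming is only
# checked when actually running on an MSVC-like compiler, presumably
# because it depends on the detected compiler rather than just the OS.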
env.machines.host.system = 'openbsd'
self._test_all_naming(cc, env, patterns, 'openbsd')
env.machines.host.system = 'darwin'
self._test_all_naming(cc, env, patterns, 'darwin')
env.machines.host.system = 'cygwin'
self._test_all_naming(cc, env, patterns, 'cygwin')
env.machines.host.system = 'windows'
self._test_all_naming(cc, env, patterns, 'windows-mingw')
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
def create_static_lib(name):
if not is_osx():
name.open('w').close()
return
src = name.with_suffix('.c')
out = name.with_suffix('.o')
with src.open('w') as f:
f.write('int meson_foobar (void) { return 0; }')
subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
subprocess.check_call(['ar', 'csr', str(name), str(out)])
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env()
compiler = env.detect_c_compiler(MachineChoice.HOST)
env.coredata.compilers.host = {'c': compiler}
env.coredata.compiler_options.host['c_link_args'] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
create_static_lib(p1 / 'libfoo.a')
# libbar.a is in both prefixes
create_static_lib(p1 / 'libbar.a')
create_static_lib(p2 / 'libbar.a')
# Ensure that we never statically link to these
create_static_lib(p1 / 'libpthread.a')
create_static_lib(p1 / 'libm.a')
create_static_lib(p1 / 'libc.a')
create_static_lib(p1 / 'libdl.a')
create_static_lib(p1 / 'librt.a')
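# Monkeypatch pkg-config itself: the fake below returns hand-written
# -L/-l flags pointing at the two temp prefixes, so no real .pc files
# are needed for this test.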
def fake_call_pkgbin(self, args, env=None):
if '--libs' not in args:
return 0, '', ''
if args[0] == 'foo':
return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix()), ''
if args[0] == 'bar':
return 0, '-L{} -lbar'.format(p2.as_posix()), ''
if args[0] == 'internal':
return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix()), ''
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
# Test begins
try:
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
finally:
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class to ensure that in-process configure doesn't mess up
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = PerMachine(None, None)
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
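# The tuples below exercise Version's rich comparison operators directly;
# each `op` is the expected ordering of `a` relative to `b`.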
for (a, b, op) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", operator.gt),
("1.05", "1.5", operator.eq),
("1.0", "1", operator.gt),
("2.50", "2.5", operator.gt),
("fc4", "fc.4", operator.eq),
("FC5", "fc4", operator.lt),
("2a", "2.0", operator.lt),
("1.0", "1.fc4", operator.gt),
("3.0.0_fc", "3.0.0.fc", operator.eq),
# from RPM tests
("1.0", "1.0", operator.eq),
("1.0", "2.0", operator.lt),
("2.0", "1.0", operator.gt),
("2.0.1", "2.0.1", operator.eq),
("2.0", "2.0.1", operator.lt),
("2.0.1", "2.0", operator.gt),
("2.0.1a", "2.0.1a", operator.eq),
("2.0.1a", "2.0.1", operator.gt),
("2.0.1", "2.0.1a", operator.lt),
("5.5p1", "5.5p1", operator.eq),
("5.5p1", "5.5p2", operator.lt),
("5.5p2", "5.5p1", operator.gt),
("5.5p10", "5.5p10", operator.eq),
("5.5p1", "5.5p10", operator.lt),
("5.5p10", "5.5p1", operator.gt),
("10xyz", "10.1xyz", operator.lt),
("10.1xyz", "10xyz", operator.gt),
("xyz10", "xyz10", operator.eq),
("xyz10", "xyz10.1", operator.lt),
("xyz10.1", "xyz10", operator.gt),
("xyz.4", "xyz.4", operator.eq),
("xyz.4", "8", operator.lt),
("8", "xyz.4", operator.gt),
("xyz.4", "2", operator.lt),
("2", "xyz.4", operator.gt),
("5.5p2", "5.6p1", operator.lt),
("5.6p1", "5.5p2", operator.gt),
("5.6p1", "6.5p1", operator.lt),
("6.5p1", "5.6p1", operator.gt),
("6.0.rc1", "6.0", operator.gt),
("6.0", "6.0.rc1", operator.lt),
("10b2", "10a1", operator.gt),
("10a2", "10b2", operator.lt),
("1.0aa", "1.0aa", operator.eq),
("1.0a", "1.0aa", operator.lt),
("1.0aa", "1.0a", operator.gt),
("10.0001", "10.0001", operator.eq),
("10.0001", "10.1", operator.eq),
("10.1", "10.0001", operator.eq),
("10.0001", "10.0039", operator.lt),
("10.0039", "10.0001", operator.gt),
("4.999.9", "5.0", operator.lt),
("5.0", "4.999.9", operator.gt),
("20101121", "20101121", operator.eq),
("20101121", "20101122", operator.lt),
("20101122", "20101121", operator.gt),
("2_0", "2_0", operator.eq),
("2.0", "2_0", operator.eq),
("2_0", "2.0", operator.eq),
("a", "a", operator.eq),
("a+", "a+", operator.eq),
("a+", "a_", operator.eq),
("a_", "a+", operator.eq),
("+a", "+a", operator.eq),
("+a", "_a", operator.eq),
("_a", "+a", operator.eq),
("+_", "+_", operator.eq),
("_+", "+_", operator.eq),
("_+", "_+", operator.eq),
("+", "_", operator.eq),
("_", "+", operator.eq),
# other tests
('0.99.beta19', '0.99.beta14', operator.gt),
("1.0.0", "2.0.0", operator.lt),
(".0.0", "2.0.0", operator.lt),
("alpha", "beta", operator.lt),
("1.0", "1.0.0", operator.lt),
("2.456", "2.1000", operator.lt),
("2.1000", "3.111", operator.lt),
("2.001", "2.1", operator.eq),
("2.34", "2.34", operator.eq),
("6.1.2", "6.3.8", operator.lt),
("1.7.3.0", "2.0.0", operator.lt),
("2.24.51", "2.25", operator.lt),
("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
("3.4.1", "3.4b1", operator.gt),
("041206", "200090325", operator.lt),
("0.6.2+git20130413", "0.6.2", operator.gt),
("2.6.0+bzr6602", "2.6.0", operator.gt),
("2.6.0", "2.6b2", operator.gt),
("2.6.0+bzr6602", "2.6b2x", operator.gt),
("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
("15.8b", "15.8.0.1", operator.lt),
("1.2rc1", "1.2.0", operator.lt),
]:
ver_a = Version(a)
ver_b = Version(b)
if op is operator.eq:
for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.lt:
for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.gt:
for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
def test_msvc_toolset_version(self):
'''
Ensure that the toolset version returns the correct value for this MSVC
'''
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
toolset_ver = cc.get_toolset_version()
self.assertIsNotNone(toolset_ver)
# Visual Studio 2015 and older versions do not define VCToolsVersion
# TODO: ICL doesn't set this in the VS2015 profile either
if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
return
if 'VCToolsVersion' in os.environ:
vctools_ver = os.environ['VCToolsVersion']
else:
self.assertIn('VCINSTALLDIR', os.environ)
# See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
self.assertTrue(vctools_ver.startswith(toolset_ver),
msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver))
def test_split_args(self):
split_args = mesonbuild.mesonlib.split_args
join_args = mesonbuild.mesonlib.join_args
if is_windows():
test_data = [
# examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments
(r'"a b c" d e', ['a b c', 'd', 'e'], True),
(r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False),
(r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False),
(r'a\\\"b c d', [r'a\"b', 'c', 'd'], False),
(r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False),
# other basics
(r'""', [''], True),
(r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True),
(r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True),
(r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True),
(r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
# more illustrative tests
(r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True),
(r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True),
(r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True),
(r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True),
(r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True),
(r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True),
(r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False),
(r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False),
(r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True),
(r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False),
(r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False),
(r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True),
]
else:
test_data = [
(r"'a b c' d e", ['a b c', 'd', 'e'], True),
(r"a/b/c d e", ['a/b/c', 'd', 'e'], True),
(r"a\b\c d e", [r'abc', 'd', 'e'], False),
(r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False),
(r'"a b c" d e', ['a b c', 'd', 'e'], False),
(r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False),
(r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True),
(r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False),
(r"'a & b & c d e'", ['a & b & c d e'], True),
(r"abd'e f'g h", [r'abde fg', 'h'], False),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False),
("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True),
('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False),
("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True),
]
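# The third element of each tuple marks whether join_args(expected) is
# expected to reproduce the original command string verbatim; quoting is
# not canonical, so only some cases round-trip.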
for (cmd, expected, roundtrip) in test_data:
self.assertEqual(split_args(cmd), expected)
if roundtrip:
self.assertEqual(join_args(expected), cmd)
def test_quote_arg(self):
split_args = mesonbuild.mesonlib.split_args
quote_arg = mesonbuild.mesonlib.quote_arg
if is_windows():
test_data = [
('', '""'),
('arg1', 'arg1'),
('/option1', '/option1'),
('/Ovalue', '/Ovalue'),
('/OBob&Alice', '/OBob&Alice'),
('/Ovalue with spaces', r'"/Ovalue with spaces"'),
(r'/O"value with spaces"', r'"/O\"value with spaces\""'),
(r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'),
('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'),
('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'),
(r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'),
]
else:
test_data = [
('arg1', 'arg1'),
('--option1', '--option1'),
('-O=value', '-O=value'),
('-O=Bob&Alice', "'-O=Bob&Alice'"),
('-O=value with spaces', "'-O=value with spaces'"),
('-O="value with spaces"', '\'-O=\"value with spaces\"\''),
('-O=/path with spaces/test', '\'-O=/path with spaces/test\''),
('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"),
]
for (arg, expected) in test_data:
self.assertEqual(quote_arg(arg), expected)
self.assertEqual(split_args(expected)[0], arg)
def test_depfile(self):
for (f, target, expdeps) in [
# empty, unknown target
([''], 'unknown', set()),
# simple target & deps
(['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})),
(['meson/foo.o: foo.c foo.h'], 'foo.c', set()),
# get all deps
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})),
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'foo.c', set({'gen.py'})),
# line continuation, multiple targets
(['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})),
(['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})),
# \\ handling
(['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})),
# $ handling
(['f$o.o: c/b'], 'f$o.o', set({'c/b'})),
(['f$$o.o: c/b'], 'f$o.o', set({'c/b'})),
# cycles
(['a: b', 'b: a'], 'a', set({'a', 'b'})),
(['a: b', 'b: a'], 'b', set({'a', 'b'})),
]:
d = mesonbuild.depfile.DepFile(f)
deps = d.get_all_dependencies(target)
self.assertEqual(deps, expdeps)
def test_log_once(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once('foo')
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual, 'foo', actual)
def test_log_once_ansi(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
f.truncate()
mesonbuild.mlog.warning('bar', once=True)
mesonbuild.mlog.warning('bar', once=True)
actual = f.getvalue().strip()
self.assertEqual(actual.count('bar'), 1, actual)
def test_sort_libpaths(self):
sort_libpaths = mesonbuild.dependencies.base.sort_libpaths
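# Regardless of the input order, the expected result puts the user's
# ~/.local lib dir first, then /usr/local/lib, then /usr/lib, mirroring
# the pkg-config search path passed as the second argument.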
self.assertEqual(sort_libpaths(
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/local/lib', '/home/mesonuser/.local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/libdata/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = env.detect_c_compiler(MachineChoice.HOST)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options().keys():
self.assertIn(opt, md)
for opt in comp.base_options:
self.assertIn(opt, md)
self.assertNotIn('b_unknown', md)
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-options.md.
'''
md = None
with open('docs/markdown/Builtin-options.md') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = list(re.finditer(r"^## (.+)$", md, re.MULTILINE)) + [None]
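# Appending None lets zip() pair the final '## ...' heading with
# end-of-file when slicing out each section's content below.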
for s1, s2 in zip(sections[:], sections[1:]):
if s1.group(1) == "Universal options":
# Extract the content for this section
end = s2.start() if s2 is not None else len(md)
content = md[s1.end():end]
subsections = list(re.finditer(r"^### (.+)$", content, re.MULTILINE)) + [None]
for sub1, sub2 in zip(subsections[:], subsections[1:]):
if sub1.group(1) == "Directories" or sub1.group(1) == "Core options":
# Extract the content for this subsection
sub_end = sub2.start() if sub2 is not None else len(content)
subcontent = content[sub1.end():sub_end]
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) .* \|", subcontent, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(len(found_entries & arches), 0)
found_entries |= arches
break
self.assertEqual(found_entries, set([
*mesonbuild.coredata.builtin_options.keys(),
*mesonbuild.coredata.builtin_options_per_machine.keys()
]))
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md") as f:
md = f.read()
self.assertIsNotNone(md)
sections = list(re.finditer(r"^## (.+)$", md, re.MULTILINE))
for s1, s2 in zip(sections[::2], sections[1::2]):
if s1.group(1) == "CPU families":
# Extract the content for this section
content = md[s1.end():s2.start()]
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt") as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_vim_syntax_highlighting(self):
'''
Ensure that vim syntax highlighting files were updated for new
functions in the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
@unittest.skipIf(is_pull(), 'Skipping because this is a pull request')
def test_json_grammar_syntax_highlighting(self):
'''
Ensure that the syntax-highlighting JSON grammar written by TingPing was
updated for new functions in the global namespace in build files.
https://github.com/TingPing/language-meson/
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
url = 'https://raw.githubusercontent.com/TingPing/language-meson/master/grammars/meson.json'
try:
# Use a timeout to avoid blocking forever in case the network is
# slow or unavailable in a weird way
r = urllib.request.urlopen(url, timeout=URLOPEN_TIMEOUT)
except urllib.error.URLError as e:
# Skip test when network is not available, such as during packaging
# by a distro or Flatpak
if not isinstance(e, urllib.error.HTTPError):
raise unittest.SkipTest('Network unavailable')
# Don't fail the test if github is down, but do fail if 4xx
if e.code >= 500:
raise unittest.SkipTest('Server error ' + str(e.code))
raise e
# On Python 3.5, we must decode bytes to string. Newer versions don't require that.
grammar = json.loads(r.read().decode('utf-8', 'surrogatepass'))
for each in grammar['patterns']:
if 'name' in each and each['name'] == 'support.function.builtin.meson':
# The string is of the form: (?x)\\b(func1|func2|...\n)\\b\\s*(?=\\() and
# we convert that to [func1, func2, ...] without using regex to parse regex
funcs = set(each['match'].split('\\b(')[1].split('\n')[0].split('|'))
if 'name' in each and each['name'] == 'support.variable.meson':
# \\b(builtin1|builtin2...)\\b
builtin = set(each['match'].split('\\b(')[1].split(')\\b')[0].split('|'))
self.assertEqual(builtin, set(interp.builtin.keys()))
self.assertEqual(funcs, set(interp.funcs.keys()))
def test_all_functions_defined_in_ast_interpreter(self):
'''
Ensure that all functions defined in the Interpreter are also defined
in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
class BasePlatformTests(unittest.TestCase):
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
self.prefix = '/usr'
self.libdir = 'lib'
self.libpkgconfigdir = 'lib/pkgconfig'
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
self.meson_args = ['--backend=' + self.backend.name]
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
self.rewrite_command = self.meson_command + ['rewrite']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
# VS doesn't produce stable output when there is nothing to rebuild
# The Xcode backend is untested with unit tests; help welcome!
self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
# In case the directory is inside a symlinked directory, find the real
# path; otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
# If this call hangs, CI will just abort. It is very hard to distinguish
# between a CI issue and a test bug in that case. Set a timeout and fail
# loudly instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, *,
extra_args=None,
default_args=True,
inprocess=False,
override_envvars=None):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix,
'--libdir', self.libdir,
'--libpkgconfigdir', self.libpkgconfigdir]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args)
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
try:
run_mtest_inprocess(['-C', self.builddir])
finally:
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
def uninstall(self, *, override_envvars=None):
self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
def run_target(self, target, *, override_envvars=None):
'''
Run a Ninja target while printing the stdout and stderr to stdout,
and also return a copy of it
'''
return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
# If Ninja is using .rsp files, generate them, read their contents, and
# substitute them as the command for each compile command in the parsed JSON.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
Fetch a list of command lines run by meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
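# Log lines look like 'Command line: <compiler> <args...>'; strip the
# prefix and split the rest into an argument list.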
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
clre = re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE)
linkre = re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE)
self.assertNotRegex(ret, clre)
self.assertNotRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
self.init(testdir, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir work. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
prefix = '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args=extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, extra_args=args)
self.wipe()
# libdir not being inside prefix is not ok
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
for prefix in expected:
args = ['--prefix', prefix]
self.init(testdir, extra_args=args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '168 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, extra_args=args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '54 run target')
self.init(testdir)
self.run_target('check_exists')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
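# The order of targets in the introspection output is not guaranteed;
# normalize it so the static library entry is checked first.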
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO Change the format to a list officially in a followup PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '144 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_install_log_content(self):
'''
Tests that the install-log.txt is consistent with the installed files and directories.
Specifically checks that the log file only contains one entry per file/directory.
https://github.com/mesonbuild/meson/issues/4499
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
installpath = Path(self.installdir)
# Find installed files and directories
expected = {installpath: 0}
for name in installpath.rglob('*'):
expected[name] = 0
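# Each installed path maps to the number of times it appears in the log;
# the checks below require every path to be logged exactly once.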
# Find logged files and directories
with Path(self.builddir, 'meson-logs', 'install-log.txt').open() as f:
logged = list(map(lambda l: Path(l.strip()),
filter(lambda l: not l.startswith('#'),
f.readlines())))
for name in logged:
self.assertTrue(name in expected, 'Log contains extra entry {}'.format(name))
expected[name] += 1
for name, count in expected.items():
self.assertGreater(count, 0, 'Log is missing entry for {}'.format(name))
self.assertLess(count, 2, 'Log has multiple entries for {}'.format(name))
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_env_ops_dont_stack(self):
'''
Test that env ops prepend/append do not stack, and that this usage issues a warning
'''
testdir = os.path.join(self.unit_test_dir, '63 test env does not stack')
out = self.init(testdir)
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND')
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND')
self.assertNotRegex(out, r'WARNING: Overriding.*TEST_VAR_SET')
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
basic_log = f.read()
# Run the buggy test with a setup whose env makes it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt')) as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
# Run the buggy test with a setup that has no env, so it passes
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
# Run tests without --setup will cause the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
default_log = f.read()
# Run tests with explicitly using the same setup that is set as default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt')) as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt')) as f:
other_log = f.read()
self.assertTrue('ENV_A is 1' in default_log)
self.assertTrue('ENV_B is 2' in default_log)
self.assertTrue('ENV_C is 2' in default_log)
self.assertTrue('ENV_A is 1' in mydefault_log)
self.assertTrue('ENV_B is 2' in mydefault_log)
self.assertTrue('ENV_C is 2' in mydefault_log)
self.assertTrue('ENV_A is 1' in other_log)
self.assertTrue('ENV_B is 3' in other_log)
self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
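# Illustrative sketch (not Meson's actual matcher) of how the 'project:suite'
# selectors used above can be read: ':success' matches that suite in any
# project, 'mainprj:' matches any suite of that project, and a bare name
# matches either a project or a suite of that name.
def _selector_matches(selector, project, suite):
    if ':' in selector:
        sel_proj, sel_suite = selector.split(':', 1)
        return (not sel_proj or sel_proj == project) and (not sel_suite or sel_suite == suite)
    return selector in (project, suite)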
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '133 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
testdir = os.path.join(self.common_test_dir, '134 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in split_args(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# target private dir
someexe_id = Target.construct_id_from_path("sub4", "someexe", "@exe")
self.assertPathEqual(incs[0], "-I" + os.path.join("sub4", someexe_id))
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in split_args(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
self.assertPathEqual(incs[0], '-Isomefxe@exe')
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelGnuLikeCompiler
msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows():
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError('Unknown compiler {!r}'.format(evalue))
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), split_args(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, clangcl):
self.assertIsInstance(linker, lib)
self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
# This is clang, not clang-cl
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuDynamicLinker)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
wrappercc_s = ''
for w in wrappercc:
wrappercc_s += quote_arg(w) + ' '
os.environ[evar] = wrappercc_s
wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
wrapperlinker_s = ''
for w in wrapperlinker:
wrapperlinker_s += quote_arg(w) + ' '
os.environ['AR'] = wrapperlinker_s
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
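# Usage sketch for the wrapper part of test_compiler_detection: a value such as
# CC='ccache gcc -pipe' is split back into an exelist, so quoting each element
# is enough to round-trip it. shlex stands in here for Meson's quote_arg()/
# split_args() helpers on POSIX shells.
def _make_wrapped_cc_value(exelist, wrapper_script):
    import shlex
    import sys
    wrapped = [sys.executable, wrapper_script] + list(exelist) + ['-DSOME_ARG']
    return ' '.join(shlex.quote(w) for w in wrapped)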
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '137 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
for cmd in self.get_compdb():
# Get compiler
split = split_args(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
raise AssertionError('{!r} found in c-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '136 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse split_args() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
env = {}
env[env_var] = '-D{}="{}"'.format(define, value)
env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
self.init(testdir, extra_args=['-D{}={}'.format(define, value)], override_envvars=env)
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '113 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertRebuiltTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '60 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertRebuiltTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '94 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
self.dist_impl(_git_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def test_dist_hg(self):
if not shutil.which('hg'):
raise unittest.SkipTest('Mercurial not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <teh_coderz@example.com>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init, include_subprojects=False)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def create_dummy_subproject(self, project_dir, name):
path = os.path.join(project_dir, 'subprojects', name)
os.makedirs(path)
with open(os.path.join(path, 'meson.build'), 'w') as ofile:
ofile.write("project('{}')".format(name))
return path
def dist_impl(self, vcs_init, include_subprojects=True):
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write('''project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
subproject('vcssub', required : false)
subproject('tarballsub', required : false)
''')
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write('''#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
''')
xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip')
zip_checksumfile = zip_distfile + '.sha256sum'
vcs_init(project_dir)
if include_subprojects:
vcs_init(self.create_dummy_subproject(project_dir, 'vcssub'))
self.create_dummy_subproject(project_dir, 'tarballsub')
self.create_dummy_subproject(project_dir, 'unusedsub')
self.init(project_dir)
self.build('dist')
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
self.assertPathDoesNotExist(zip_distfile)
self.assertPathDoesNotExist(zip_checksumfile)
self._run(self.meson_command + ['dist', '--formats', 'zip'],
workdir=self.builddir)
self.assertPathExists(zip_distfile)
self.assertPathExists(zip_checksumfile)
if include_subprojects:
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c']),
sorted(z.namelist()))
self._run(self.meson_command + ['dist', '--formats', 'zip', '--include-subprojects'],
workdir=self.builddir)
z = zipfile.ZipFile(zip_distfile)
self.assertEqual(sorted(['disttest-1.4.3/',
'disttest-1.4.3/subprojects/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c',
'disttest-1.4.3/subprojects/tarballsub/',
'disttest-1.4.3/subprojects/vcssub/',
'disttest-1.4.3/subprojects/tarballsub/meson.build',
'disttest-1.4.3/subprojects/vcssub/meson.build']),
sorted(z.namelist()))
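# Usage sketch for the dist helpers above: from an existing build directory,
# `meson dist` can be driven directly with the same flags dist_impl() passes
# through self._run() (assumes a `meson` executable on PATH).
def _run_meson_dist(builddir, formats='xztar,zip', include_subprojects=False):
    import subprocess
    cmd = ['meson', 'dist', '--formats', formats]
    if include_subprojects:
        cmd.append('--include-subprojects')
    subprocess.check_call(cmd, cwd=builddir)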
def test_rpath_uses_ORIGIN(self):
'''
Test that built targets use $ORIGIN in rpath, which ensures that they
are relocatable and ensures that builds are reproducible since the
build directory won't get embedded into the built binaries.
'''
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.common_test_dir, '42 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
self.assertTrue(rpath is None)
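# Rough stand-in for the get_rpath() helper used above on ELF platforms: read
# DT_RPATH/DT_RUNPATH with readelf and check that every entry is $ORIGIN-based.
# This is an illustrative sketch, not the helper the tests actually call.
def _elf_rpath_uses_origin(binary):
    import re
    import subprocess
    out = subprocess.check_output(['readelf', '-d', binary], universal_newlines=True)
    m = re.search(r'\((?:RPATH|RUNPATH)\)\s+Library r(?:un)?path: \[(.*)\]', out)
    if not m:
        return True  # no rpath/runpath entry at all
    return all(p.startswith('$ORIGIN') for p in m.group(1).split(':'))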
def test_dash_d_dedup(self):
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '154 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '154 reserved targets')
targets = mesonbuild.coredata.forbidden_target_names
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=None):
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
if extra_args is None:
extra_args = []
# Build the static link command from the detected linker object; this covers
# both ar-style linkers and msvc's lib.exe without a hardcoded fallback.
link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
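# For reference, the commands build_static_lib() ends up running are roughly
# equivalent to the following; file names are placeholders.
_STATIC_LIB_EQUIVALENT_CMDS = [
    ['gcc', '-c', 'best.c', '-o', 'best.o'],   # pbcompile(), gcc-style
    ['ar', 'csr', 'libbest.a', 'best.o'],      # ar-style static link
    # msvc-style alternative:
    # ['cl', '/nologo', '/Fobest.obj', '/c', 'best.c'],
    # ['lib', '/NOLOGO', '/OUT:best.lib', 'best.obj'],
]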
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
'''
Test that we prefer static libraries when `static: true` is
passed to dependency() with pkg-config. This can't be an ordinary project test
because we need to build the libraries and then try to find them from meson.build.
Also test that it's not a hard error to have unsatisfiable library deps,
since system libraries such as -lm will never be found statically.
https://github.com/mesonbuild/meson/issues/2785
'''
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
try:
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
libpkgconfigdir = 'lib/pkgconfig'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir,
'--libpkgconfigdir=' + libpkgconfigdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix()]
self.assertEqual(foo_dep.get_compile_args(), cargs)
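# Hedged illustration of the kind of pkg-config file the escaping test above is
# about: with prefix='/usr/with spaces' the generated -L/-I arguments must come
# back as single, unsplit arguments. Field values here are assumptions.
_PC_WITH_SPACES_EXAMPLE = '''\
prefix=/usr/with spaces
libdir=${prefix}/lib
includedir=${prefix}/include

Name: libfoo
Description: example library
Version: 1.0
Libs: -L${libdir} -lfoo
Cflags: -I${includedir}
'''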
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
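# Hedged illustration of the option declaration the three array-option tests
# above exercise; the real file lives in unit test '19 array option', and the
# failing '-Dlist=bad' case suggests it also constrains values (e.g. with a
# `choices :` list), which is assumed rather than shown here.
_ARRAY_OPTION_EXAMPLE = '''
option('list', type : 'array', value : ['foo', 'bar'])
'''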
def opt_has(self, name, value):
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
self.assertTrue(found, "Array option not found in introspect data.")
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '43 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
def test_subproject_promotion(self):
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'], cwd=workdir)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
langs = ['c']
env = get_fake_env()
try:
env.detect_cpp_compiler(MachineChoice.HOST)
langs.append('cpp')
except EnvironmentException:
pass
try:
env.detect_d_compiler(MachineChoice.HOST)
langs.append('d')
except EnvironmentException:
pass
try:
env.detect_fortran_compiler(MachineChoice.HOST)
langs.append('fortran')
except EnvironmentException:
pass
try:
env.detect_objc_compiler(MachineChoice.HOST)
langs.append('objc')
except EnvironmentException:
pass
# FIXME: omitting rust as Windows AppVeyor CI finds Rust but doesn't link correctly
for lang in langs:
for target_type in ('executable', 'library'):
# test empty directory
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
# test directory with existing code file
if lang in ('c', 'cpp'):
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main(void) {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
# The test uses mocking and thus requires that
# the current process is the one to run the Meson steps.
# If we are using an external test executable (most commonly
# in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
if is_sunos():
cc = 'gcc'
else:
cc = 'cc'
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = textwrap.dedent("""\
[binaries]
c = '/usr/bin/{}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""".format(cc))
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
# If XDG_DATA_HOME is set in the environment running the
# tests, this test will fail, so mock the environment, pop
# it, then test
with mock.patch.dict(os.environ):
os.environ.pop('XDG_DATA_HOME', None)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
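# Illustrative sketch of the lookup order exercised above: a bare --cross-file
# name is searched in $XDG_DATA_HOME/meson/cross, then each $XDG_DATA_DIRS
# entry, then ~/.local/share/meson/cross. This mirrors what the test mocks and
# is not the loader Meson itself uses.
def _candidate_cross_files(name):
    import os
    dirs = []
    if os.environ.get('XDG_DATA_HOME'):
        dirs.append(os.environ['XDG_DATA_HOME'])
    dirs += [d for d in os.environ.get('XDG_DATA_DIRS', '').split(':') if d]
    dirs.append(os.path.expanduser('~/.local/share'))
    return [os.path.join(d, 'meson', 'cross', name) for d in dirs]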
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '177 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '186 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
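# Minimal sketch of a build-directory lock in the spirit of BuildDirLock,
# assuming a POSIX host (fcntl). The real class also handles Windows and uses
# its own lock file name; this is illustration only.
class _SketchBuildDirLock:
    def __init__(self, builddir):
        import os
        self.lockpath = os.path.join(builddir, 'meson-private', 'sketch.lock')
    def __enter__(self):
        import fcntl
        self.lockfile = open(self.lockpath, 'w')
        # Raises OSError if another process already holds the lock.
        fcntl.flock(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
        return self
    def __exit__(self, *exc):
        self.lockfile.close()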
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
Test that link_with: a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('''WARNING: target links against shared modules. This is not
recommended as it is not supported on some platforms''')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
libdir_flags = ['-L']
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}:
# msvc-like compiler, also test it with msvc-specific flags
libdir_flags += ['/LIBPATH:', '-LIBPATH:']
else:
# Static libraries are not linkable with -l with msvc because Meson installs them
# as .a files, which unix_args_to_native will not recognize since it expects
# libraries to use the .lib extension. For a DLL the import library is installed
# as .lib. Thus for msvc this test needs to use shared libraries to exercise the
# path-resolving logic in the dependency generation code path.
extra_args = ['--default-library', 'static']
initial_builddir = self.builddir
initial_installdir = self.installdir
for libdir_flag in libdir_flags:
# build library
self.new_builddir()
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
# replace is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
# assert that the user of the library will be rebuilt
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
# restore dirs for the next test case
self.installdir = initial_installdir
self.builddir = initial_builddir
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises(subprocess.CalledProcessError) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
# Just to ensure that we caught the correct error
self.assertIn('passed as both', e.stderr)
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'static')
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.assertEqual(obj.user_options['set_sub_opt'].value, True)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['-Dfoo'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf('-Dfoo')
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.wipe()
# It is not an error to set wrong option for unknown subprojects or
# language because we don't have control on which one will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'foo')
self.wipe()
# c_args value should be parsed with split_args
self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c_args'].value, ['foo bar', 'one', 'two'])
self.wipe()
self.init(testdir, extra_args=['-Dset_percent_opt=myoption%'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['set_percent_opt'].value, 'myoption%')
self.wipe()
# Setting a 2nd time the same option should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=-Dfoo', '-Dc_args=-Dbar'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'bar')
self.assertEqual(obj.builtins['buildtype'].value, 'release')
self.assertEqual(obj.base_options['b_sanitize'].value, 'thread')
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dbar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'foo')
self.assertEqual(obj.builtins['buildtype'].value, 'plain')
self.assertEqual(obj.base_options['b_sanitize'].value, 'address')
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dfoo'])
self.wipe()
except KeyError:
# Ignore KeyError; it happens on CI for compilers that do not
# support b_sanitize. We have to test with a base option because
# they used to fail this test with Meson 0.46 and earlier versions.
pass
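# Illustrative check of the -Dc_args splitting asserted above, using shlex as a
# POSIX approximation of Meson's split_args():
def _split_c_args_example():
    import shlex
    assert shlex.split('-Dfoo -Dbar "-Dthird=one two"') == ['-Dfoo', '-Dbar', '-Dthird=one two']
    assert shlex.split('"foo bar" one two') == ['foo bar', 'one', 'two']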
def test_warning_level_0(self):
testdir = os.path.join(self.common_test_dir, '214 warning level 0')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ --warnlevel
self.init(testdir, extra_args=['--warnlevel=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('--warnlevel=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ -Dwarning_level
self.init(testdir, extra_args=['-Dwarning_level=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('-Dwarning_level=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targeting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\|WARNING: Project targeting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\|WARNING: Project targeting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targeting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
with open(mfile, 'w') as of:
    of.write("project('foobar', 'c')\n")
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'plain')
self.assertEqual(opts['optimization'], '0')
# Setting optimizations to 3 should cause buildtype
# to go to release mode.
self.setconf('-Doptimization=3')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'release')
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['optimization'], '3')
# Going to debug build type should reset debugging
# and optimization
self.setconf('-Dbuildtype=debug')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '0')
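# Summary sketch of the buildtype <-> (debug, optimization) relationship the
# test above walks through; entries for buildtypes not exercised here follow
# Meson's documented defaults.
_BUILDTYPE_TABLE = {
    'plain':          {'debug': False, 'optimization': '0'},
    'debug':          {'debug': True,  'optimization': '0'},
    'debugoptimized': {'debug': True,  'optimization': '2'},
    'release':        {'debug': False, 'optimization': '3'},
    'minsize':        {'debug': True,  'optimization': 's'},
}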
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = r'{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
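        # Configure twice, once with -Dstart_native=false and once with =true;
        # the test project presumably checks that the cross pkg-config script
        # above and the native one (via PKG_CONFIG_LIBDIR) are each picked up
        # in the right configuration.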
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
# Set an older version to force a reconfigure from scratch
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
if change_minor:
v = mesonbuild.coredata.version.split('.')
obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)])
else:
obj.version = '0.47.0'
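        # 0.47.0 differs from the running Meson in more than the last version
        # component; the tests below expect that to force a regeneration from
        # scratch, while bumping only the last component (change_minor=True)
        # must not.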
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
def test_wipe_from_builddir(self):
testdir = os.path.join(self.common_test_dir, '161 custom target subdir depend files')
self.init(testdir)
self.__reconfigure()
        # 'with Path(...)' does not change the working directory; chdir
        # explicitly so that --wipe really runs from inside the build dir.
        oldcwd = os.getcwd()
        os.chdir(self.builddir)
        try:
            self.init(testdir, extra_args=['--wipe'])
        finally:
            os.chdir(oldcwd)
def test_minor_version_does_not_reconfigure_wipe(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure(change_minor=True)
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertNotRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
def test_target_construct_id_from_path(self):
# This id is stable but not guessable.
# The test is supposed to prevent unintentional
# changes of target ID generation.
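        # Expected layout, inferred from the values below:
        # '<7-hex-digit hash of the subdir path>@@<target name><type suffix>'.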
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '35 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '43 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '46 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
testdir = os.path.join(self.common_test_dir, '102 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subproject_dir': 'subprojects',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': 'undefined'
}
]
}
self.assertDictEqual(res, expected)
def test_introspection_target_subproject(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir)
res = self.introspect('--targets')
expected = {
'sublib': 'sublib',
'simpletest': 'sublib',
'user': None
}
for entry in res:
name = entry['name']
self.assertEqual(entry['subproject'], expected[name])
def test_introspect_projectinfo_subproject_dir(self):
testdir = os.path.join(self.common_test_dir, '78 custom subproject dir')
self.init(testdir)
res = self.introspect('--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
testfile = os.path.join(self.common_test_dir, '78 custom subproject dir', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '54 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
testheader = os.path.join(testdir, 'header.h')
badheader = os.path.join(testdir, 'header_orig_h')
goodheader = os.path.join(testdir, 'header_expected_h')
try:
shutil.copyfile(badfile, testfile)
shutil.copyfile(badheader, testheader)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.assertNotEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
self.run_target('clang-format')
self.assertEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
finally:
if os.path.exists(testfile):
os.unlink(testfile)
if os.path.exists(testheader):
os.unlink(testheader)
@skipIfNoExecutable('clang-tidy')
def test_clang_tidy(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-tidy is for now only supported on Ninja, not {}'.format(self.backend.name))
if shutil.which('c++') is None:
raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" not in path.')
if is_osx():
raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.')
testdir = os.path.join(self.unit_test_dir, '70 clang-tidy')
self.init(testdir, override_envvars={'CXX': 'c++'})
out = self.run_target('clang-tidy')
self.assertIn('cttest.cpp:4:20', out)
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '71 cross')
# Do a build to generate a cross file where the host is this target
self.init(testdir, extra_args=['-Dgenerate=true'])
self.meson_cross_file = os.path.join(self.builddir, "crossfile")
self.assertTrue(os.path.exists(self.meson_cross_file))
# Now verify that this is detected as cross
self.new_builddir()
self.init(testdir)
def test_introspect_buildoptions_without_configured_build(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_meson_configure_from_source_does_not_crash(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
self._run(self.mconf_command + [testdir])
def test_introspect_json_dump(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
def assertKeyTypes(key_type_list, obj):
for i in key_type_list:
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
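        # Each *_keylist/*_typelist below pairs an expected introspection key
        # with the Python type it should decode to from JSON; assertKeyTypes
        # checks both presence and type.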
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
('protocol', str),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
('machine', str),
]
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test']
buildoptions_machines = ['any', 'build', 'host']
dependencies_typelist = [
('name', str),
('version', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i)
valid_type = True
break
self.assertIn(i['section'], buildoptions_sections)
self.assertIn(i['machine'], buildoptions_machines)
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
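        # Patch the previously loaded options to the values expected after the
        # two setconf calls above (-Dbuildtype=release implies optimization=3
        # and debug=false), then compare against the file Meson rewrote.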
for idx, i in enumerate(res1):
if i['name'] == 'cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'build.cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'buildtype':
res1[idx]['value'] = 'release'
if i['name'] == 'optimization':
res1[idx]['value'] = '3'
if i['name'] == 'debug':
res1[idx]['value'] = False
with open(introfile, 'r') as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res_wb = json.load(fp)
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:
del i['install_filename']
sources = []
for j in i['target_sources']:
sources += j['sources']
i['target_sources'] = [{
'language': 'unknown',
'compiler': [],
'parameters': [],
'sources': sources,
'generated_sources': []
}]
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_dependencies_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
expected = [
{
'name': 'threads',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'zlib',
'required': False,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'bugDep1',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'somethingthatdoesnotexist',
'required': True,
'version': ['>=1.2.3'],
'has_fallback': False,
'conditional': True
},
{
'name': 'look_i_have_a_fallback',
'required': True,
'version': ['>=1.0.0', '<=99.9.9'],
'has_fallback': True,
'conditional': True
}
]
self.maxDiff = None
self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
# just test that the command does not fail (e.g. because it throws an exception)
self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
testdir = os.path.join(self.unit_test_dir, '64 cmake_prefix_path')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
@skip_if_no_cmake
def test_cmake_parser(self):
testdir = os.path.join(self.unit_test_dir, '65 cmake parser')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
def test_alias_target(self):
if self.backend is Backend.vs:
# FIXME: This unit test is broken with vs backend, needs investigation
raise unittest.SkipTest('Skipping alias_target test with {} backend'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '66 alias target')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'hello.txt'))
self.run_target('build-all')
self.assertPathExists(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathExists(os.path.join(self.builddir, 'hello.txt'))
def test_configure(self):
testdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(testdir)
self._run(self.mconf_command + [self.builddir])
class FailureTests(BasePlatformTests):
'''
Tests that test failure conditions. Build files here should be dynamically
generated and static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
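    # Regexes matching the generic "dependency not found" and "pkg-config not
    # found" error messages; reused by the dependency tests below.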
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, *,
extra_args=None,
langs=None,
meson_version=None,
options=None,
override_envvars=None):
'''
Assert that running meson configure on the specified @contents raises
a error message matching regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
if options is not None:
with open(self.moptions, 'w') as f:
f.write(options)
o = {'MESON_FORCE_BACKTRACE': '1'}
if override_envvars is None:
override_envvars = o
else:
override_envvars.update(o)
# Force tracebacks so we can detect them properly
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args,
inprocess=True,
override_envvars = override_envvars)
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "[Vv]ersion.*string or list"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('python', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# Python2 framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# Look for pkg-config, cache it, then
# Use cached pkg-config without erroring out, then
# Use cached pkg-config to error out
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
"(requires a Objc compiler|{})".format(self.dnf),
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
"(required.*fail|{})".format(self.dnf))
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
"(fail.*not found|{})".format(self.dnf))
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost')",
"(BOOST_ROOT.*absolute|{})".format(self.dnf),
override_envvars = {'BOOST_ROOT': 'relative/path'})
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
env.detect_objc_compiler(MachineChoice.HOST)
env.detect_objcpp_compiler(MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
1. The correct message is outputted when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-required fallback dependency is not found because the
subproject failed to parse.
3. A not-found not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
4. A fallback dependency is found from the subproject parsed in (3)
5. The correct message is outputted when the .wrap file is missing for
a sub-subproject.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Subproject directory not found and .*nosubproj.wrap.* file not found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'WARNING:.* Dependency .*subsubproject.* not found but it is available in a sub-subproject.')
self.assertRegex(out, r'Subproject directory not found and .*subsubproject.wrap.* file not found')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options = "option('some-option', type: 'foo', value: '')"
match = 'Meson version is.*but project requires >=2000'
self.assertMesonRaises("", match, meson_version='>=2000', options=options)
def test_assert_default_message(self):
self.assertMesonRaises("k1 = 'a'\n" +
"assert({\n" +
" k1: 1,\n" +
"}['a'] == 2)\n",
r"Assert failed: {k1 : 1}\['a'\] == 2")
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH
os.environ['PATH'] += os.pathsep + testdir
# Finding a script in PATH w/o extension works and adds the interpreter
# (check only if `.PY` is in PATHEXT)
if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
array of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
        # Assert on the full set so that anyone extending ignore_libs is forced to update this test too.
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
            # According to the Python docs, output is only captured when using
            # check_output. We don't use it, so we cannot check that the
            # configure step failed for the right reason.
return
self.build()
def test_install_pdb_introspection(self):
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
self.init(testdir)
installed = self.introspect('--installed')
files = [os.path.basename(path) for path in installed.values()]
self.assertTrue('prog.pdb' in files)
def _check_ld(self, name: str, lang: str, expected: str) -> None:
if not shutil.which(name):
raise unittest.SkipTest('Could not find {}.'.format(name))
with mock.patch.dict(os.environ, {'LD': name}):
env = get_fake_env()
try:
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('Could not find a compiler for {}'.format(lang))
self.assertEqual(comp.linker.id, expected)
def test_link_environment_variable_lld_link(self):
self._check_ld('lld-link', 'c', 'lld-link')
def test_link_environment_variable_link(self):
self._check_ld('link', 'c', 'link')
def test_link_environment_variable_optlink(self):
self._check_ld('optlink', 'c', 'optlink')
def test_link_environment_variable_rust(self):
self._check_ld('link', 'rust', 'link')
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.platform_test_dir, '7 bitcode')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
for compdb in self.get_compdb():
if 'module' in compdb['file']:
self.assertNotIn('-fembed-bitcode', compdb['command'])
else:
self.assertIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
for compdb in self.get_compdb():
self.assertNotIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '152 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
def _get_darwin_versions(self, fname):
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
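        # otool -L prints the file name on its first line; the next line
        # carries the '(compatibility version X, current version Y)' pair that
        # the regex below extracts.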
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
@skipIfNoPkgconfig
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
# We purposely pass a duplicate rpath to Meson, in order
# to ascertain that Meson does not call install_name_tool
# with duplicate -delete_rpath arguments, which would
# lead to erroring out on installation
env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
self.init(testdir, override_envvars=env)
self.build()
self.install()
@unittest.skipUnless(not is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
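        # PkgConfigDependency honours PKG_CONFIG_LIBDIR, so point it at the
        # private dir where the generated .pc files were written.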
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
privatedir2 = self.privatedir
env = {'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2])}
self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
        # pkg-config strips some duplicated flags, so we have to parse the
        # generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'pub-lib-order']
out = self._run(cmd + ['--libs'], override_envvars=env).strip().split()
self.assertEqual(out, ['-llibmain2', '-llibinternal'])
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
self.assertFalse('blub_blob_blib' in pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
def _test_soname_impl(self, libpath, install):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
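        # [:-3] strips the '.so' suffix, so the glob counts the library file
        # plus any versioned aliases or symlinks.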
self.assertEqual(len(glob(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
self.assertEqual(len(glob(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(glob(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(glob(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(glob(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
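        # self.prefix is absolute, so os.path.join(self.installdir, self.prefix)
        # would discard installdir; plain concatenation nests the prefix under
        # the staging install directory instead.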
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
testdir = os.path.join(self.common_test_dir, '39 has function')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
Oflag = '-O3'
OflagCPP = Oflag
if cpp.get_id() in ('clang', 'gcc'):
# prevent developers from adding "int main(int argc, char **argv)"
# to small Meson checks unless these parameters are actually used
OflagCPP += ' -Werror=unused-parameter'
env = {'CFLAGS': Oflag,
'CXXFLAGS': OflagCPP}
self.init(testdir, override_envvars=env)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '39 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir, compiler, p: str):
lang_std = p + '_std'
has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0'))
has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
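        # _clang_at_least() takes two minimum versions: presumably one for
        # upstream clang and one for Apple's Xcode clang, whose version
        # numbering differs (see the Xcode link below).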
# Check that all the listed -std=xxx options for this compiler work just fine when used
# https://en.wikipedia.org/wiki/Xcode#Latest_versions
# https://www.gnu.org/software/gcc/projects/cxx-status.html
for v in compiler.get_options()[lang_std].choices:
# we do it like this to handle gnu++17,c++17 and gnu17,c17 cleanly
# thus, C++ first
if '++17' in v and not has_cpp17:
continue
elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
continue
# now C
elif '17' in v and not has_cpp2a_c17:
continue
elif '18' in v and not has_c18:
continue
std_opt = '{}={}'.format(lang_std, v)
self.init(testdir, extra_args=['-D' + std_opt])
cmd = self.get_compdb()[0]['command']
# c++03 and gnu++03 are not understood by ICC, don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
if v != 'none' and not (compiler.get_id(), v) in skiplist:
cmd_std = " -std={} ".format(v)
self.assertIn(cmd_std, cmd)
try:
self.build()
except Exception:
print('{} was {!r}'.format(lang_std, v))
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
if p == 'c':
env_flag_name = 'CFLAGS'
elif p == 'cpp':
env_flag_name = 'CXXFLAGS'
else:
raise NotImplementedError('Language {} not defined.'.format(p))
env = {}
env[env_flag_name] = cmd_std
with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
msg='C compiler should have failed with -std=FAIL'):
self.init(testdir, override_envvars = env)
# ICC won't fail in the above because additional flags are needed to
# make unknown -std=... options errors.
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc, 'c')
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp, 'cpp')
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir, extra_args='--unity=subprojects')
simpletest_id = Target.construct_id_from_path('subprojects/sublib', 'simpletest', '@exe')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', simpletest_id, 'simpletest-unity.c'))
sublib_id = Target.construct_id_from_path('subprojects/sublib', 'sublib', '@sha')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', sublib_id, 'sublib-unity.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'rw------T'
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '195 install_mode')
self.init(testdir)
self.build()
self.install()
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using default
        install umask of 022, regardless of the umask at the time the worktree
was checked out or the build was executed.
'''
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
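        # With the default install umask of 022, executables and directories
        # should come out as 0o755 and plain data files as 0o644, no matter
        # which umask (002 or 027) was active while building.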
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
# NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
# the flags before returning them to -Lfoo -Lbar -lfoo -lbar
# but pkgconf seems to not do that. Sigh. Support both.
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
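        # Every (first, second) pair above must appear in that relative order
        # on the link line.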
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
        self.assertEqual(os.path.basename(docbook_target['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(docbook_target['target_sources'][0]['sources'][0]))
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
# C program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
# C++ program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_coverage(self):
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found')
if not shutil.which('genhtml') and not gcovr_new_rootdir:
raise unittest.SkipTest('genhtml not found and gcovr is too old')
if 'clang' in os.environ.get('CC', ''):
# We need to use llvm-cov instead of gcovr with clang
raise unittest.SkipTest('Coverage does not work with clang right now, help wanted!')
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
print(os.path.join(testdir, 'some_cross_tool.py'))
crossfile.write(textwrap.dedent('''\
[binaries]
c = '/usr/bin/{1}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
sometool.py = ['{0}']
someothertool.py = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
''').format(os.path.join(testdir, 'some_cross_tool.py'),
'gcc' if is_sunos() else 'cc'))
crossfile.flush()
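        # Flush so the cross file contents are on disk before meson reads them.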
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
mesonbuild.modules.gnome.native_glib_version = '2.20'
env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
try:
self.init(testdir,
inprocess=True,
override_envvars=env)
self.build(override_envvars=env)
finally:
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--libpkgconfigdir=lib/pkgconfig'],
default_args=False)
self.install(use_destdir=False)
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
self.assertFalse(b'libpkgdep-int' in pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
self.assertFalse(b'glib' in pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2, override_envvars=myenv)
self.build(override_envvars=myenv)
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths')
pkg_dir = os.path.join(testdir, 'pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({'pkg_config_path': pkg_dir}, subproject='')
kwargs = {'required': True, 'silent': True}
relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
self.assertTrue(relative_path_dep.found())
# Ensure link_args are properly quoted
libpath = Path(self.builddir) / '../relativepath/lib'
link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
        '''
        Build and install a static library into a temporary prefix, then build
        a dependent application against it using only the installed pkg-config
        files.
        '''
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--libpkgconfigdir=lib/pkgconfig',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_static_archive_stripping(self):
'''
Check that Meson produces valid static archives with --strip enabled
'''
with tempfile.TemporaryDirectory() as tempdirname:
testdirbase = os.path.join(self.unit_test_dir, '68 static archive stripping')
# build lib
self.new_builddir()
testdirlib = os.path.join(testdirbase, 'lib')
testlibprefix = os.path.join(tempdirname, 'libprefix')
self.init(testdirlib, extra_args=['--prefix=' + testlibprefix,
'--libdir=lib',
'--libpkgconfigdir=lib/pkgconfig',
'--default-library=static',
'--buildtype=debug',
'--strip'], default_args=False)
self.build()
self.install(use_destdir=False)
# build executable (uses lib, fails if static archive has been stripped incorrectly)
pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
            # On these platforms libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
        self.assertLess(deps.index(b'-lsomething'), deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
            raise unittest.SkipTest('rpaths are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
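        # Note: '$' is escaped as '$$' in build.ninja, hence the \$\$ORIGIN above.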
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.common_test_dir, '201 override with exe')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
PkgConfigDependency) works. On macOS, this workflow works out of the
        box. On Linux, BSDs, Windows, etc., you need to set extra arguments such
        as LD_LIBRARY_PATH, so that part of the workflow is only exercised on macOS.
The system library is found with cc.find_library() and pkg-config deps.
'''
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir, override_envvars=env)
self.prefix = oldprefix
self.build(override_envvars=env)
# test uninstalled
self.run_tests(override_envvars=env)
if not is_osx():
# Rest of the workflow only works on macOS
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir, override_envvars=env)
self.build(override_envvars=env)
# test running after installation
self.install(override_envvars=env)
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
curdir = os.getcwd()
os.chdir(subdir)
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
os.chdir(curdir)
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('62 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
lines = f.readlines()
for lib in ('-ldl', '-lm', '-lc', '-lrt'):
for line in lines:
if lib not in line:
continue
                # Assert that the library appears exactly once on this line.
self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_noncross_options(self):
# C_std defined in project options must be in effect also when native compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir])
compdb = self.get_compdb()
self.assertEqual(len(compdb), 2)
self.assertRegex(compdb[0]['command'], '-std=c99')
self.assertRegex(compdb[1]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
crossfile = tempfile.NamedTemporaryFile(mode='w')
env = {'CC': '"' + os.path.join(testdir, 'build_wrapper.py') + '"'}
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir, override_envvars=env)
@skipIfNoPkgconfig
def test_static_link(self):
if is_cygwin():
raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.")
# Build some libraries and install them
testdir = os.path.join(self.unit_test_dir, '69 static link/lib')
libdir = os.path.join(self.installdir, self.libdir)
oldprefix = self.prefix
self.prefix = self.installdir
self.init(testdir)
self.install(use_destdir=False)
        # Test that the installed libraries work
self.new_builddir()
self.prefix = oldprefix
meson_args = ['-Dc_link_args=-L{} -Wl,-rpath,{}'.format(libdir, libdir),
'--fatal-meson-warnings']
testdir = os.path.join(self.unit_test_dir, '69 static link')
env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')}
self.init(testdir, extra_args=meson_args, override_envvars=env)
self.build()
self.run_tests()
def _check_ld(self, check: str, name: str, lang: str, expected: str) -> None:
if is_sunos():
raise unittest.SkipTest('Solaris currently cannot override the linker.')
if not shutil.which(check):
raise unittest.SkipTest('Could not find {}.'.format(check))
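        # mock.patch.dict temporarily sets LD in the environment so compiler
        # detection picks up the requested linker; a compiler whose
        # use_linker_args() is empty has no way to select an alternative
        # linker, so the test is skipped for it.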
with mock.patch.dict(os.environ, {'LD': name}):
env = get_fake_env()
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
if lang != 'rust' and comp.use_linker_args('foo') == []:
raise unittest.SkipTest(
'Compiler {} does not support using alternative linkers'.format(comp.id))
self.assertEqual(comp.linker.id, expected)
def test_ld_environment_variable_bfd(self):
self._check_ld('ld.bfd', 'bfd', 'c', 'GNU ld.bfd')
def test_ld_environment_variable_gold(self):
self._check_ld('ld.gold', 'gold', 'c', 'GNU ld.gold')
def test_ld_environment_variable_lld(self):
self._check_ld('ld.lld', 'lld', 'c', 'lld')
def test_ld_environment_variable_rust(self):
self._check_ld('ld.gold', 'gold', 'rust', 'GNU ld.gold')
def test_ld_environment_variable_cpp(self):
self._check_ld('ld.gold', 'gold', 'cpp', 'GNU ld.gold')
def test_ld_environment_variable_objc(self):
self._check_ld('ld.gold', 'gold', 'objc', 'GNU ld.gold')
def test_ld_environment_variable_objcpp(self):
self._check_ld('ld.gold', 'gold', 'objcpp', 'GNU ld.gold')
def test_ld_environment_variable_fortran(self):
self._check_ld('ld.gold', 'gold', 'fortran', 'GNU ld.gold')
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BasePlatformTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
# When cross compiling "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
        self.fail('Option libdir not in introspect data.')
def test_std_remains(self):
# C_std defined in project options must be in effect also when cross compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
@skipIfNoPkgconfig
def test_pkg_config_option(self):
if not shutil.which('arm-linux-gnueabihf-pkg-config'):
raise unittest.SkipTest('Cross-pkgconfig not found.')
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BasePlatformTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks and compiler checks to fail, but causes configure to fail if it
requires running a cross-built executable (custom_target or run_target)
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
env = {'MESON_FORCE_BACKTRACE': '1'}
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*target.*use-exe-wrapper'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false',
inprocess=True,
override_envvars=env)
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*run target.*run-prog'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false',
inprocess=True,
override_envvars=env)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
override_envvars=env)
self.build()
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*PATH'):
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True, override_envvars=env)
@skipIfNoPkgconfig
def test_cross_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
        # When specifying a known name (python2 / python3), the module
        # will also try 'python' as a fallback and use it if the major
        # version matches
try:
self.init(testdir, extra_args=['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
            # python2 is not necessarily installed on the test machine. If it
            # is not, or the python headers can't be found, the test raises
            # MESON_SKIP_TEST. We could check beforehand which python versions
            # are available, but that is the module's job (a chicken-and-egg
            # situation), so we just ask for forgiveness rather than permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, extra_args=['-Dpython=%s' % py])
except unittest.SkipTest:
# Same as above, pypy2 and pypy3 are not expected to be present
# on the test system, the test project only raises in these cases
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
# The test is configured to error out with MESON_SKIP_TEST
# in case it could not find python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=not-python'])
self.wipe()
# While dir is an external command on both Windows and Linux,
# it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=dir'])
self.wipe()
class RewriterTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.maxDiff = None
def prime(self, dirname):
copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)
def rewrite_raw(self, directory, args):
if isinstance(args, str):
args = [args]
command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, timeout=60)
print('STDOUT:')
print(p.stdout)
print('STDERR:')
print(p.stderr)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
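        # The rewriter prints its machine-readable JSON result on stderr;
        # an empty stderr means there is nothing to report.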
if not p.stderr:
return {}
return json.loads(p.stderr)
def rewrite(self, directory, args):
if isinstance(args, str):
args = [args]
return self.rewrite_raw(directory, ['command'] + args)
def test_target_source_list(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_add_sources_abs(self):
self.prime('1 basic')
abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
self.rewrite(self.builddir, add)
out = self.rewrite(self.builddir, inf)
expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
self.assertDictEqual(out, expected)
def test_target_remove_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_subdir(self):
self.prime('2 subdirs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
self.assertDictEqual(list(out['target'].values())[0], expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(list(out['target'].values())[0], expected)
def test_target_remove(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
    def test_target_add(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_remove_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, {})
def test_target_add_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)
def test_target_source_sorting(self):
self.prime('5 sorting')
add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
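        # The expected list below encodes the rewriter's natural sort order:
        # sources in subdirectories come first and numeric components are
        # compared as numbers (e.g. a2.c before a10.c).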
out = self.rewrite(self.builddir, add_json)
out = self.rewrite(self.builddir, inf_json)
expected = {
'target': {
'exe1@exe': {
'name': 'exe1',
'sources': [
'aaa/a/a1.c',
'aaa/b/b1.c',
'aaa/b/b2.c',
'aaa/f1.c',
'aaa/f2.c',
'aaa/f3.c',
'bbb/a/b1.c',
'bbb/b/b2.c',
'bbb/c1/b5.c',
'bbb/c2/b7.c',
'bbb/c10/b6.c',
'bbb/a4.c',
'bbb/b3.c',
'bbb/b4.c',
'bbb/b5.c',
'a1.c',
'a2.c',
'a3.c',
'a10.c',
'a20.c',
'a30.c',
'a100.c',
'a101.c',
'a110.c',
'a210.c',
'a666.c',
'b1.c',
'c2.c'
]
}
}
}
self.assertDictEqual(out, expected)
def test_target_same_name_skip(self):
self.prime('4 same name targets')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'myExe', 'sources': ['main.cpp']}
self.assertEqual(len(out['target']), 2)
for val in out['target'].values():
self.assertDictEqual(expected, val)
def test_kwargs_info(self):
self.prime('3 kwargs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
'dependency#dep1': {'required': True, 'method': 'cmake'}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_add(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': 'GPL'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove_regex(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {},
'target#tgt1': {},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
f.write("{}='{}'\n".format(k, v))
return filename
def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
extra_args = extra_args or {}
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
for name in chain(extra_args, kwargs):
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in chain(extra_args.items(), kwargs.items()):
f.write(' if args.{}:\n'.format(name))
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On windows we need yet another level of indirection, as cmd cannot
# invoke python files itself, so instead we generate a .bat file, which
# invokes our python wrapper
batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
with open(batfile, 'wt') as f:
f.write(r'@{} {} %*'.format(sys.executable, filename))
return batfile
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
"""
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, for_machine)
cc = getter()
binary, newid = cb(cc)
env.binaries[for_machine].binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
# This test hangs on cygwin.
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.')
def test_native_file_is_pipe(self):
fifo = os.path.join(self.builddir, 'native.file')
os.mkfifo(fifo)
with tempfile.TemporaryDirectory() as d:
wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')
def filler():
with open(fifo, 'w') as f:
f.write('[binaries]\n')
f.write("bash = '{}'\n".format(wrapper))
thread = threading.Thread(target=filler)
thread.start()
self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
thread.join()
os.unlink(fifo)
self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)])
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
            raise unittest.SkipTest('llvm-config not installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
if os.path.exists('/etc/debian_version'):
rc = subprocess.call(['pkg-config', '--cflags', 'python2'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if rc != 0:
# Python 2 will be removed in Debian Bullseye, thus we must
# remove the build dependency on python2-dev. Keep the tests
# but only run them if dev packages are available.
raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.')
self._simple_test('python', 'python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
# There is an ICC for windows (windows build, linux host),
# but we don't support that ATM so lets not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang, binary, version_str, version):
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = wrapper
compiler = getter()
self.assertEqual(compiler.version, version)
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr',
extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'})
env = get_fake_env()
env.binaries.host.binaries['swift'] = wrapper
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
class CrossFileTests(BasePlatformTests):
"""Tests for cross file functioality not directly related to
cross compiling.
This is mainly aimed to testing overrides from cross files.
"""
def test_cross_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib',
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_chain(self):
# crossfile2 overrides crossfile overrides nativefile
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'--cross-file', os.path.join(testcase, 'crossfile2'),
'-Ddef_bindir=binbar2',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser(io.StringIO(s))
return iter(parser.parse())
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
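    # A brief reference for the TAP line forms exercised by the tests below
    # (summarised from the cases themselves, not an exhaustive grammar):
    # a plan line such as '1..4', test lines 'ok [N] [name]' / 'not ok [N] [name]',
    # optional '# SKIP ...' / '# TODO ...' directives, 'Bail out! <message>',
    # 'TAP version 13', '# ...' diagnostic lines, and (v13) indented YAML blocks
    # delimited by '---' and '...'.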
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, count=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, count=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, count=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_empty_line(self):
events = self.parse_tap('1..1\n\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
def _clang_at_least(compiler, minver: str, apple_minver: str) -> bool:
"""
    Check that the Clang compiler is at least a specified version, whether AppleClang or regular Clang.
Parameters
----------
compiler:
Meson compiler object
minver: str
Clang minimum version
apple_minver: str
        AppleClang minimum version
Returns
-------
at_least: bool
Clang is at least the specified version
"""
if isinstance(compiler, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler)):
return version_compare(compiler.version, apple_minver)
return version_compare(compiler.version, minver)
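# Usage sketch with hypothetical version constraints: version_compare() takes
# strings of the form '>=X.Y.Z', so a caller might write
#   _clang_at_least(compiler, '>=3.8.0', '>=8.0')
# to require upstream Clang >= 3.8.0 or AppleClang >= 8.0.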
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.cflags_mapping.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def convert_args(argv):
# If we got passed a list of tests, pass it on
pytest_args = ['-v'] if '-v' in argv else []
test_list = []
for arg in argv:
if arg.startswith('-'):
continue
# ClassName.test_name => 'ClassName and test_name'
if '.' in arg:
arg = ' and '.join(arg.split('.'))
test_list.append(arg)
if test_list:
pytest_args += ['-k', ' or '.join(test_list)]
return pytest_args
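# Example of the mapping above, derived directly from the code:
#   convert_args(['-v', 'AllPlatformTests.test_foo', 'InternalTests'])
#   -> ['-v', '-k', 'AllPlatformTests and test_foo or InternalTests']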
def main():
unset_envs()
try:
import pytest # noqa: F401
# Need pytest-xdist for `-n` arg
import xdist # noqa: F401
if sys.version_info.major <= 3 and sys.version_info.minor <= 5:
raise ImportError('pytest with python <= 3.5 is causing issues on the CI')
pytest_args = ['-n', 'auto', './run_unittests.py']
pytest_args += convert_args(sys.argv[1:])
return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode
except ImportError:
print('pytest-xdist not found, using unittest instead')
pass
# All attempts at locating pytest failed, fall back to plain unittest.
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
'TAPParserTests',
'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
'WindowsTests', 'DarwinTests']
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
raise SystemExit(main())
|
manager.py
|
#!/usr/bin/env python3.7
import os
import sys
import fcntl
import errno
import signal
import subprocess
import datetime
from common.spinner import Spinner
from common.basedir import BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError):
pass
os._exit(os.wait()[1])
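# Summary of the scheme above: the parent stays alive only to forward
# SIGINT/SIGTERM to the child and to relay the child's pty output to a
# non-blocking stdout, finally exiting with the child's wait status; the child
# returns from unblock_stdout() and continues into the main program below.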
if __name__ == "__main__":
unblock_stdout()
import glob
import shutil
import hashlib
import importlib
import traceback
from multiprocessing import Process
from setproctitle import setproctitle #pylint: disable=no-name-in-module
from common.params import Params
import cereal
ThermalStatus = cereal.log.ThermalData.ThermalStatus
from selfdrive.services import service_list
from selfdrive.swaglog import cloudlog
import selfdrive.messaging as messaging
from selfdrive.registration import register
from selfdrive.version import version, dirty
import selfdrive.crash as crash
from selfdrive.loggerd.config import ROOT
# comment out anything you don't want to run
managed_processes = {
#"thermald": "selfdrive.thermald",
#"uploader": "selfdrive.loggerd.uploader",
#"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
#"plannerd": "selfdrive.controls.plannerd",
#"radard": "selfdrive.controls.radard",
#"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
#"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
#"logmessaged": "selfdrive.logmessaged",
#"tombstoned": "selfdrive.tombstoned",
#"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
#"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
#"pandad": "selfdrive.pandad",
#"ui": ("selfdrive/ui", ["./start.py"]),
#"calibrationd": "selfdrive.locationd.calibrationd",
#"paramsd": ("selfdrive/locationd", ["./paramsd"]),
#"visiond": ("selfdrive/visiond", ["./visiond"]),
#"sensord": ("selfdrive/sensord", ["./start_sensord.py"]),
#"gpsd": ("selfdrive/sensord", ["./start_gpsd.py"]),
#"updated": "selfdrive.updated",
}
daemon_processes = {
"athenad": "selfdrive.athena.athenad",
}
android_packages = ("ai.comma.plus.offroad", "ai.comma.plus.frame")
running = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing visiond sometimes causes page table corruption
unkillable_processes = [] #'visiond']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = [] #'sensord', 'paramsd']
persistent_processes = [
#'thermald',
#'logmessaged',
#'logcatd',
#'tombstoned',
#'uploader',
#'ui',
#'updated',
]
car_started_processes = [
'controlsd',
#'plannerd',
#'loggerd',
#'sensord',
#'radard',
#'calibrationd',
#'paramsd',
#'visiond',
#'proclogd',
#'ubloxd',
#'gpsd',
#'deleter',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
# ****************** process management functions ******************
def launcher(proc):
try:
# import the process
mod = importlib.import_module(proc)
# rename the process
setproctitle(proc)
# terminate the zmq context since we forked
import zmq
zmq.Context.instance().term()
# exec the process
mod.main()
except KeyboardInterrupt:
cloudlog.warning("child %s got SIGINT" % proc)
except Exception:
    # can't install the crash handler because sys.excepthook doesn't play nice
# with threads, so catch it here.
crash.capture_exception()
raise
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name, params):
proc = daemon_processes[name]
pid_param = name.capitalize() + 'Pid'
pid = params.get(pid_param)
if pid is not None:
try:
os.kill(int(pid), 0)
# process is running (kill is a poorly-named system call)
return
except OSError:
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc],
cwd='/',
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
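# Note on the liveness probe above: os.kill(pid, 0) delivers no signal but still
# performs the existence check, so the OSError branch means the recorded pid is
# stale and a new daemon should be started.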
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
else:
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
# give it 5 seconds to die
running[name].join(5.0)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
running[name].join(15.0)
if running[name].exitcode is None:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
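# Escalation used above: SIGINT or SIGKILL for processes listed in
# interrupt_processes/kill_processes, otherwise SIGTERM via terminate(); after a
# 5 second join() a surviving process is SIGKILLed, unless it is listed in
# unkillable_processes, in which case the phone is force-rebooted after a
# further 15 seconds.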
def pm_apply_packages(cmd):
for p in android_packages:
system("pm %s %s" % (cmd, p))
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id, dongle_secret = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
def system(cmd):
try:
cloudlog.info("running %s" % cmd)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
cloudlog.event("running failed",
cmd=e.cmd,
output=e.output[-1024:],
returncode=e.returncode)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock(service_list['thermal'].port)
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
# start daemon processes
for p in daemon_processes:
start_daemon_process(p, params)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start frame
pm_apply_packages('enable')
system("LD_LIBRARY_PATH= appops set ai.comma.plus.offroad SU allow")
system("am start -n ai.comma.plus.frame/.MainActivity")
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
logger_dead = False
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
# uploader is gated based on the phone temperature
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
kill_managed_process("uploader")
else:
start_managed_process("uploader")
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
for p in car_started_processes:
kill_managed_process(p)
# check the status of all processes, did any of them die?
running_list = [" running %s %s" % (p, running[p]) for p in running]
cloudlog.debug('\n'.join(running_list))
# is this still needed?
if params.get("DoUninstall") == "1":
break
def get_installed_apks():
dat = subprocess.check_output(["pm", "list", "packages", "-f"], encoding='utf8').strip().split("\n") # pylint: disable=unexpected-keyword-arg
ret = {}
for x in dat:
if x.startswith("package:"):
v,k = x.split("package:")[1].split("=")
ret[k] = v
return ret
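# 'pm list packages -f' prints lines of the form
#   package:/data/app/ai.comma.plus.offroad-1/base.apk=ai.comma.plus.offroad
# (the path here is illustrative), which the split above turns into a
# {package_name: apk_path} dict.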
def install_apk(path):
# can only install from world readable path
install_path = "/sdcard/%s" % os.path.basename(path)
shutil.copyfile(path, install_path)
ret = subprocess.call(["pm", "install", "-r", install_path])
os.remove(install_path)
return ret == 0
def update_apks():
# install apks
installed = get_installed_apks()
install_apks = glob.glob(os.path.join(BASEDIR, "apk/*.apk"))
for apk in install_apks:
app = os.path.basename(apk)[:-4]
if app not in installed:
installed[app] = None
cloudlog.info("installed apks %s" % (str(installed), ))
for app in installed.keys():
apk_path = os.path.join(BASEDIR, "apk/"+app+".apk")
if not os.path.exists(apk_path):
continue
h1 = hashlib.sha1(open(apk_path, 'rb').read()).hexdigest()
h2 = None
if installed[app] is not None:
h2 = hashlib.sha1(open(installed[app], 'rb').read()).hexdigest()
cloudlog.info("comparing version of %s %s vs %s" % (app, h1, h2))
if h2 is None or h1 != h2:
cloudlog.info("installing %s" % app)
success = install_apk(apk_path)
if not success:
cloudlog.info("needing to uninstall %s" % app)
system("pm uninstall %s" % app)
success = install_apk(apk_path)
assert success
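# In short: any bundled apk whose sha1 differs from the installed copy (or that
# is not installed at all) gets reinstalled; if 'pm install -r' fails, the
# package is uninstalled first and the install retried.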
def manager_update():
update_apks()
uninstall = [app for app in get_installed_apks().keys() if app in ("com.spotify.music", "com.waze")]
for app in uninstall:
cloudlog.info("uninstalling %s" % app)
os.system("pm uninstall % s" % app)
def manager_prepare(spinner=None):
# build cereal first
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, "cereal"))
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % (100.0 * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
os.system("service call power 16 i32 0 s16 recovery i32 1")
def main():
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
if os.getenv("NOLOG") is not None:
del managed_processes['loggerd']
del managed_processes['tombstoned']
if os.getenv("NOUPLOAD") is not None:
del managed_processes['uploader']
if os.getenv("NOVISION") is not None:
del managed_processes['visiond']
if os.getenv("LEAN") is not None:
del managed_processes['uploader']
del managed_processes['loggerd']
del managed_processes['logmessaged']
del managed_processes['logcatd']
del managed_processes['tombstoned']
del managed_processes['proclogd']
if os.getenv("NOCONTROL") is not None:
del managed_processes['controlsd']
del managed_processes['plannerd']
del managed_processes['radard']
# support additional internal only extensions
try:
import selfdrive.manager_extensions
selfdrive.manager_extensions.register(register_managed_process) # pylint: disable=no-member
except ImportError:
pass
params = Params()
params.manager_start()
# set unset params
if params.get("CompletedTrainingVersion") is None:
params.put("CompletedTrainingVersion", "0")
if params.get("IsMetric") is None:
params.put("IsMetric", "0")
if params.get("RecordFront") is None:
params.put("RecordFront", "0")
if params.get("HasAcceptedTerms") is None:
params.put("HasAcceptedTerms", "0")
if params.get("HasCompletedSetup") is None:
params.put("HasCompletedSetup", "0")
if params.get("IsUploadRawEnabled") is None:
params.put("IsUploadRawEnabled", "1")
if params.get("IsUploadVideoOverCellularEnabled") is None:
params.put("IsUploadVideoOverCellularEnabled", "1")
if params.get("IsGeofenceEnabled") is None:
params.put("IsGeofenceEnabled", "-1")
if params.get("SpeedLimitOffset") is None:
params.put("SpeedLimitOffset", "0")
if params.get("LongitudinalControl") is None:
params.put("LongitudinalControl", "0")
if params.get("LimitSetSpeed") is None:
params.put("LimitSetSpeed", "0")
if params.get("LimitSetSpeedNeural") is None:
params.put("LimitSetSpeedNeural", "0")
if params.get("LastUpdateTime") is None:
t = datetime.datetime.now().isoformat()
params.put("LastUpdateTime", t.encode('utf8'))
if params.get("OpenpilotEnabledToggle") is None:
params.put("OpenpilotEnabledToggle", "1")
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
#with Spinner() as spinner:
#spinner.update("0") # Show progress bar
#manager_update()
#manager_init()
manager_prepare(None)
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall") == "1":
uninstall()
if __name__ == "__main__":
main()
# manual exit because we are forked
sys.exit(0)
|
session_debug_testlib.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import glob
import os
import shutil
import tempfile
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
def no_rewrite_session_config():
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
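# A session built with this config, e.g.
#   sess = session.Session(config=no_rewrite_session_config())
# runs with several grappler rewrites disabled, presumably so that the node
# names and graph structure the debugger sees match what these tests expect.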
class _RNNCellForTest(rnn_cell_impl.RNNCell):
"""RNN cell for testing."""
def __init__(self, input_output_size, state_size):
self._input_output_size = input_output_size
self._state_size = state_size
self._w = variables.VariableV1(1.0, dtype=dtypes.float32, name="w")
@property
def output_size(self):
return self._input_output_size
@property
def state_size(self):
return self._state_size
def __call__(self, input_, state, scope=None):
return (math_ops.multiply(self._w, input_), state)
class SessionDebugTestBase(test_util.TensorFlowTestCase):
"""Base class for unit tests of tfdbg running with tf.Session."""
@classmethod
def setUpClass(cls):
if test.is_gpu_available():
cls._expected_partition_graph_count = 2
cls._expected_num_devices = 2
gpu_name = test_util.gpu_device_name()
cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
else:
cls._expected_partition_graph_count = 1
cls._expected_num_devices = 1
cls._main_device = "/job:localhost/replica:0/task:0/device:CPU:0"
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self._dump_root = tempfile.mkdtemp()
def tearDown(self):
ops.reset_default_graph()
# Tear down temporary dump directory.
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
def _debug_urls(self, run_number=None):
raise NotImplementedError(
"_debug_urls() method is not implemented in the base test class.")
def _debug_dump_dir(self, run_number=None):
raise NotImplementedError(
"_debug_dump_dir() method is not implemented in the base test class.")
def _debug_run_and_get_dump(self,
sess,
fetches,
feed_dict=None,
debug_ops="DebugIdentity",
tolerate_debug_op_creation_failures=False,
global_step=-1,
validate=True,
expected_partition_graph_count=None):
"""Run fetches with debugging and obtain DebugDumpDir.
Args:
sess: the tf.Session to be used.
fetches: fetches of the Session.run().
feed_dict: feed dict for the Session.run().
debug_ops: name(s) of the debug ops to be used.
tolerate_debug_op_creation_failures: whether to tolerate debug op
creation failures.
global_step: Optional global step.
validate: whether to validate dumped tensors against graph.
expected_partition_graph_count: optional count of partition graphs to
assert on.
Returns:
1. Return values of the Session.run().
2. The DebugDumpDir object from the debugged run().
"""
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=debug_ops,
debug_urls=self._debug_urls(),
tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures,
global_step=global_step)
run_metadata = config_pb2.RunMetadata()
run_output = sess.run(fetches,
feed_dict=feed_dict,
options=run_options,
run_metadata=run_metadata)
if expected_partition_graph_count is not None:
self.assertEqual(expected_partition_graph_count,
len(run_metadata.partition_graphs))
return run_output, debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs,
validate=validate)
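  # Typical call, as used by the tests further below:
  #   run_output, dump = self._debug_run_and_get_dump(sess, fetches)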
def _generate_dump_from_simple_addition_graph(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
v_init_val = np.array([[2.0], [-1.0]])
# Use node names with overlapping namespace (i.e., parent directory) to
# test concurrent, non-racing directory creation.
u_name = "u"
v_name = "v"
w_name = "w"
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variables.VariableV1(u_init, name=u_name)
v_init = constant_op.constant(v_init_val, shape=[2, 1])
v = variables.VariableV1(v_init, name=v_name)
w = math_ops.matmul(u, v, name=w_name)
u.initializer.run()
v.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = "file://%s" % self._dump_root
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
# Invoke Session.run().
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
simple_add_results = collections.namedtuple("SimpleAddResults", [
"u_init_val", "v_init_val", "u", "v", "w", "u_name", "v_name", "w_name",
"dump"
])
return simple_add_results(u_init_val, v_init_val, u, v, w, u_name, v_name,
w_name, dump)
def testCopyNodesHaveCorrectDebugOpsAndURLsAttributeValues(self):
with session.Session() as sess:
u = variables.VariableV1(2.1, name="u")
v = variables.VariableV1(20.0, name="v")
w = math_ops.multiply(u, v, name="w")
sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
debug_utils.add_debug_tensor_watch(
run_options,
"u",
0, ["DebugNumericSummary(gated_grpc=True)", "DebugIdentity"],
debug_urls=debug_urls)
debug_utils.add_debug_tensor_watch(
run_options, "v", 0, ["DebugNumericSummary"], debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
r = sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertAllClose(42.0, r)
u_copy_node_def = None
v_copy_node_def = None
for partition_graph in run_metadata.partition_graphs:
for node_def in partition_graph.node:
if debug_graphs.is_copy_node(node_def.name):
if node_def.name == "__copy_u_0":
u_copy_node_def = node_def
elif node_def.name == "__copy_v_0":
v_copy_node_def = node_def
self.assertIsNotNone(u_copy_node_def)
debug_ops_spec = u_copy_node_def.attr["debug_ops_spec"].list.s
self.assertEqual(2, len(debug_ops_spec))
self.assertEqual("DebugNumericSummary;%s;1" % debug_urls[0],
debug_ops_spec[0].decode("utf-8"))
self.assertEqual("DebugIdentity;%s;0" % debug_urls[0],
debug_ops_spec[1].decode("utf-8"))
self.assertIsNotNone(v_copy_node_def)
debug_ops_spec = v_copy_node_def.attr["debug_ops_spec"].list.s
self.assertEqual(1, len(debug_ops_spec))
self.assertEqual("DebugNumericSummary;%s;0" % debug_urls[0],
debug_ops_spec[0].decode("utf-8"))
def testConcurrentDumpingToPathsWithOverlappingParentDirsWorks(self):
results = self._generate_dump_from_simple_addition_graph()
self.assertTrue(results.dump.loaded_partition_graphs())
# Since global_step is not explicitly specified, it should take its default
# value: -1.
self.assertEqual(-1, results.dump.core_metadata.global_step)
self.assertGreaterEqual(results.dump.core_metadata.session_run_index, 0)
self.assertGreaterEqual(results.dump.core_metadata.executor_step_index, 0)
self.assertEqual([], results.dump.core_metadata.input_names)
self.assertEqual([results.w.name], results.dump.core_metadata.output_names)
self.assertEqual([], results.dump.core_metadata.target_nodes)
# Verify the dumped tensor values for u and v.
self.assertEqual(2, results.dump.size)
self.assertAllClose([results.u_init_val],
results.dump.get_tensors("%s/read" % results.u_name, 0,
"DebugIdentity"))
self.assertAllClose([results.v_init_val],
results.dump.get_tensors("%s/read" % results.v_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
results.dump.get_rel_timestamps("%s/read" % results.u_name, 0,
"DebugIdentity")[0], 0)
self.assertGreaterEqual(
results.dump.get_rel_timestamps("%s/read" % results.v_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
results.dump.get_dump_sizes_bytes("%s/read" % results.u_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
results.dump.get_dump_sizes_bytes("%s/read" % results.v_name, 0,
"DebugIdentity")[0], 0)
def testGetOpTypeWorks(self):
results = self._generate_dump_from_simple_addition_graph()
self.assertEqual(results.u.op.type,
results.dump.node_op_type(results.u_name))
self.assertIn(results.v.op.type, results.dump.node_op_type(results.v_name))
self.assertIn(results.w.op.type, results.dump.node_op_type(results.w_name))
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
results.dump.node_op_type("foo_bar")
def testDumpStringTensorsWorks(self):
with session.Session(config=no_rewrite_session_config()) as sess:
str1_init_val = np.array(b"abc")
str2_init_val = np.array(b"def")
str1_init = constant_op.constant(str1_init_val)
str2_init = constant_op.constant(str2_init_val)
str1_name = "str1"
str2_name = "str2"
str1 = variables.VariableV1(str1_init, name=str1_name)
str2 = variables.VariableV1(str2_init, name=str2_name)
# Concatenate str1 and str2
str_concat = math_ops.add(str1, str2, name="str_concat")
str1.initializer.run()
str2.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % str1_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % str2_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
sess.run(str_concat, options=run_options, run_metadata=run_metadata)
# String ops are located on CPU.
self.assertEqual(1, len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertIn(str1_name, dump.nodes())
self.assertIn(str2_name, dump.nodes())
self.assertEqual(2, dump.size)
self.assertEqual([str1_init_val],
dump.get_tensors("%s/read" % str1_name, 0,
"DebugIdentity"))
self.assertEqual([str2_init_val],
dump.get_tensors("%s/read" % str2_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % str1_name, 0, "DebugIdentity")[0],
0)
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % str2_name, 0, "DebugIdentity")[0],
0)
self.assertGreater(
dump.get_dump_sizes_bytes("%s/read" % str1_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
dump.get_dump_sizes_bytes("%s/read" % str2_name, 0,
"DebugIdentity")[0], 0)
def testDumpUninitializedVariable(self):
op_namespace = "testDumpUninitializedVariable"
with session.Session() as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
s_init_val = b"str1"
u_name = "%s/u" % op_namespace
s_name = "%s/s" % op_namespace
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variables.VariableV1(u_init, name=u_name)
s_init = constant_op.constant(s_init_val)
s = variables.VariableV1(s_init, name=s_name)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=debug_urls)
debug_utils.add_debug_tensor_watch(
run_options, s_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
# Initialize u and s.
sess.run(variables.global_variables_initializer(),
options=run_options,
run_metadata=run_metadata)
# Verify the dump file for the uninitialized value of u.
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertEqual(2, dump.size)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
      # The dump captures u and s before the initializer has assigned them, so the
      # values below are unconvertible protos marked as uninitialized.
u_vals = dump.get_tensors(u_name, 0, "DebugIdentity")
s_vals = dump.get_tensors(s_name, 0, "DebugIdentity")
self.assertEqual(1, len(u_vals))
self.assertIsInstance(u_vals[0], debug_data.InconvertibleTensorProto)
self.assertFalse(u_vals[0].initialized)
self.assertEqual(1, len(s_vals))
self.assertIsInstance(s_vals[0], debug_data.InconvertibleTensorProto)
self.assertFalse(s_vals[0].initialized)
# Call run() again, to check that u is initialized properly.
self.assertAllClose(u_init_val, sess.run(u))
self.assertEqual(s_init_val, sess.run(s))
def testDebugWhileLoopGeneratesMultipleDumps(self):
with session.Session(config=no_rewrite_session_config()) as sess:
num_iter = 10
# "u" is the Variable being updated in the loop.
u_name = "testDumpToFileWhileLoop/u"
u_namespace = u_name.split("/")[0]
u_init_val = np.array(11.0)
u_init = constant_op.constant(u_init_val)
u = variables.VariableV1(u_init, name=u_name)
# "v" is the increment.
v_name = "testDumpToFileWhileLoop/v"
v_namespace = v_name.split("/")[0]
v_init_val = np.array(2.0)
v_init = constant_op.constant(v_init_val)
v = variables.VariableV1(v_init, name=v_name)
u.initializer.run()
v.initializer.run()
i = constant_op.constant(0, name="testDumpToFileWhileLoop/i")
def cond(i):
return math_ops.less(i, num_iter)
def body(i):
new_u = state_ops.assign_add(u, v)
new_i = math_ops.add(i, 1)
op = control_flow_ops.group(new_u)
new_i = control_flow_ops.with_dependencies([op], new_i)
return [new_i]
loop = control_flow_ops.while_loop(
cond, body, [i], parallel_iterations=10)
# Create RunOptions for debug-watching tensors
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for while/Identity.
debug_utils.add_debug_tensor_watch(
run_options, "while/Identity", 0, debug_urls=debug_urls)
# Add debug tensor watch for while/Add/y.
debug_utils.add_debug_tensor_watch(
run_options, "while/Add/y", 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
r = sess.run(loop, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
self.assertEqual(num_iter, r)
u_val_final = sess.run(u)
self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)
# Verify dump files
self.assertTrue(os.path.isdir(self._dump_root))
u_glob_out = glob.glob(os.path.join(self._dump_root, "*", u_namespace))
v_glob_out = glob.glob(os.path.join(
self._dump_root, "*", v_namespace, "v"))
self.assertTrue(os.path.isdir(u_glob_out[0]))
self.assertTrue(os.path.isdir(v_glob_out[0]))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Expected dumped tensors: u, v/read, 10 iterations of while/Identity,
# and 10 iterations of while/Add/y.
self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)
# Verify tensor values.
self.assertAllClose([u_init_val],
dump.get_tensors(u_name, 0, "DebugIdentity"))
self.assertAllClose([v_init_val],
dump.get_tensors("%s/read" % v_name, 0,
"DebugIdentity"))
while_id_tensors = dump.get_tensors("while/Identity", 0, "DebugIdentity")
self.assertEqual(10, len(while_id_tensors))
for k in xrange(len(while_id_tensors)):
self.assertAllClose(np.array(k), while_id_tensors[k])
# Verify ascending timestamps from the while loops.
while_id_rel_timestamps = dump.get_rel_timestamps("while/Identity", 0,
"DebugIdentity")
while_id_dump_sizes_bytes = dump.get_dump_sizes_bytes("while/Identity", 0,
"DebugIdentity")
self.assertEqual(10, len(while_id_rel_timestamps))
prev_rel_time = 0
prev_dump_size_bytes = while_id_dump_sizes_bytes[0]
for rel_time, dump_size_bytes in zip(while_id_rel_timestamps,
while_id_dump_sizes_bytes):
self.assertGreaterEqual(rel_time, prev_rel_time)
self.assertEqual(dump_size_bytes, prev_dump_size_bytes)
prev_rel_time = rel_time
prev_dump_size_bytes = dump_size_bytes
# Test querying debug watch keys from node name.
watch_keys = dump.debug_watch_keys("while/Identity")
self.assertEqual(["while/Identity:0:DebugIdentity"], watch_keys)
# Test querying debug datum instances from debug watch key.
self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))
self.assertEqual([], dump.watch_key_to_data("foo"))
def testDebugWhileLoopWatchingWholeGraphWorks(self):
with session.Session() as sess:
loop_body = lambda i: math_ops.add(i, 2)
loop_cond = lambda i: math_ops.less(i, 16)
i = constant_op.constant(10, name="i")
loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])
loop_result, dump = self._debug_run_and_get_dump(sess, loop)
self.assertEqual(16, loop_result)
self.assertEqual(
[[10]], dump.get_tensors("while/Enter", 0, "DebugIdentity"))
self.assertEqual(
[[12], [14], [16]],
dump.get_tensors("while/NextIteration", 0, "DebugIdentity"))
def testDebugTrainingDynamicRNNWorks(self):
with session.Session() as sess:
input_size = 3
state_size = 2
time_steps = 4
batch_size = 2
input_values = np.random.randn(time_steps, batch_size, input_size)
sequence_length = np.random.randint(0, time_steps, size=batch_size)
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
outputs_dynamic, _ = rnn.dynamic_rnn(
_RNNCellForTest(input_size, state_size),
inputs=concat_inputs,
sequence_length=sequence_length,
time_major=True,
dtype=dtypes.float32)
toy_loss = math_ops.reduce_sum(outputs_dynamic * outputs_dynamic)
train_op = gradient_descent.GradientDescentOptimizer(
learning_rate=0.1).minimize(toy_loss, name="train_op")
sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph_with_blacklists(
run_options,
sess.graph,
node_name_regex_blacklist="(.*rnn/while/.*|.*TensorArray.*)",
debug_urls=self._debug_urls())
# b/36870549: Nodes with these name patterns need to be excluded from
# tfdbg in order to prevent MSAN warnings of uninitialized Tensors
# under both file:// and grpc:// debug URL schemes.
run_metadata = config_pb2.RunMetadata()
sess.run(train_op, feed_dict={concat_inputs: input_values},
options=run_options, run_metadata=run_metadata)
debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
def testDebugCondWatchingWholeGraphWorks(self):
with session.Session() as sess:
x = variables.VariableV1(10.0, name="x")
y = variables.VariableV1(20.0, name="y")
cond = control_flow_ops.cond(
x > y, lambda: math_ops.add(x, 1), lambda: math_ops.add(y, 1))
sess.run(variables.global_variables_initializer())
cond_result, dump = self._debug_run_and_get_dump(sess, cond)
self.assertEqual(21, cond_result)
self.assertAllClose(
[21.0], dump.get_tensors("cond/Merge", 0, "DebugIdentity"))
def testFindNodesWithBadTensorValues(self):
with session.Session() as sess:
u_name = "testFindNodesWithBadTensorValues/u"
v_name = "testFindNodesWithBadTensorValues/v"
w_name = "testFindNodesWithBadTensorValues/w"
x_name = "testFindNodesWithBadTensorValues/x"
y_name = "testFindNodesWithBadTensorValues/y"
z_name = "testFindNodesWithBadTensorValues/z"
u_init = constant_op.constant([2.0, 4.0])
u = variables.VariableV1(u_init, name=u_name)
v_init = constant_op.constant([2.0, 1.0])
v = variables.VariableV1(v_init, name=v_name)
# Expected output: [0.0, 3.0]
w = math_ops.subtract(u, v, name=w_name)
# Expected output: [inf, 1.3333]
x = math_ops.div(u, w, name=x_name)
# Expected output: [nan, 4.0]
y = math_ops.multiply(w, x, name=y_name)
z = math_ops.multiply(y, y, name=z_name)
u.initializer.run()
v.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, z,
expected_partition_graph_count=self._expected_partition_graph_count)
def has_bad_value(_, tensor):
return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))
# Find all "offending tensors".
bad_data = dump.find(has_bad_value)
# Verify that the nodes with bad values are caught through running find
# on the debug dump.
self.assertEqual(3, len(bad_data))
self.assertEqual(x_name, bad_data[0].node_name)
self.assertEqual(y_name, bad_data[1].node_name)
self.assertEqual(z_name, bad_data[2].node_name)
# Test first_n kwarg of find(): Find the first offending tensor.
first_bad_datum = dump.find(has_bad_value, first_n=1)
self.assertEqual(1, len(first_bad_datum))
self.assertEqual(x_name, first_bad_datum[0].node_name)
def testFindInfOrNanWithOpNameExclusion(self):
with session.Session() as sess:
u_name = "testFindInfOrNanWithOpNameExclusion/u"
v_name = "testFindInfOrNanWithOpNameExclusion/v"
w_name = "testFindInfOrNanWithOpNameExclusion/w"
x_name = "testFindInfOrNanWithOpNameExclusion/x"
y_name = "testFindInfOrNanWithOpNameExclusion/y"
z_name = "testFindInfOrNanWithOpNameExclusion/z"
u_init = constant_op.constant([2.0, 4.0])
u = variables.VariableV1(u_init, name=u_name)
v_init = constant_op.constant([2.0, 1.0])
v = variables.VariableV1(v_init, name=v_name)
# Expected output: [0.0, 3.0]
w = math_ops.subtract(u, v, name=w_name)
# Expected output: [inf, 1.3333]
x = math_ops.div(u, w, name=x_name)
# Expected output: [nan, 4.0]
y = math_ops.multiply(w, x, name=y_name)
z = math_ops.multiply(y, y, name=z_name)
u.initializer.run()
v.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, z,
expected_partition_graph_count=self._expected_partition_graph_count)
# Find all "offending tensors".
bad_data = dump.find(debug_data.has_inf_or_nan,
exclude_node_names=".*/x$")
# Verify that the nodes with bad values are caught through running find
# on the debug dump.
self.assertEqual(2, len(bad_data))
# Assert that the node `x` should have been excluded.
self.assertEqual(y_name, bad_data[0].node_name)
self.assertEqual(z_name, bad_data[1].node_name)
first_bad_datum = dump.find(
debug_data.has_inf_or_nan, first_n=1, exclude_node_names=".*/x$")
self.assertEqual(1, len(first_bad_datum))
self.assertEqual(y_name, first_bad_datum[0].node_name)
def _session_run_for_graph_structure_lookup(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_name = "testDumpGraphStructureLookup/u"
v_name = "testDumpGraphStructureLookup/v"
w_name = "testDumpGraphStructureLookup/w"
u_init = constant_op.constant([2.0, 4.0])
u = variables.VariableV1(u_init, name=u_name)
v = math_ops.add(u, u, name=v_name)
w = math_ops.add(v, v, name=w_name)
u.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, w,
expected_partition_graph_count=self._expected_partition_graph_count)
return u_name, v_name, w_name, dump
def testGraphStructureLookupGivesDevicesAndNodesInfo(self):
u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
# Test num_devices().
self.assertEqual(self._expected_num_devices, len(dump.devices()))
# Test node_device().
self.assertEqual(self._main_device, dump.node_device(u_name))
with self.assertRaisesRegexp(ValueError,
"does not exist in partition graphs"):
dump.node_device(u_name + "foo")
# Test node_exists().
self.assertTrue(dump.node_exists(u_name))
self.assertTrue(dump.node_exists(u_name + "/read"))
self.assertFalse(dump.node_exists(u_name + "/read" + "/foo"))
def testGraphStructureLookupGivesNodesAndAttributes(self):
u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
u_read_name = u_name + "/read"
# Test node name list lookup of the DebugDumpDir object.
if test_util.gpu_device_name():
node_names = dump.nodes(
device_name="/job:localhost/replica:0/task:0/device:GPU:0")
else:
node_names = dump.nodes()
self.assertTrue(u_name in node_names)
self.assertTrue(u_read_name in node_names)
# Test querying node attributes.
u_attr = dump.node_attributes(u_name)
self.assertEqual(dtypes.float32, u_attr["dtype"].type)
self.assertEqual(1, len(u_attr["shape"].shape.dim))
self.assertEqual(2, u_attr["shape"].shape.dim[0].size)
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_attributes("foo")
def testGraphStructureLookupGivesDebugWatchKeys(self):
u_name, v_name, w_name, dump = (
self._session_run_for_graph_structure_lookup())
# Test querying the debug watch keys with node names.
self.assertEqual(["%s:0:DebugIdentity" % u_name],
dump.debug_watch_keys(u_name))
self.assertEqual(["%s:0:DebugIdentity" % v_name],
dump.debug_watch_keys(v_name))
self.assertEqual(["%s:0:DebugIdentity" % w_name],
dump.debug_watch_keys(w_name))
self.assertEqual([], dump.debug_watch_keys("foo"))
# Test querying debug datum instances from debug watch.
u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])
self.assertEqual(1, len(u_data))
self.assertEqual(u_name, u_data[0].node_name)
self.assertEqual(0, u_data[0].output_slot)
self.assertEqual("DebugIdentity", u_data[0].debug_op)
self.assertGreaterEqual(u_data[0].timestamp, 0)
self.assertEqual([], dump.watch_key_to_data("foo"))
def testGraphStructureLookupGivesNodeInputsAndRecipients(self):
u_name, v_name, w_name, dump = (
self._session_run_for_graph_structure_lookup())
u_read_name = u_name + "/read"
# Test the inputs lookup of the DebugDumpDir object.
self.assertEqual([], dump.node_inputs(u_name))
self.assertEqual([u_name], dump.node_inputs(u_read_name))
self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))
self.assertEqual([v_name] * 2, dump.node_inputs(w_name))
self.assertEqual([], dump.node_inputs(u_name, is_control=True))
self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))
self.assertEqual([], dump.node_inputs(v_name, is_control=True))
self.assertEqual([], dump.node_inputs(w_name, is_control=True))
# Test the outputs recipient lookup of the DebugDumpDir object.
self.assertTrue(u_read_name in dump.node_recipients(u_name))
self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))
self.assertEqual(2, dump.node_recipients(v_name).count(w_name))
self.assertEqual([], dump.node_recipients(u_name, is_control=True))
self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))
self.assertEqual([], dump.node_recipients(v_name, is_control=True))
self.assertEqual([], dump.node_recipients(w_name, is_control=True))
# Test errors raised on invalid node names.
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_inputs(u_name + "foo")
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_recipients(u_name + "foo")
# Test transitive_inputs().
self.assertEqual([], dump.transitive_inputs(u_name))
self.assertEqual([u_name], dump.transitive_inputs(u_read_name))
self.assertEqual(
set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))
self.assertEqual(
set([u_name, u_read_name, v_name]), set(dump.transitive_inputs(w_name)))
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.transitive_inputs(u_name + "foo")
def testGraphStructureLookupWithoutPartitionGraphsDoesNotErrorOut(self):
_, _, _, dump = self._session_run_for_graph_structure_lookup()
# Now load the dump again, without the partition graphs, so we can check
# errors are not raised because the partition graphs are loaded from the
# dump directory.
dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
self.assertTrue(dump.loaded_partition_graphs())
def testGraphPathFindingOnControlEdgesWorks(self):
with session.Session(config=no_rewrite_session_config()) as sess:
v1 = variables.VariableV1(1.0, name="v1")
v2 = variables.VariableV1(2.0, name="v2")
v3 = variables.VariableV1(3.0, name="v3")
a = math_ops.add(v1, v2, name="a")
with ops.control_dependencies([a]):
c = math_ops.subtract(v3, v3, name="c")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, c)
self.assertEqual(["v1", "v1/read", "a", "c"],
dump.find_some_path("v1", "c"))
self.assertIsNone(dump.find_some_path("v1", "c", include_control=False))
def testGraphPathFindingReverseRefEdgeWorks(self):
with session.Session(config=no_rewrite_session_config()) as sess:
v = variables.VariableV1(10.0, name="v")
delta = variables.VariableV1(1.0, name="delta")
inc_v = state_ops.assign_add(v, delta, name="inc_v")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, inc_v)
self.assertEqual(
["delta", "delta/read", "inc_v", "v"],
dump.find_some_path("delta", "v", include_reversed_ref=True))
self.assertIsNone(dump.find_some_path("delta", "v"))
def testCausalityCheckOnDumpsDetectsWrongTemporalOrder(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_name = "testDumpCausalityCheck/u"
v_name = "testDumpCausalityCheck/v"
w_name = "testDumpCausalityCheck/w"
u_init = constant_op.constant([2.0, 4.0])
u = variables.VariableV1(u_init, name=u_name)
v = math_ops.add(u, u, name=v_name)
w = math_ops.add(v, v, name=w_name)
u.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
# First, loading the original dump without supplying the
# partition_graphs should not cause a LookupError; validation occurs
# only when partition_graphs are loaded.
debug_data.DebugDumpDir(self._dump_root)
# Now, loading the original dump with partition graphs supplied should
# succeed. The validation should pass quietly.
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Get the dump file names and compute their timestamps.
self.assertEqual(
1, len(dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")))
v_file_path = dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")[0]
self.assertEqual(
1, len(dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")))
w_file_path = dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")[0]
v_timestamp = int(v_file_path[v_file_path.rindex("_") + 1:])
w_timestamp = int(w_file_path[w_file_path.rindex("_") + 1:])
# Swap and slightly shift the time stamps of the last two dumped tensors,
# to simulate "causality violation", which can happen if the dump
# directory contains incomplete data and/or mixes data from different
# Session.run() calls.
v_file_path_1 = v_file_path[:v_file_path.rindex(
"_")] + "_%d" % w_timestamp
w_file_path_1 = w_file_path[:w_file_path.rindex("_")] + "_%d" % (
v_timestamp - 1)
os.rename(v_file_path, v_file_path_1)
os.rename(w_file_path, w_file_path_1)
# Load the dump directory again. Now a ValueError is expected to be
# raised due to the timestamp swap.
with self.assertRaisesRegexp(ValueError, "Causality violated"):
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Loading the dump directory with kwarg "validate" set explicitly to
# False should get rid of the error.
dump = debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=run_metadata.partition_graphs,
validate=False)
# Next, set the two times stamps to be the same, which should be fine.
v_file_path_2 = v_file_path[:v_file_path.rindex(
"_")] + "_%d" % w_timestamp
w_file_path_2 = w_file_path[:w_file_path.rindex(
"_")] + "_%d" % w_timestamp
os.rename(v_file_path_1, v_file_path_2)
os.rename(w_file_path_1, w_file_path_2)
debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
with session.Session() as sess:
x_name = "oneOfTwoSlots/x"
u_name = "oneOfTwoSlots/u"
v_name = "oneOfTwoSlots/v"
w_name = "oneOfTwoSlots/w"
y_name = "oneOfTwoSlots/y"
x = variables.VariableV1([1, 3, 3, 7], dtype=dtypes.int32, name=x_name)
sess.run(x.initializer)
unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)
v = math_ops.add(unique_x, unique_x, name=v_name)
w = math_ops.add(indices, indices, name=w_name)
y = math_ops.add(w, w, name=y_name)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
# Watch only the first output slot of u, even though it has two output
# slots.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=self._debug_urls())
debug_utils.add_debug_tensor_watch(
run_options, w_name, 0, debug_urls=self._debug_urls())
debug_utils.add_debug_tensor_watch(
run_options, y_name, 0, debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run([v, y], options=run_options, run_metadata=run_metadata)
dump = debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=run_metadata.partition_graphs,
validate=True)
self.assertAllClose([1, 3, 7],
dump.get_tensors(u_name, 0, "DebugIdentity")[0])
def testOutputSlotWithoutOutgoingEdgeCanBeWatched(self):
"""Test watching output slots not attached to any outgoing edges."""
with session.Session(config=no_rewrite_session_config()) as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
u = constant_op.constant(u_init_val, shape=[2, 2], name="u")
# Create a control edge from a node with an output: From u to z.
# Node u will get executed only because of the control edge. The output
# tensor u:0 is not attached to any outgoing edge in the graph. This test
# checks that the debugger can watch such a tensor.
with ops.control_dependencies([u]):
z = control_flow_ops.no_op(name="z")
_, dump = self._debug_run_and_get_dump(sess, z)
# Assert that the DebugIdentity watch on u works properly.
self.assertEqual(1, len(dump.dumped_tensor_data))
datum = dump.dumped_tensor_data[0]
self.assertEqual("u", datum.node_name)
self.assertEqual(0, datum.output_slot)
self.assertEqual("DebugIdentity", datum.debug_op)
self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
def testWatchingVariableUpdateOpsSeesUpdatedValues(self):
"""Watch output slots on Variable-updating ops, with no emitted edges."""
with session.Session(config=no_rewrite_session_config()) as sess:
u_init = constant_op.constant(10.0)
u = variables.VariableV1(u_init, name="gdo/u")
v_init = constant_op.constant(20.0)
v = variables.VariableV1(v_init, name="gdo/v")
w = math_ops.multiply(u, v, name="gdo/w")
# gdo stands for GradientDescentOptimizer.
train_op = gradient_descent.GradientDescentOptimizer(
learning_rate=0.1).minimize(
w, name="gdo/train")
u.initializer.run()
v.initializer.run()
_, dump = self._debug_run_and_get_dump(sess, train_op)
update_u_data = dump.watch_key_to_data(
"gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity")
self.assertEqual(1, len(update_u_data))
# Gradient descent on u: w = u * v, so dw / du = v.
# Updated value of u should be:
# 10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0
self.assertAllClose(8.0, update_u_data[0].get_tensor())
update_v_data = dump.watch_key_to_data(
"gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity")
self.assertEqual(1, len(update_v_data))
# Gradient descent on u: w = u * v, so dw / dv = u.
# Updated value of u should be:
# 20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0
self.assertAllClose(19.0, update_v_data[0].get_tensor())
# Verify that the Variables u and v are updated properly.
self.assertAllClose(8.0, sess.run(u))
self.assertAllClose(19.0, sess.run(v))
def testAllowsWatchingUnconnectedOutputTensor(self):
"""Watch an output slot not emitting any edges.
(Not even control edges from the node.)
"""
with session.Session() as sess:
x_init = constant_op.constant([2, 2, 3, 5, 5])
x = variables.VariableV1(x_init, name="unconnected/x")
# The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the
# graph. Let the debugger watch the unused slot 1.
unique_x, _ = array_ops.unique(x, name="unconnected/unique_x")
y = math_ops.add(unique_x, [0, 1, 2], name="unconnected/y")
x.initializer.run()
# Verify that only slot 0 of unique_x has recipients, while slot 1 of the
# same node does not have recipients.
unique_x_slot_0_recipients = []
unique_x_slot_1_recipients = []
for op in sess.graph.get_operations():
for inp in op.inputs:
if inp.name == "unconnected/unique_x:0":
unique_x_slot_0_recipients.append(op.name)
elif inp.name == "unconnected/unique_x:1":
unique_x_slot_1_recipients.append(op.name)
self.assertEqual(["unconnected/y"], unique_x_slot_0_recipients)
self.assertEqual([], unique_x_slot_1_recipients)
y_result, dump = self._debug_run_and_get_dump(sess, y)
self.assertAllClose([2, 4, 7], y_result)
# Assert that the connected slot (slot 0) is dumped properly.
unique_x_slot_0_dumps = dump.watch_key_to_data(
"unconnected/unique_x:0:DebugIdentity")
self.assertEqual(1, len(unique_x_slot_0_dumps))
self.assertEqual("unconnected/unique_x",
unique_x_slot_0_dumps[0].node_name)
self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)
self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())
# Assert that the unconnected slot (slot 1) is dumped properly.
unique_x_slot_1_dumps = dump.watch_key_to_data(
"unconnected/unique_x:1:DebugIdentity")
self.assertEqual(1, len(unique_x_slot_1_dumps))
self.assertEqual("unconnected/unique_x",
unique_x_slot_1_dumps[0].node_name)
self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)
self.assertAllClose([0, 0, 1, 2, 2],
unique_x_slot_1_dumps[0].get_tensor())
def testSuccessiveDebuggingRunsIncreasesCounters(self):
"""Test repeated Session.run() calls with debugger increments counters."""
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32, name="successive/ph")
x = array_ops.transpose(ph, name="mismatch/x")
y = array_ops.squeeze(ph, name="mismatch/y")
_, dump1 = self._debug_run_and_get_dump(
sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=1)
self.assertEqual(1, dump1.core_metadata.global_step)
self.assertGreaterEqual(dump1.core_metadata.session_run_index, 0)
self.assertEqual(0, dump1.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump1.core_metadata.input_names)
self.assertEqual([x.name], dump1.core_metadata.output_names)
self.assertEqual([], dump1.core_metadata.target_nodes)
shutil.rmtree(self._dump_root)
# Calling run() with the same feed, same output and same debug watch
# options should increment both session_run_index and
# executor_step_index.
_, dump2 = self._debug_run_and_get_dump(
sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=2)
self.assertEqual(2, dump2.core_metadata.global_step)
self.assertEqual(dump1.core_metadata.session_run_index + 1,
dump2.core_metadata.session_run_index)
self.assertEqual(dump1.core_metadata.executor_step_index + 1,
dump2.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump2.core_metadata.input_names)
self.assertEqual([x.name], dump2.core_metadata.output_names)
self.assertEqual([], dump2.core_metadata.target_nodes)
shutil.rmtree(self._dump_root)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=self._debug_urls(), global_step=3)
# Calling run() with a different output should increment
# session_run_index, but not executor_step_index.
_, dump3 = self._debug_run_and_get_dump(
sess, y, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=3)
self.assertEqual(3, dump3.core_metadata.global_step)
self.assertEqual(dump2.core_metadata.session_run_index + 1,
dump3.core_metadata.session_run_index)
self.assertEqual(0, dump3.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump3.core_metadata.input_names)
self.assertEqual([y.name], dump3.core_metadata.output_names)
self.assertEqual([], dump3.core_metadata.target_nodes)
def testDebuggingDuringOpError(self):
"""Test the debug tensor dumping when error occurs in graph runtime."""
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32, name="mismatch/ph")
x = array_ops.transpose(ph, name="mismatch/x")
m = constant_op.constant(
np.array(
[[1.0, 2.0]], dtype=np.float32), name="mismatch/m")
y = math_ops.matmul(m, x, name="mismatch/y")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
with self.assertRaises(errors.OpError):
sess.run(y,
options=run_options,
feed_dict={ph: np.array([[-3.0], [0.0]])})
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertGreaterEqual(dump.core_metadata.session_run_index, 0)
self.assertGreaterEqual(dump.core_metadata.executor_step_index, 0)
self.assertEqual([ph.name], dump.core_metadata.input_names)
self.assertEqual([y.name], dump.core_metadata.output_names)
self.assertEqual([], dump.core_metadata.target_nodes)
# Despite the fact that the run() call errored out and partition_graphs
# are not available via run_metadata, the partition graphs should still
# have been loaded from the dump directory.
self.assertTrue(dump.loaded_partition_graphs())
m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity")
self.assertEqual(1, len(m_dumps))
self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())
x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity")
self.assertEqual(1, len(x_dumps))
self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())
def testDebugNumericSummaryOnInitializedTensorGivesCorrectResult(self):
with session.Session(config=no_rewrite_session_config()) as sess:
a = variables.VariableV1(
[
np.nan, np.nan, 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -np.inf,
-np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.nan, np.nan
],
dtype=np.float32,
name="numeric_summary/a")
b = variables.VariableV1(
[0.0] * 18, dtype=np.float32, name="numeric_summary/b")
c = math_ops.add(a, b, name="numeric_summary/c")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(
sess, c, debug_ops=["DebugNumericSummary"])
self.assertTrue(dump.loaded_partition_graphs())
self.assertAllClose([[
1.0, 18.0, 4.0, 2.0, 2.0, 3.0, 2.0, 5.0, -3.0, 7.0, 0.85714286,
8.97959184, 1.0, 1.0, 18.0
]], dump.get_tensors("numeric_summary/a/read", 0, "DebugNumericSummary"))
def testDebugNumericSummaryOnUninitializedTensorGivesCorrectResult(self):
with session.Session() as sess:
a = variables.VariableV1(
[42], dtype=np.float32, name="numeric_summary_uninit/a")
_, dump = self._debug_run_and_get_dump(
sess, a.initializer, debug_ops=["DebugNumericSummary"])
self.assertTrue(dump.loaded_partition_graphs())
# DebugNumericSummary output should reflect the uninitialized state of
# the watched tensor.
numeric_summary = dump.get_tensors("numeric_summary_uninit/a", 0,
"DebugNumericSummary")[0]
self.assertAllClose([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
numeric_summary[0:8])
# Check dtype (index 12), ndims (index 13) and dimension sizes (index
# 14+).
self.assertAllClose([1.0, 1.0, 1.0], numeric_summary[12:])
self.assertTrue(np.isinf(numeric_summary[8]))
self.assertGreater(numeric_summary[8], 0.0)
self.assertTrue(np.isinf(numeric_summary[9]))
self.assertLess(numeric_summary[9], 0.0)
self.assertTrue(np.isnan(numeric_summary[10]))
self.assertTrue(np.isnan(numeric_summary[11]))
def testDebugNumericSummaryFailureIsToleratedWhenOrdered(self):
with session.Session() as sess:
a = variables.VariableV1("1", name="a")
b = variables.VariableV1("3", name="b")
c = variables.VariableV1("2", name="c")
d = math_ops.add(a, b, name="d")
e = math_ops.add(d, c, name="e")
n = parsing_ops.string_to_number(e, name="n")
m = math_ops.add(n, n, name="m")
sess.run(variables.global_variables_initializer())
# Using DebugNumericSummary on sess.run(m) with the default
# tolerate_debug_op_creation_failures=False should error out due to the
# presence of string-dtype Tensors in the graph.
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary"],
debug_urls=self._debug_urls())
with self.assertRaises(errors.FailedPreconditionError):
sess.run(m, options=run_options, run_metadata=run_metadata)
# Using tolerate_debug_op_creation_failures=True should get rid of the
# error.
m_result, dump = self._debug_run_and_get_dump(
sess, m, debug_ops=["DebugNumericSummary"],
tolerate_debug_op_creation_failures=True)
self.assertEqual(264, m_result)
# The integer-dtype Tensors in the graph should have been dumped
# properly.
self.assertIn("n:0:DebugNumericSummary", dump.debug_watch_keys("n"))
self.assertIn("m:0:DebugNumericSummary", dump.debug_watch_keys("m"))
def testDebugNumericSummaryInvalidAttributesStringAreCaught(self):
with session.Session(config=no_rewrite_session_config()) as sess:
a = variables.VariableV1(10.0, name="a")
b = variables.VariableV1(0.0, name="b")
c = variables.VariableV1(0.0, name="c")
x = math_ops.divide(a, b, name="x")
y = math_ops.multiply(x, c, name="y")
sess.run(variables.global_variables_initializer())
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"1 attribute key\(s\) were not valid for debug node "
r"__dbg_.:0_0_DebugNumericSummary: foo"):
sess.run(y, options=run_options, run_metadata=run_metadata)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0; bar=false)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"2 attribute key\(s\) were not valid for debug node "
r"__dbg_.:0_0_DebugNumericSummary:"):
sess.run(y, options=run_options, run_metadata=run_metadata)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0; mute_if_healthy=true)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"1 attribute key\(s\) were not valid for debug node "
r"__dbg_.:0_0_DebugNumericSummary: foo"):
sess.run(y, options=run_options, run_metadata=run_metadata)
def testDebugNumericSummaryMuteOnHealthyMutesOnlyHealthyTensorDumps(self):
with session.Session(config=no_rewrite_session_config()) as sess:
a = variables.VariableV1(10.0, name="a")
b = variables.VariableV1(0.0, name="b")
c = variables.VariableV1(0.0, name="c")
x = math_ops.divide(a, b, name="x")
y = math_ops.multiply(x, c, name="y")
sess.run(variables.global_variables_initializer())
# Here, validate=False is necessary to avoid causality check error.
# TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
# debug ops with mute_if_healthy=false attribute during validation.
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=["DebugNumericSummary(mute_if_healthy=true)"],
validate=False)
self.assertEqual(2, dump.size)
self.assertAllClose([[
1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, np.inf, -np.inf, np.nan,
np.nan, 1.0, 0.0
]], dump.get_tensors("x", 0, "DebugNumericSummary"))
self.assertAllClose([[
1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, np.inf, -np.inf, np.nan,
np.nan, 1.0, 0.0
]], dump.get_tensors("y", 0, "DebugNumericSummary"))
# Another run with the default mute_if_healthy (false) value should
# dump all the tensors.
shutil.rmtree(self._dump_root)
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=["DebugNumericSummary()"])
self.assertEqual(8, dump.size)
def testDebugNumericSummaryMuteOnHealthyAndCustomBoundsWork(self):
with session.Session() as sess:
a = variables.VariableV1([10.0, 10.0], name="a")
b = variables.VariableV1([10.0, 2.0], name="b")
x = math_ops.add(a, b, name="x") # [20.0, 12.0]
y = math_ops.divide(x, b, name="y") # [2.0, 6.0]
sess.run(variables.global_variables_initializer())
# Here, validate=False is necessary to avoid causality check error.
# TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
# debug ops with mute_if_healthy=false attribute during validation.
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=[
"DebugNumericSummary(mute_if_healthy=true; upper_bound=11.0)"],
validate=False)
self.assertEqual(1, dump.size)
self.assertAllClose([[
1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 12.0, 20.0, 16.0, 16.0, 1.0,
1.0, 2.0]], dump.get_tensors("x", 0, "DebugNumericSummary"))
def testDebugQueueOpsDoesNotErrorOut(self):
with session.Session() as sess:
q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")
_, dump = self._debug_run_and_get_dump(sess, q_init)
self.assertTrue(dump.loaded_partition_graphs())
fifo_queue_tensor = dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0]
self.assertIsInstance(fifo_queue_tensor,
debug_data.InconvertibleTensorProto)
self.assertTrue(fifo_queue_tensor.initialized)
self.assertAllClose(
[101.0, 202.0, 303.0],
dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0])
def testLookUpNodePythonTracebackWorks(self):
with session.Session() as sess:
u_init = constant_op.constant(10.0)
u = variables.VariableV1(u_init, name="traceback/u")
v_init = constant_op.constant(20.0)
v = variables.VariableV1(v_init, name="traceback/v")
w = math_ops.multiply(u, v, name="traceback/w")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, w)
# Prior to setting the Python graph, attempts to do traceback lookup
# should lead to exceptions.
with self.assertRaisesRegexp(
LookupError, "Python graph is not available for traceback lookup"):
dump.node_traceback("traceback/w")
dump.set_python_graph(sess.graph)
# After setting the Python graph, attempts to look up nonexistent nodes
# should lead to exceptions.
with self.assertRaisesRegexp(KeyError,
r"Cannot find node \"foo\" in Python graph"):
dump.node_traceback("foo")
# Lookup should work with node name input.
traceback = dump.node_traceback("traceback/w")
self.assertIsInstance(traceback, list)
self.assertGreater(len(traceback), 0)
for trace in traceback:
self.assertIsInstance(trace, tuple)
# Lookup should also work with tensor name input.
traceback = dump.node_traceback("traceback/w:0")
self.assertIsInstance(traceback, list)
self.assertGreater(len(traceback), 0)
for trace in traceback:
self.assertIsInstance(trace, tuple)
class DebugConcurrentRunCallsTest(test_util.TensorFlowTestCase):
"""Test for debugging concurrent Session.run() calls."""
def _get_concurrent_debug_urls(self):
"""Abstract method to generate debug URLs for concurrent debugged runs."""
raise NotImplementedError(
"_get_concurrent_debug_urls is not implemented in the base test class")
def testDebugConcurrentVariableUpdates(self):
if test.is_gpu_available():
self.skipTest("No testing concurrent runs on a single GPU.")
with session.Session() as sess:
v = variables.VariableV1(30.0, name="v")
constants = []
for i in xrange(self._num_concurrent_runs):
constants.append(constant_op.constant(1.0, name="c%d" % i))
incs = [
state_ops.assign_add(
v, c, use_locking=True, name=("inc%d" % i))
for (i, c) in enumerate(constants)
]
sess.run(v.initializer)
concurrent_debug_urls = self._get_concurrent_debug_urls()
def inc_job(index):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=concurrent_debug_urls[index])
for _ in xrange(100):
sess.run(incs[index], options=run_options)
inc_threads = []
for index in xrange(self._num_concurrent_runs):
inc_thread = threading.Thread(target=functools.partial(inc_job, index))
inc_thread.start()
inc_threads.append(inc_thread)
for inc_thread in inc_threads:
inc_thread.join()
self.assertAllClose(30.0 + 1.0 * self._num_concurrent_runs * 100,
sess.run(v))
all_session_run_indices = []
for index in xrange(self._num_concurrent_runs):
dump = debug_data.DebugDumpDir(self._dump_roots[index])
self.assertTrue(dump.loaded_partition_graphs())
v_data = dump.get_tensors("v", 0, "DebugIdentity")
self.assertEqual(100, len(v_data))
# Examine all the core metadata files
core_metadata_files = glob.glob(
os.path.join(self._dump_roots[index], "_tfdbg_core*"))
timestamps = []
session_run_indices = []
executor_step_indices = []
for core_metadata_file in core_metadata_files:
with open(core_metadata_file, "rb") as f:
event = event_pb2.Event()
event.ParseFromString(f.read())
core_metadata = (
debug_data.extract_core_metadata_from_event_proto(event))
timestamps.append(event.wall_time)
session_run_indices.append(core_metadata.session_run_index)
executor_step_indices.append(core_metadata.executor_step_index)
all_session_run_indices.extend(session_run_indices)
# Assert that executor_step_index increases by one at a time.
executor_step_indices = zip(timestamps, executor_step_indices)
executor_step_indices = sorted(
executor_step_indices, key=lambda x: x[0])
for i in xrange(len(executor_step_indices) - 1):
self.assertEqual(executor_step_indices[i][1] + 1,
executor_step_indices[i + 1][1])
# Assert that session_run_index increases monotonically.
session_run_indices = zip(timestamps, session_run_indices)
session_run_indices = sorted(session_run_indices, key=lambda x: x[0])
for i in xrange(len(session_run_indices) - 1):
self.assertGreater(session_run_indices[i + 1][1],
session_run_indices[i][1])
# Assert that the session_run_indices from the concurrent run() calls are
# all unique.
self.assertEqual(len(all_session_run_indices),
len(set(all_session_run_indices)))
if __name__ == "__main__":
googletest.main()
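# Editor's sketch (not part of the test suite): a minimal, hedged illustration of
# the watch_graph + DebugDumpDir workflow that the tests above exercise. The dump
# directory path is an assumption; TF 1.x Session APIs and the imports used by the
# tests above are assumed to be available.
def _example_tfdbg_dump_workflow(dump_root="/tmp/tfdbg_example_dump"):
  """Run a tiny graph with DebugIdentity watches and load the resulting dump."""
  with session.Session() as sess:
    u = variables.VariableV1([2.0, 4.0], name="example/u")
    v = math_ops.add(u, u, name="example/v")
    sess.run(u.initializer)
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls=["file://" + dump_root])
    run_metadata = config_pb2.RunMetadata()
    sess.run(v, options=run_options, run_metadata=run_metadata)
    return debug_data.DebugDumpDir(
        dump_root, partition_graphs=run_metadata.partition_graphs)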
|
util.py
|
from asyncio import (
AbstractEventLoop,
get_event_loop,
new_event_loop,
set_event_loop,
)
from functools import lru_cache, wraps
from threading import Thread
from typing import (
TYPE_CHECKING,
Any,
Callable,
Coroutine,
Optional,
Type,
TypeVar,
)
T = TypeVar("T")
def cached_property(func: Callable[..., T]) -> T:
return property(lru_cache()(func)) # type: ignore
def prune_dict(d: dict) -> dict:
"""Prune items from dictionaries where value is None"""
return {k: v for k, v in d.items() if v is not None}
TMix = TypeVar("TMix")
def mixin_for(baseclass: Type[TMix]) -> Type[TMix]:
"""
Useful function to make mixins with baseclass typehint
Should be used as a mixin base class to fix typehints
```
class ReadonlyMixin(mixin_for(BaseClass))):
...
```
"""
if TYPE_CHECKING:
return baseclass
return object
def fix_url_schema(url: str) -> str:
return url if url.startswith("http") else f"https://{url}"
class AsyncJobThread:
"""
Thread runner that allows running async tasks synchronously in a separate thread.
Caches the event loop so it can be reused across calls.
It allows running async functions synchronously inside an already running event loop:
since nesting event loops is not allowed, we run the coroutine on a separate thread
with its own event loop.
"""
def __init__(self) -> None:
self.loop: Optional[AbstractEventLoop] = None
self.result: Optional[Any] = None
self.exception: Optional[BaseException] = None
def _initialize_loop(self) -> None:
if not self.loop:
try:
# despite the docs, this function fails if no loop is set
self.loop = get_event_loop()
except RuntimeError:
self.loop = new_event_loop()
set_event_loop(self.loop)
def run(self, coro: Coroutine) -> None:
try:
self._initialize_loop()
assert self.loop is not None
self.result = self.loop.run_until_complete(coro)
except BaseException as e:
self.exception = e
def execute(self, coro: Coroutine) -> Any:
thread = Thread(target=self.run, args=[coro])
thread.start()
thread.join()
if self.exception:
raise self.exception
return self.result
def async_to_sync(f: Callable, async_job_thread: Optional[AsyncJobThread] = None) -> Callable:
@wraps(f)
def sync(*args: Any, **kwargs: Any) -> Any:
try:
loop = get_event_loop()
except RuntimeError:
loop = new_event_loop()
set_event_loop(loop)
# We are inside a running loop
if loop.is_running():
nonlocal async_job_thread
if not async_job_thread:
async_job_thread = AsyncJobThread()
return async_job_thread.execute(f(*args, **kwargs))
return loop.run_until_complete(f(*args, **kwargs))
return sync
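# Editor's sketch (not part of the original module): minimal usage of async_to_sync.
# The coroutine below is hypothetical and only shows that the returned wrapper can be
# called from plain synchronous code, with or without a running event loop.
async def _demo_double(value: int) -> int:
    return value * 2


_demo_double_sync = async_to_sync(_demo_double)

if __name__ == "__main__":
    print(_demo_double_sync(21))  # prints 42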
|
utils.py
|
"""
A lot of the code here is stolen from C-lightning's test suite. It was surely
Rusty Russell or Christian Decker who wrote most of it (I'd put some sats on
cdecker), so credit to them! (MIT licensed)
"""
import bip32
import coincurve
import itertools
import json
import logging
import os
import re
import socket
import subprocess
import threading
import time
from ephemeral_port_reserve import reserve
from test_framework import serializations
from typing import Optional
TIMEOUT = int(os.getenv("TIMEOUT", 60))
DEBUG_GUI = os.getenv("DEBUG_GUI", "0") == "1"
EXECUTOR_WORKERS = int(os.getenv("EXECUTOR_WORKERS", 20))
POSTGRES_USER = os.getenv("POSTGRES_USER", "")
POSTGRES_PASS = os.getenv("POSTGRES_PASS", "")
POSTGRES_HOST = os.getenv("POSTGRES_HOST", "localhost")
POSTGRES_IS_SETUP = POSTGRES_USER and POSTGRES_PASS and POSTGRES_HOST
VERBOSE = os.getenv("VERBOSE", "0") == "1"
LOG_LEVEL = os.getenv("LOG_LEVEL", "trace")
assert LOG_LEVEL in ["trace", "debug", "info", "warn", "error"]
def wait_for(success, timeout=TIMEOUT, debug_fn=None):
"""
Run success() until it returns True or the timeout is reached.
The return value of debug_fn is logged at each call to success(); it can be
useful for debugging when tests fail.
"""
start_time = time.time()
interval = 0.25
while not success() and time.time() < start_time + timeout:
if debug_fn is not None:
logging.info(debug_fn())
time.sleep(interval)
interval *= 2
if interval > 5:
interval = 5
if time.time() > start_time + timeout:
raise ValueError("Error waiting for {}", success)
class RpcError(ValueError):
def __init__(self, method: str, payload: dict, error: str):
super(ValueError, self).__init__(
"RPC call failed: method: {}, payload: {}, error: {}".format(
method, payload, error
)
)
self.method = method
self.payload = payload
self.error = error
class Participant:
def __init__(self):
self.hd = bip32.BIP32.from_seed(os.urandom(32))
class User(Participant):
def __init__(self):
super(User, self).__init__()
def get_xpub(self):
return self.hd.get_master_xpub()
def get_xpriv(self):
return self.hd.get_master_xpriv()
def sign_revocation_psbt(self, psbt_str, deriv_index):
"""Attach an ACP signature to the PSBT with the key at {deriv_index}"""
assert isinstance(psbt_str, str)
psbt = serializations.PSBT()
psbt.deserialize(psbt_str)
assert len(psbt.inputs) == 1, "Invalid revocation PSBT"
assert (
serializations.make_p2wsh(psbt.inputs[0].witness_script)
== psbt.inputs[0].witness_utxo.scriptPubKey
)
script_code = psbt.inputs[0].witness_script
sighash = serializations.sighash_all_witness(script_code, psbt, 0, True)
privkey = coincurve.PrivateKey(self.hd.get_privkey_from_path([deriv_index]))
sig = privkey.sign(sighash, hasher=None) + b"\x81" # ALL | ACP
pubkey = self.hd.get_pubkey_from_path([deriv_index])
psbt.inputs[0].partial_sigs[pubkey] = sig
return psbt.serialize()
def sign_unvault_psbt(self, psbt_str, deriv_index):
"""Attach an ALL signature to the PSBT with the key at {deriv_index}"""
assert isinstance(psbt_str, str)
psbt = serializations.PSBT()
psbt.deserialize(psbt_str)
assert len(psbt.inputs) == 1, "Invalid Unvault PSBT"
assert (
serializations.make_p2wsh(psbt.inputs[0].witness_script)
== psbt.inputs[0].witness_utxo.scriptPubKey
)
script_code = psbt.inputs[0].witness_script
sighash = serializations.sighash_all_witness(script_code, psbt, 0)
privkey = coincurve.PrivateKey(self.hd.get_privkey_from_path([deriv_index]))
sig = privkey.sign(sighash, hasher=None) + b"\x01" # ALL
pubkey = self.hd.get_pubkey_from_path([deriv_index])
psbt.inputs[0].partial_sigs[pubkey] = sig
return psbt.serialize()
def sign_spend_psbt(self, psbt_str, deriv_indexes):
"""Attach an ALL signature to each PSBT input with the keys at
{deriv_indexes}"""
assert isinstance(psbt_str, str)
assert isinstance(deriv_indexes, list)
psbt = serializations.PSBT()
psbt.deserialize(psbt_str)
assert len(psbt.inputs) == len(deriv_indexes), "Not enough derivation indexes"
for (i, psbtin) in enumerate(psbt.inputs):
script_code = psbtin.witness_script
sighash = serializations.sighash_all_witness(script_code, psbt, i)
privkey = coincurve.PrivateKey(
self.hd.get_privkey_from_path([deriv_indexes[i]])
)
sig = privkey.sign(sighash, hasher=None) + b"\x01" # ALL
pubkey = self.hd.get_pubkey_from_path([deriv_indexes[i]])
psbtin.partial_sigs[pubkey] = sig
return psbt.serialize()
class Cosig(Participant):
def __init__(self):
super(Cosig, self).__init__()
self.static_key_path = "m/0"
def get_static_key(self):
return self.hd.get_pubkey_from_path(self.static_key_path)
def get_bitcoin_priv(self):
return self.hd.get_privkey_from_path(self.static_key_path)
def get_participants(n_stk, n_man, n_stkman=0):
"""Get the configuration entries for each participant."""
stakeholders = [User() for _ in range(n_stk)]
cosigs = [Cosig() for _ in range(n_stk)]
managers = [User() for _ in range(n_man)]
stkman_stk = [User() for _ in range(n_stkman)]
stkman_cosig = [Cosig() for _ in range(n_stkman)]
stkman_man = [User() for _ in range(n_stkman)]
return (
stakeholders,
cosigs,
managers,
stkman_stk,
stkman_cosig,
stkman_man,
)
def get_descriptors(stks_xpubs, cosigs_keys, mans_xpubs, mans_thresh, cpfp_xpubs, csv):
mscompiler_dir = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
"mscompiler",
)
)
try:
subprocess.check_call(["cargo", "build", "--manifest-path", f"{mscompiler_dir}/Cargo.toml"])
except subprocess.CalledProcessError as e:
logging.error(f"Error compiling mscompiler: {str(e)}")
raise e
mscompiler_bin = os.path.join(mscompiler_dir, "target", "debug", "mscompiler")
cmd = [
mscompiler_bin,
f"{json.dumps(stks_xpubs)}",
f"{json.dumps(cosigs_keys)}",
f"{json.dumps(mans_xpubs)}",
str(mans_thresh),
f"{json.dumps(cpfp_xpubs)}",
str(csv),
]
try:
descs_json = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
logging.error(f"Error running mscompiler with command '{' '.join(cmd)}'")
raise e
descs = json.loads(descs_json)
return (
descs["deposit_descriptor"],
descs["unvault_descriptor"],
descs["cpfp_descriptor"],
)
class UnixSocket(object):
"""A wrapper for socket.socket that is specialized to unix sockets.
Some OS implementations impose restrictions on the Unix sockets.
- On Linux, the socket path must be shorter than the in-kernel buffer
size (somewhere around 100 bytes), so long paths may end up failing
the `socket.connect` call.
This is a small wrapper that tries to work around these limitations.
"""
def __init__(self, path: str):
self.path = path
self.sock: Optional[socket.SocketType] = None
self.connect()
def connect(self) -> None:
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(self.path)
except OSError as e:
self.close()
if e.args[0] == "AF_UNIX path too long" and os.uname()[0] == "Linux":
# If this is a Linux system we may be able to work around this
# issue by opening our directory and using `/proc/self/fd/` to
# get a short alias for the socket file.
#
# This was heavily inspired by the Open vSwitch code see here:
# https://github.com/openvswitch/ovs/blob/master/python/ovs/socket_util.py
dirname = os.path.dirname(self.path)
basename = os.path.basename(self.path)
# Open an fd to our home directory, that we can then find
# through `/proc/self/fd` and access the contents.
dirfd = os.open(dirname, os.O_DIRECTORY | os.O_RDONLY)
short_path = "/proc/self/fd/%d/%s" % (dirfd, basename)
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(short_path)
else:
# There is no good way to recover from this.
raise
def close(self) -> None:
if self.sock is not None:
self.sock.close()
self.sock = None
def sendall(self, b: bytes) -> None:
if self.sock is None:
raise socket.error("not connected")
self.sock.sendall(b)
def recv(self, length: int) -> bytes:
if self.sock is None:
raise socket.error("not connected")
return self.sock.recv(length)
def __del__(self) -> None:
self.close()
class UnixDomainSocketRpc(object):
def __init__(self, socket_path, logger=logging):
self.socket_path = socket_path
self.logger = logger
self.next_id = 0
def _writeobj(self, sock, obj):
s = json.dumps(obj, ensure_ascii=False)
sock.sock.sendall(s.encode())
def _readobj(self, sock):
"""Read a JSON object"""
buff = b""
while True:
n_to_read = max(2048, len(buff))
chunk = sock.recv(n_to_read)
buff += chunk
if len(chunk) != n_to_read:
try:
return json.loads(buff)
except json.JSONDecodeError:
# There is more to read, continue
# FIXME: this is a workaround for large reads, but we could
# eventually introduce an "end" marker in revaultd's responses,
# such as '\n'.
continue
def __getattr__(self, name):
"""Intercept any call that is not explicitly defined and call @call.
We might still want to define the actual methods in the subclasses for
documentation purposes.
"""
name = name.replace("_", "-")
def wrapper(*args, **kwargs):
if len(args) != 0 and len(kwargs) != 0:
raise RpcError(
name, {}, "Cannot mix positional and non-positional arguments"
)
elif len(args) != 0:
return self.call(name, payload=args)
else:
return self.call(name, payload=list(kwargs.values()))
return wrapper
# FIXME: support named parameters on the Rust server!
def call(self, method, payload=[]):
self.logger.debug("Calling %s with payload %r", method, payload)
# FIXME: we open a new socket for every readobj call...
sock = UnixSocket(self.socket_path)
msg = json.dumps(
{
"jsonrpc": "2.0",
"id": 0,
"method": method,
"params": payload,
}
)
sock.sock.send(msg.encode())
this_id = self.next_id
resp = self._readobj(sock)
self.logger.debug("Received response for %s call: %r", method, resp)
if "id" in resp and resp["id"] != this_id:
raise ValueError(
"Malformed response, id is not {}: {}.".format(this_id, resp)
)
sock.close()
if not isinstance(resp, dict):
raise ValueError(
"Malformed response, response is not a dictionary %s." % resp
)
elif "error" in resp:
raise RpcError(method, payload, resp["error"])
elif "result" not in resp:
raise ValueError('Malformed response, "result" missing.')
return resp["result"]
class TailableProc(object):
"""A monitorable process that we can start, stop and tail.
This is the base class for the daemons. It allows us to directly
tail the processes and react to their output.
"""
def __init__(self, outputDir=None, verbose=True):
self.logs = []
self.logs_cond = threading.Condition(threading.RLock())
self.env = os.environ.copy()
self.running = False
self.proc = None
self.outputDir = outputDir
self.logsearch_start = 0
# Set by inherited classes
self.cmd_line = []
self.prefix = ""
# Should we be logging lines we read from stdout?
self.verbose = verbose
# A filter function that'll tell us whether to filter out the line (not
# pass it to the log matcher and not print it to stdout).
self.log_filter = lambda _: False
def start(self, stdin=None, stdout=None, stderr=None):
"""Start the underlying process and start monitoring it."""
logging.debug("Starting '%s'", " ".join(self.cmd_line))
self.proc = subprocess.Popen(
self.cmd_line,
stdin=stdin,
stdout=stdout if stdout else subprocess.PIPE,
stderr=stderr if stderr else subprocess.PIPE,
env=self.env,
)
self.thread = threading.Thread(target=self.tail)
self.thread.daemon = True
self.thread.start()
self.running = True
def save_log(self):
if self.outputDir:
logpath = os.path.join(self.outputDir, "log")
with open(logpath, "w") as f:
for l in self.logs:
f.write(l + "\n")
def stop(self, timeout=10):
self.save_log()
self.proc.terminate()
# Give it some time to react to the signal; Popen.wait(timeout) raises
# TimeoutExpired rather than returning None, so kill the process on expiry.
try:
self.proc.wait(timeout)
except subprocess.TimeoutExpired:
self.proc.kill()
self.proc.wait()
self.thread.join()
return self.proc.returncode
def kill(self):
"""Kill process without giving it warning."""
self.proc.kill()
self.proc.wait()
self.thread.join()
def tail(self):
"""Tail the stdout of the process and remember it.
Stores the lines of output produced by the process in
self.logs and signals that a new line was read so that it can
be picked up by consumers.
"""
out = self.proc.stdout.readline
err = self.proc.stderr.readline
for line in itertools.chain(iter(out, ""), iter(err, "")):
if len(line) == 0:
break
if self.log_filter(line.decode("utf-8")):
continue
if self.verbose:
logging.debug(f"{self.prefix}: {line.decode().rstrip()}")
with self.logs_cond:
self.logs.append(str(line.rstrip()))
self.logs_cond.notify_all()
self.running = False
self.proc.stdout.close()
self.proc.stderr.close()
def is_in_log(self, regex, start=0):
"""Look for `regex` in the logs."""
ex = re.compile(regex)
for l in self.logs[start:]:
if ex.search(l):
logging.debug("Found '%s' in logs", regex)
return l
logging.debug(f"{self.prefix} : Did not find {regex} in logs")
return None
def wait_for_logs(self, regexs, timeout=TIMEOUT):
"""Look for `regexs` in the logs.
We tail the stdout of the process and look for each regex in `regexs`,
starting from last of the previous waited-for log entries (if any). We
fail if the timeout is exceeded or if the underlying process
exits before all the `regexs` were found.
If timeout is None, no time-out is applied.
"""
logging.debug("Waiting for {} in the logs".format(regexs))
exs = [re.compile(r) for r in regexs]
start_time = time.time()
pos = self.logsearch_start
while True:
if timeout is not None and time.time() > start_time + timeout:
print("Time-out: can't find {} in logs".format(exs))
for r in exs:
if self.is_in_log(r):
print("({} was previously in logs!)".format(r))
raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
with self.logs_cond:
if pos >= len(self.logs):
if not self.running:
raise ValueError("Process died while waiting for logs")
self.logs_cond.wait(1)
continue
for r in exs.copy():
self.logsearch_start = pos + 1
if r.search(self.logs[pos]):
logging.debug("Found '%s' in logs", r)
exs.remove(r)
break
if len(exs) == 0:
return self.logs[pos]
pos += 1
def wait_for_log(self, regex, timeout=TIMEOUT):
"""Look for `regex` in the logs.
Convenience wrapper for the common case of only seeking a single entry.
"""
return self.wait_for_logs([regex], timeout)
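def _example_tailableproc_usage():
    """Editor's sketch (not part of the original framework): run a short-lived
    command under TailableProc and block until its output matches a regex. The
    command and regex are illustrative."""
    proc = TailableProc(outputDir=None, verbose=False)
    proc.cmd_line = ["echo", "hello world"]
    proc.prefix = "demo"
    proc.start()
    proc.wait_for_log(r"hello")
    return proc.stop()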
|
usage.py
|
import psutil
import GPUtil
from flask import Flask
from dataclasses import dataclass, field
from collections import defaultdict
import logging
from typing import List, Dict, Optional
import pickle
from threading import Thread, Event
FORMAT = '[%(asctime)s] %(levelname)-8s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
logger = logging.getLogger()
app = Flask(__name__)
@dataclass
class GpuStats:
id: int
name: str
load: float
mem_used: float
mem_free: float
@dataclass
class MemoryStats:
total: float
free: float
used: float
percent: float
@dataclass
class CpuStats:
percent: float
@dataclass
class SystemMetrics:
exp_name: str
gpu: Dict[str, List[GpuStats]] = field(default_factory=dict)
mem: List[MemoryStats] = field(default_factory=list)
cpu: List[CpuStats] = field(default_factory=list)
def get_cpu_usage() -> CpuStats:
return CpuStats(percent=psutil.cpu_percent(1))
def get_memory_usage() -> MemoryStats:
mem = psutil.virtual_memory()
return MemoryStats(total=mem.total / 1e6, free=mem.free / 1e6, used=mem.used / 1e6, percent=mem.percent)
def get_gpu_usage(gpus: List[GPUtil.GPU]) -> Dict[str, GpuStats]:
stats = {}
for gpu in gpus:
id = gpu.id
stats[id] = GpuStats(
id=gpu.id,
name=gpu.name,
load=gpu.load,
mem_used=gpu.memoryUsed,
mem_free=gpu.memoryFree
)
return stats
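def _example_metrics_snapshot() -> SystemMetrics:
    """Editor's sketch (illustrative only): take a single snapshot with the three
    helpers above, mirroring one iteration of the gathering loop below. The
    experiment name is made up."""
    snapshot = SystemMetrics(exp_name='snapshot')
    snapshot.cpu.append(get_cpu_usage())
    snapshot.mem.append(get_memory_usage())
    for gpu_id, stats in get_gpu_usage(GPUtil.getGPUs()).items():
        snapshot.gpu.setdefault(gpu_id, []).append(stats)
    return snapshot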
def metrics_gathering_loop(name: str, pill: Event):
"""Loop gathering the metrics periodically"""
metrics = SystemMetrics(exp_name=name)
while not pill.wait(1):
logger.debug('getting metrics...')
# get the metrics (GPUtil GPU objects are static snapshots, so re-query
# them on every iteration instead of reusing a single getGPUs() result)
cpu = get_cpu_usage()
gpu = get_gpu_usage(GPUtil.getGPUs())
mem = get_memory_usage()
# set the metrics in the object
metrics.cpu.append(cpu)
metrics.mem.append(mem)
for k, v in gpu.items():
if k in metrics.gpu:
metrics.gpu[k].append(v)
else:
metrics.gpu[k] = [v]
# when the stop event is set, save the metrics to disk
# (SystemMetrics is a plain dataclass, so it is pickled directly)
with open(f'./{metrics.exp_name}.pkl', 'wb') as f:
pickle.dump(metrics, f)
running_job_pill: Optional[Event] = None
# endpoints for the api
@app.route('/new/<string:id>', methods=['PUT'])
def new_task(id: str):
"""Starts a new training loop"""
global running_job_pill
logger.debug(f'computing new task with id {id}')
# create the pill, set it as the current one and start the thread
pill = Event()
t = Thread(target=metrics_gathering_loop, args=(id, pill))
running_job_pill = pill
t.start()
return "collection started", 200
@app.route('/finish', methods=['DELETE'])
def finish_task():
"""Finishes the currently running task"""
global running_job_pill
try:
running_job_pill.set()
except Exception as e:
msg = f'error stopping thread, {e}'
logger.error(msg)
return msg, 500
return 'saved experiment', 200
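def _example_client_usage(base_url: str = 'http://127.0.0.1:5000', exp_id: str = 'exp42'):
    """Editor's sketch (not part of the service): how a training script would drive
    the two endpoints above. The base URL assumes Flask's default host and port; the
    experiment id and the `requests` dependency are assumptions."""
    import requests
    requests.put(f'{base_url}/new/{exp_id}')  # start collecting metrics
    # ... run the training job here ...
    requests.delete(f'{base_url}/finish')  # stop collecting and pickle metrics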
if __name__ == '__main__':
app.run(debug=True)
|
apt.py
|
# pylint: disable=C0111,R0903
"""Displays APT package update information (<to upgrade>/<to remove >)
Requires the following debian packages:
* python-parse
* aptitude
"""
import threading
from parse import parse
import bumblebee.util
import bumblebee.input
import bumblebee.output
import bumblebee.engine
APT_CHECK_PATH = ("aptitude full-upgrade --simulate --assume-yes")
PATTERN = "{} packages upgraded, {} newly installed, {} to remove and {} not upgraded."
def parse_result(to_parse):
# We want the line with the information about package upgrades; it is the
# fourth line from the end of aptitude's output.
line_to_parse = to_parse.split("\n")[-4]
result = parse(PATTERN, line_to_parse)
return int(result[0]), int(result[2])
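def _example_parse_result():
    """Editor's sketch (illustrative only): run parse_result() on a fabricated
    aptitude summary so the extraction of the upgrade/remove counts is visible.
    The extra lines only pad the output so the [-4] indexing above lands on the
    summary line."""
    sample = (
        "The following packages will be upgraded:\n"
        "  foo bar libbaz1\n"
        "12 packages upgraded, 0 newly installed, 3 to remove and 1 not upgraded.\n"
        "Need to get 0 B of archives. After unpacking 0 B will be used.\n"
        "Would download/install/remove packages.\n"
    )
    return parse_result(sample)  # -> (12, 3)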
def get_apt_check_info(widget):
try:
res = bumblebee.util.execute(APT_CHECK_PATH)
widget.set("error", None)
except (RuntimeError, FileNotFoundError) as e:
widget.set("error", "unable to query APT: {}".format(e))
return
to_upgrade = 0
to_remove = 0
try:
to_upgrade, to_remove = parse_result(res)
except Exception as e:
widget.set("error", "parse error: {}".format(e))
return
widget.set("to_upgrade", to_upgrade)
widget.set("to_remove", to_remove)
class Module(bumblebee.engine.Module):
def __init__(self, engine, config):
widget = bumblebee.output.Widget(full_text=self.updates)
super(Module, self).__init__(engine, config, widget)
self.interval_factor(60)
self.interval(30)
def updates(self, widget):
result = []
if widget.get("error"):
return widget.get("error")
for t in ["to_upgrade", "to_remove"]:
result.append(str(widget.get(t, 0)))
return "/".join(result)
def update(self, widgets):
thread = threading.Thread(target=get_apt_check_info, args=(widgets[0],))
thread.start()
def state(self, widget):
cnt = 0
ret = "good"
for t in ["to_upgrade", "to_remove"]:
cnt += widget.get(t, 0)
if cnt > 50:
ret = "critical"
elif cnt > 0:
ret = "warning"
if widget.get("error"):
ret = "critical"
return ret
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
auth.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All rights reserved.
#
import codecs
import copy
import json
import logging
import tempfile
import time
import uuid
from datetime import datetime
from os import getenv, makedirs, mkdir, path, remove, removedirs, rmdir
from os.path import expanduser
from threading import Lock, Thread
from typing import Dict, Optional, Union
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import (
Encoding,
NoEncryption,
PrivateFormat,
load_der_private_key,
load_pem_private_key,
)
from .auth_keypair import AuthByKeyPair
from .auth_usrpwdmfa import AuthByUsrPwdMfa
from .compat import IS_LINUX, IS_MACOS, IS_WINDOWS, urlencode
from .constants import (
HTTP_HEADER_ACCEPT,
HTTP_HEADER_CONTENT_TYPE,
HTTP_HEADER_SERVICE_NAME,
HTTP_HEADER_USER_AGENT,
PARAMETER_CLIENT_REQUEST_MFA_TOKEN,
PARAMETER_CLIENT_STORE_TEMPORARY_CREDENTIAL,
)
from .description import (
COMPILER,
IMPLEMENTATION,
OPERATING_SYSTEM,
PLATFORM,
PYTHON_VERSION,
)
from .errorcode import ER_FAILED_TO_CONNECT_TO_DB
from .errors import (
BadGatewayError,
DatabaseError,
Error,
ForbiddenError,
ProgrammingError,
ServiceUnavailableError,
)
from .network import (
ACCEPT_TYPE_APPLICATION_SNOWFLAKE,
CONTENT_TYPE_APPLICATION_JSON,
ID_TOKEN_INVALID_LOGIN_REQUEST_GS_CODE,
KEY_PAIR_AUTHENTICATOR,
PYTHON_CONNECTOR_USER_AGENT,
ReauthenticationRequest,
)
from .options import installed_keyring, keyring
from .sqlstate import SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
from .version import VERSION
logger = logging.getLogger(__name__)
# Cache directory
CACHE_ROOT_DIR = (
getenv("SF_TEMPORARY_CREDENTIAL_CACHE_DIR")
or expanduser("~")
or tempfile.gettempdir()
)
if IS_WINDOWS:
CACHE_DIR = path.join(CACHE_ROOT_DIR, "AppData", "Local", "Snowflake", "Caches")
elif IS_MACOS:
CACHE_DIR = path.join(CACHE_ROOT_DIR, "Library", "Caches", "Snowflake")
else:
CACHE_DIR = path.join(CACHE_ROOT_DIR, ".cache", "snowflake")
if not path.exists(CACHE_DIR):
try:
makedirs(CACHE_DIR, mode=0o700)
except Exception as ex:
logger.debug("cannot create a cache directory: [%s], err=[%s]", CACHE_DIR, ex)
CACHE_DIR = None
logger.debug("cache directory: %s", CACHE_DIR)
# temporary credential cache
TEMPORARY_CREDENTIAL = {}
TEMPORARY_CREDENTIAL_LOCK = Lock()
# temporary credential cache file name
TEMPORARY_CREDENTIAL_FILE = "temporary_credential.json"
TEMPORARY_CREDENTIAL_FILE = (
path.join(CACHE_DIR, TEMPORARY_CREDENTIAL_FILE) if CACHE_DIR else ""
)
# temporary credential cache lock directory name
TEMPORARY_CREDENTIAL_FILE_LOCK = TEMPORARY_CREDENTIAL_FILE + ".lck"
# keyring
KEYRING_SERVICE_NAME = "net.snowflake.temporary_token"
KEYRING_USER = "temp_token"
KEYRING_DRIVER_NAME = "SNOWFLAKE-PYTHON-DRIVER"
ID_TOKEN = "ID_TOKEN"
MFA_TOKEN = "MFATOKEN"
class Auth(object):
"""Snowflake Authenticator."""
def __init__(self, rest):
self._rest = rest
@staticmethod
def base_auth_data(
user,
account,
application,
internal_application_name,
internal_application_version,
ocsp_mode,
login_timeout,
network_timeout=None,
):
return {
"data": {
"CLIENT_APP_ID": internal_application_name,
"CLIENT_APP_VERSION": internal_application_version,
"SVN_REVISION": VERSION[3],
"ACCOUNT_NAME": account,
"LOGIN_NAME": user,
"CLIENT_ENVIRONMENT": {
"APPLICATION": application,
"OS": OPERATING_SYSTEM,
"OS_VERSION": PLATFORM,
"PYTHON_VERSION": PYTHON_VERSION,
"PYTHON_RUNTIME": IMPLEMENTATION,
"PYTHON_COMPILER": COMPILER,
"OCSP_MODE": ocsp_mode.name,
"TRACING": logger.getEffectiveLevel(),
"LOGIN_TIMEOUT": login_timeout,
"NETWORK_TIMEOUT": network_timeout,
},
},
}
def authenticate(
self,
auth_instance,
account,
user,
database=None,
schema=None,
warehouse=None,
role=None,
passcode=None,
passcode_in_password=False,
mfa_callback=None,
password_callback=None,
session_parameters=None,
timeout=120,
) -> Dict[str, Union[str, int, bool]]:
logger.debug("authenticate")
if session_parameters is None:
session_parameters = {}
request_id = str(uuid.uuid4())
headers = {
HTTP_HEADER_CONTENT_TYPE: CONTENT_TYPE_APPLICATION_JSON,
HTTP_HEADER_ACCEPT: ACCEPT_TYPE_APPLICATION_SNOWFLAKE,
HTTP_HEADER_USER_AGENT: PYTHON_CONNECTOR_USER_AGENT,
}
if HTTP_HEADER_SERVICE_NAME in session_parameters:
headers[HTTP_HEADER_SERVICE_NAME] = session_parameters[
HTTP_HEADER_SERVICE_NAME
]
url = "/session/v1/login-request"
body_template = Auth.base_auth_data(
user,
account,
self._rest._connection.application,
self._rest._connection._internal_application_name,
self._rest._connection._internal_application_version,
self._rest._connection._ocsp_mode(),
self._rest._connection._login_timeout,
self._rest._connection._network_timeout,
)
body = copy.deepcopy(body_template)
# updating request body
logger.debug("assertion content: %s", auth_instance.assertion_content)
auth_instance.update_body(body)
logger.debug(
"account=%s, user=%s, database=%s, schema=%s, "
"warehouse=%s, role=%s, request_id=%s",
account,
user,
database,
schema,
warehouse,
role,
request_id,
)
url_parameters = {"request_id": request_id}
if database is not None:
url_parameters["databaseName"] = database
if schema is not None:
url_parameters["schemaName"] = schema
if warehouse is not None:
url_parameters["warehouse"] = warehouse
if role is not None:
url_parameters["roleName"] = role
url = url + "?" + urlencode(url_parameters)
# first auth request
if passcode_in_password:
body["data"]["EXT_AUTHN_DUO_METHOD"] = "passcode"
elif passcode:
body["data"]["EXT_AUTHN_DUO_METHOD"] = "passcode"
body["data"]["PASSCODE"] = passcode
if session_parameters:
body["data"]["SESSION_PARAMETERS"] = session_parameters
logger.debug(
"body['data']: %s",
{k: v for (k, v) in body["data"].items() if k != "PASSWORD"},
)
try:
ret = self._rest._post_request(
url,
headers,
json.dumps(body),
timeout=self._rest._connection.login_timeout,
socket_timeout=self._rest._connection.login_timeout,
)
except ForbiddenError as err:
# HTTP 403
raise err.__class__(
msg=(
"Failed to connect to DB. "
"Verify the account name is correct: {host}:{port}. "
"{message}"
).format(
host=self._rest._host, port=self._rest._port, message=str(err)
),
errno=ER_FAILED_TO_CONNECT_TO_DB,
sqlstate=SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
)
except (ServiceUnavailableError, BadGatewayError) as err:
# HTTP 502/504
raise err.__class__(
msg=(
"Failed to connect to DB. "
"Service is unavailable: {host}:{port}. "
"{message}"
).format(
host=self._rest._host, port=self._rest._port, message=str(err)
),
errno=ER_FAILED_TO_CONNECT_TO_DB,
sqlstate=SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
)
# waiting for MFA authentication
if ret["data"].get("nextAction") == "EXT_AUTHN_DUO_ALL":
body["inFlightCtx"] = ret["data"]["inFlightCtx"]
body["data"]["EXT_AUTHN_DUO_METHOD"] = "push"
self.ret = {"message": "Timeout", "data": {}}
def post_request_wrapper(self, url, headers, body):
# get the MFA response
self.ret = self._rest._post_request(
url, headers, body, timeout=self._rest._connection.login_timeout
)
# send new request to wait until MFA is approved
t = Thread(
target=post_request_wrapper, args=[self, url, headers, json.dumps(body)]
)
t.daemon = True
t.start()
if callable(mfa_callback):
c = mfa_callback()
while not self.ret or self.ret.get("message") == "Timeout":
next(c)
else:
t.join(timeout=timeout)
ret = self.ret
if ret and ret["data"].get("nextAction") == "EXT_AUTHN_SUCCESS":
body = copy.deepcopy(body_template)
body["inFlightCtx"] = ret["data"]["inFlightCtx"]
# final request to get tokens
ret = self._rest._post_request(
url,
headers,
json.dumps(body),
timeout=self._rest._connection.login_timeout,
socket_timeout=self._rest._connection.login_timeout,
)
elif not ret or not ret["data"].get("token"):
# no token was returned.
Error.errorhandler_wrapper(
self._rest._connection,
None,
DatabaseError,
{
"msg": (
"Failed to connect to DB. MFA "
"authentication failed: {"
"host}:{port}. {message}"
).format(
host=self._rest._host,
port=self._rest._port,
message=ret["message"],
),
"errno": ER_FAILED_TO_CONNECT_TO_DB,
"sqlstate": SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
},
)
return session_parameters # required for unit test
elif ret["data"].get("nextAction") == "PWD_CHANGE":
if callable(password_callback):
body = copy.deepcopy(body_template)
body["inFlightCtx"] = ret["data"]["inFlightCtx"]
body["data"]["LOGIN_NAME"] = user
body["data"]["PASSWORD"] = (
auth_instance.password
if hasattr(auth_instance, "password")
else None
)
body["data"]["CHOSEN_NEW_PASSWORD"] = password_callback()
# New Password input
ret = self._rest._post_request(
url,
headers,
json.dumps(body),
timeout=self._rest._connection.login_timeout,
socket_timeout=self._rest._connection.login_timeout,
)
logger.debug("completed authentication")
if not ret["success"]:
errno = ret.get("code", ER_FAILED_TO_CONNECT_TO_DB)
if errno == ID_TOKEN_INVALID_LOGIN_REQUEST_GS_CODE:
# clear stored id_token if failed to connect because of id_token
# raise an exception for reauth without id_token
self._rest.id_token = None
delete_temporary_credential(self._rest._host, user, ID_TOKEN)
raise ReauthenticationRequest(
ProgrammingError(
msg=ret["message"],
errno=int(errno),
sqlstate=SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
)
)
if type(auth_instance) is AuthByKeyPair:
logger.debug(
"JWT Token authentication failed. "
"Token expires at: %s. "
"Current Time: %s",
str(auth_instance._jwt_token_exp),
str(datetime.utcnow()),
)
if type(auth_instance) is AuthByUsrPwdMfa:
delete_temporary_credential(self._rest._host, user, MFA_TOKEN)
Error.errorhandler_wrapper(
self._rest._connection,
None,
DatabaseError,
{
"msg": (
"Failed to connect to DB: {host}:{port}. " "{message}"
).format(
host=self._rest._host,
port=self._rest._port,
message=ret["message"],
),
"errno": ER_FAILED_TO_CONNECT_TO_DB,
"sqlstate": SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
},
)
else:
logger.debug(
"token = %s", "******" if ret["data"]["token"] is not None else "NULL"
)
logger.debug(
"master_token = %s",
"******" if ret["data"]["masterToken"] is not None else "NULL",
)
logger.debug(
"id_token = %s",
"******" if ret["data"].get("idToken") is not None else "NULL",
)
logger.debug(
"mfa_token = %s",
"******" if ret["data"].get("mfaToken") is not None else "NULL",
)
self._rest.update_tokens(
ret["data"]["token"],
ret["data"]["masterToken"],
master_validity_in_seconds=ret["data"].get("masterValidityInSeconds"),
id_token=ret["data"].get("idToken"),
mfa_token=ret["data"].get("mfaToken"),
)
self.write_temporary_credentials(
self._rest._host, user, session_parameters, ret
)
if "sessionId" in ret["data"]:
self._rest._connection._session_id = ret["data"]["sessionId"]
if "sessionInfo" in ret["data"]:
session_info = ret["data"]["sessionInfo"]
self._rest._connection._database = session_info.get("databaseName")
self._rest._connection._schema = session_info.get("schemaName")
self._rest._connection._warehouse = session_info.get("warehouseName")
self._rest._connection._role = session_info.get("roleName")
if "parameters" in ret["data"]:
session_parameters.update(
{p["name"]: p["value"] for p in ret["data"]["parameters"]}
)
self._rest._connection._update_parameters(session_parameters)
return session_parameters
def _read_temporary_credential(self, host, user, cred_type):
cred = None
if IS_MACOS or IS_WINDOWS:
if not installed_keyring:
logger.debug(
"Dependency 'keyring' is not installed, cannot cache id token. You might experience "
"multiple authentication pop ups while using ExternalBrowser Authenticator. To avoid "
"this please install keyring module using the following command : pip install "
"snowflake-connector-python[secure-local-storage]"
)
return
try:
cred = keyring.get_password(
build_temporary_credential_name(host, user, cred_type), user.upper()
)
except keyring.errors.KeyringError as ke:
logger.error(
"Could not retrieve {} from secure storage : {}".format(
cred_type, str(ke)
)
)
elif IS_LINUX:
read_temporary_credential_file()
cred = TEMPORARY_CREDENTIAL.get(host.upper(), {}).get(
build_temporary_credential_name(host, user, cred_type)
)
else:
logger.debug("OS not supported for Local Secure Storage")
return cred
def read_temporary_credentials(self, host, user, session_parameters):
if session_parameters.get(PARAMETER_CLIENT_STORE_TEMPORARY_CREDENTIAL, False):
self._rest.id_token = self._read_temporary_credential(host, user, ID_TOKEN)
if session_parameters.get(PARAMETER_CLIENT_REQUEST_MFA_TOKEN, False):
self._rest.mfa_token = self._read_temporary_credential(
host, user, MFA_TOKEN
)
def _write_temporary_credential(self, host, user, cred_type, cred):
if not cred:
logger.debug(
"no credential is given when try to store temporary credential"
)
return
if IS_MACOS or IS_WINDOWS:
if not installed_keyring:
logger.debug(
"Dependency 'keyring' is not installed, cannot cache id token. You might experience "
"multiple authentication pop ups while using ExternalBrowser Authenticator. To avoid "
"this please install keyring module using the following command : pip install "
"snowflake-connector-python[secure-local-storage]"
)
return
try:
keyring.set_password(
build_temporary_credential_name(host, user, cred_type),
user.upper(),
cred,
)
except keyring.errors.KeyringError as ke:
logger.error("Could not store id_token to keyring, %s", str(ke))
elif IS_LINUX:
write_temporary_credential_file(
host, build_temporary_credential_name(host, user, cred_type), cred
)
else:
logger.debug("OS not supported for Local Secure Storage")
def write_temporary_credentials(self, host, user, session_parameters, response):
if self._rest._connection.consent_cache_id_token and session_parameters.get(
PARAMETER_CLIENT_STORE_TEMPORARY_CREDENTIAL, False
):
self._write_temporary_credential(
host, user, ID_TOKEN, response["data"].get("idToken")
)
if session_parameters.get(PARAMETER_CLIENT_REQUEST_MFA_TOKEN, False):
self._write_temporary_credential(
host, user, MFA_TOKEN, response["data"].get("mfaToken")
)
return
def flush_temporary_credentials():
"""Flush temporary credentials in memory into disk. Need to hold TEMPORARY_CREDENTIAL_LOCK."""
global TEMPORARY_CREDENTIAL
global TEMPORARY_CREDENTIAL_FILE
for _ in range(10):
if lock_temporary_credential_file():
break
time.sleep(1)
else:
logger.debug(
"The lock file still persists after the maximum wait time."
"Will ignore it and write temporary credential file: %s",
TEMPORARY_CREDENTIAL_FILE,
)
try:
with open(
TEMPORARY_CREDENTIAL_FILE, "w", encoding="utf-8", errors="ignore"
) as f:
json.dump(TEMPORARY_CREDENTIAL, f)
except Exception as ex:
logger.debug(
"Failed to write a credential file: " "file=[%s], err=[%s]",
TEMPORARY_CREDENTIAL_FILE,
ex,
)
finally:
unlock_temporary_credential_file()
def write_temporary_credential_file(host, cred_name, cred):
"""Writes temporary credential file when OS is Linux."""
if not CACHE_DIR:
# no cache is enabled
return
global TEMPORARY_CREDENTIAL
global TEMPORARY_CREDENTIAL_LOCK
with TEMPORARY_CREDENTIAL_LOCK:
# update the cache
host_data = TEMPORARY_CREDENTIAL.get(host.upper(), {})
host_data[cred_name.upper()] = cred
TEMPORARY_CREDENTIAL[host.upper()] = host_data
flush_temporary_credentials()
def read_temporary_credential_file():
"""Reads temporary credential file when OS is Linux."""
if not CACHE_DIR:
# no cache is enabled
return
global TEMPORARY_CREDENTIAL
global TEMPORARY_CREDENTIAL_LOCK
global TEMPORARY_CREDENTIAL_FILE
with TEMPORARY_CREDENTIAL_LOCK:
for _ in range(10):
if lock_temporary_credential_file():
break
time.sleep(1)
else:
logger.debug(
"The lock file still persists. Will ignore and "
"write the temporary credential file: %s",
TEMPORARY_CREDENTIAL_FILE,
)
try:
with codecs.open(
TEMPORARY_CREDENTIAL_FILE, "r", encoding="utf-8", errors="ignore"
) as f:
TEMPORARY_CREDENTIAL = json.load(f)
return TEMPORARY_CREDENTIAL
except Exception as ex:
logger.debug(
"Failed to read a credential file. The file may not"
"exists: file=[%s], err=[%s]",
TEMPORARY_CREDENTIAL_FILE,
ex,
)
finally:
unlock_temporary_credential_file()
return None
def lock_temporary_credential_file():
global TEMPORARY_CREDENTIAL_FILE_LOCK
try:
mkdir(TEMPORARY_CREDENTIAL_FILE_LOCK)
return True
except OSError:
logger.debug(
"Temporary cache file lock already exists. Other "
"process may be updating the temporary "
)
return False
def unlock_temporary_credential_file():
global TEMPORARY_CREDENTIAL_FILE_LOCK
try:
rmdir(TEMPORARY_CREDENTIAL_FILE_LOCK)
return True
except OSError:
logger.debug("Temporary cache file lock no longer exists.")
return False
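# Hedged illustrative sketch (not part of the connector API): the mkdir/rmdir
# pair above acts as a best-effort cross-process lock. The context manager
# below expresses the same retry-then-proceed protocol that
# flush_temporary_credentials() and read_temporary_credential_file() follow.
import contextlib


@contextlib.contextmanager
def locked_credential_file_example(retries=10, delay=1.0):
    acquired = False
    for _ in range(retries):
        if lock_temporary_credential_file():  # mkdir succeeded, lock is held
            acquired = True
            break
        time.sleep(delay)
    try:
        # yields False when a stale lock was ignored, mirroring the originals
        yield acquired
    finally:
        if acquired:
            unlock_temporary_credential_file()  # rmdir releases the lock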
def delete_temporary_credential(host, user, cred_type):
if (IS_MACOS or IS_WINDOWS) and installed_keyring:
try:
keyring.delete_password(
build_temporary_credential_name(host, user, cred_type), user.upper()
)
except Exception as ex:
logger.error("Failed to delete credential in the keyring: err=[%s]", ex)
elif IS_LINUX:
temporary_credential_file_delete_password(host, user, cred_type)
def temporary_credential_file_delete_password(host, user, cred_type):
"""Remove credential from temporary credential file when OS is Linux."""
if not CACHE_DIR:
# no cache is enabled
return
global TEMPORARY_CREDENTIAL
global TEMPORARY_CREDENTIAL_LOCK
with TEMPORARY_CREDENTIAL_LOCK:
# update the cache
host_data = TEMPORARY_CREDENTIAL.get(host.upper(), {})
host_data.pop(build_temporary_credential_name(host, user, cred_type), None)
if not host_data:
TEMPORARY_CREDENTIAL.pop(host.upper(), None)
else:
TEMPORARY_CREDENTIAL[host.upper()] = host_data
flush_temporary_credentials()
def delete_temporary_credential_file():
"""Deletes temporary credential file and its lock file."""
global TEMPORARY_CREDENTIAL_FILE
try:
remove(TEMPORARY_CREDENTIAL_FILE)
except Exception as ex:
logger.debug(
"Failed to delete a credential file: " "file=[%s], err=[%s]",
TEMPORARY_CREDENTIAL_FILE,
ex,
)
try:
removedirs(TEMPORARY_CREDENTIAL_FILE_LOCK)
except Exception as ex:
logger.debug("Failed to delete credential lock file: err=[%s]", ex)
def build_temporary_credential_name(host, user, cred_type):
return "{host}:{user}:{driver}:{cred}".format(
host=host.upper(), user=user.upper(), driver=KEYRING_DRIVER_NAME, cred=cred_type
)
def get_token_from_private_key(
user: str, account: str, privatekey_path: str, key_password: Optional[str]
) -> str:
encoded_password = key_password.encode() if key_password is not None else None
with open(privatekey_path, "rb") as key:
p_key = load_pem_private_key(
key.read(), password=encoded_password, backend=default_backend()
)
private_key = p_key.private_bytes(
encoding=Encoding.DER,
format=PrivateFormat.PKCS8,
encryption_algorithm=NoEncryption(),
)
auth_instance = AuthByKeyPair(private_key, 1440 * 60) # token valid for 24 hours
return auth_instance.authenticate(
KEY_PAIR_AUTHENTICATOR, None, account, user, key_password
)
def get_public_key_fingerprint(private_key_file: str, password: str) -> str:
"""Helper function to generate the public key fingerprint from the private key file"""
with open(private_key_file, "rb") as key:
p_key = load_pem_private_key(
key.read(), password=password.encode(), backend=default_backend()
)
private_key = p_key.private_bytes(
encoding=Encoding.DER,
format=PrivateFormat.PKCS8,
encryption_algorithm=NoEncryption(),
)
private_key = load_der_private_key(
data=private_key, password=None, backend=default_backend()
)
return AuthByKeyPair.calculate_public_key_fingerprint(private_key)
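# Hedged usage sketch (illustrative only): the key path, passphrase, user and
# account below are hypothetical placeholders, not values taken from this
# module. It simply chains the two helpers defined above.
def _example_key_pair_usage():
    token = get_token_from_private_key(
        user="ALICE",
        account="example_account",
        privatekey_path="rsa_key.p8",
        key_password="my_passphrase",
    )
    fingerprint = get_public_key_fingerprint("rsa_key.p8", "my_passphrase")
    return token, fingerprint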
|
using_with_statement.py
|
import threading
import logging
"""
Shows that a lock can be acquired using a "with" statement instead of calling lock.acquire().
This was tested for Lock, RLock, Semaphore and Condition.
"""
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s',)
def threading_with(statement):
with statement:
logging.debug('%s acquired via with' %statement)
def threading_not_with(statement):
statement.acquire()
try:
logging.debug('%s acquired directly' %statement )
finally:
statement.release()
if __name__ == '__main__':
#let's create a test battery
lock = threading.Lock()
rlock = threading.RLock()
condition = threading.Condition()
mutex = threading.Semaphore(1)
threading_synchronization_list = [lock ,rlock , condition , mutex]
    # in the for loop we call the threading_with and threading_not_with functions
for statement in threading_synchronization_list :
t1 = threading.Thread(target=threading_with, args=(statement,))
t2 = threading.Thread(target=threading_not_with, args=(statement,))
t1.start()
t2.start()
t1.join()
t2.join()
|
vg_to_imdb.py
|
# coding=utf8
import argparse, os, json, string
from queue import Queue
from threading import Thread, Lock
import h5py
import numpy as np
from scipy.misc import imread, imresize
def build_filename_dict(data):
# First make sure all basenames are unique
basenames_list = [os.path.basename(img['image_path']) for img in data]
assert len(basenames_list) == len(set(basenames_list))
next_idx = 1
filename_to_idx, idx_to_filename = {}, {}
for img in data:
filename = os.path.basename(img['image_path'])
filename_to_idx[filename] = next_idx
idx_to_filename[next_idx] = filename
next_idx += 1
return filename_to_idx, idx_to_filename
def encode_filenames(data, filename_to_idx):
filename_idxs = []
for img in data:
filename = os.path.basename(img['image_path'])
idx = filename_to_idx[filename]
filename_idxs.append(idx)
return np.asarray(filename_idxs, dtype=np.int32)
def add_images(im_data, h5_file, args):
fns = []; ids = []; idx = []
corrupted_ims = ['1592.jpg', '1722.jpg', '4616.jpg', '4617.jpg']
for i, img in enumerate(im_data):
basename = str(img['image_id']) + '.jpg'
if basename in corrupted_ims:
continue
filename = os.path.join(args.image_dir, basename)
if os.path.exists(filename):
fns.append(filename)
ids.append(img['image_id'])
idx.append(i)
ids = np.array(ids, dtype=np.int32)
idx = np.array(idx, dtype=np.int32)
h5_file.create_dataset('image_ids', data=ids)
h5_file.create_dataset('valid_idx', data=idx)
num_images = len(fns)
shape = (num_images, 3, args.image_size, args.image_size)
image_dset = h5_file.create_dataset('images', shape, dtype=np.uint8)
original_heights = np.zeros(num_images, dtype=np.int32)
original_widths = np.zeros(num_images, dtype=np.int32)
image_heights = np.zeros(num_images, dtype=np.int32)
image_widths = np.zeros(num_images, dtype=np.int32)
lock = Lock()
q = Queue()
for i, fn in enumerate(fns):
q.put((i, fn))
def worker():
while True:
i, filename = q.get()
if i % 10000 == 0:
print('processing %i images...' % i)
img = imread(filename)
# handle grayscale
if img.ndim == 2:
img = img[:, :, None][:, :, [0, 0, 0]]
H0, W0 = img.shape[0], img.shape[1]
img = imresize(img, float(args.image_size) / max(H0, W0))
H, W = img.shape[0], img.shape[1]
            # swap RGB to BGR by exchanging the first and last channels
r = img[:,:,0].copy()
img[:,:,0] = img[:,:,2]
img[:,:,2] = r
lock.acquire()
original_heights[i] = H0
original_widths[i] = W0
image_heights[i] = H
image_widths[i] = W
image_dset[i, :, :H, :W] = img.transpose(2, 0, 1)
lock.release()
q.task_done()
for i in range(args.num_workers):
t = Thread(target=worker)
t.daemon = True
t.start()
q.join()
h5_file.create_dataset('image_heights', data=image_heights)
h5_file.create_dataset('image_widths', data=image_widths)
h5_file.create_dataset('original_heights', data=original_heights)
h5_file.create_dataset('original_widths', data=original_widths)
return fns
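# Hedged reader sketch (illustrative, not part of the original pipeline): shows
# how the datasets written by add_images() can be read back. The file name is
# whatever main() produced (e.g. imdb_1024.h5); the index is a placeholder.
def read_image_example(h5_path, i=0):
    with h5py.File(h5_path, 'r') as f:
        h = f['image_heights'][i]
        w = f['image_widths'][i]
        # images are stored channel-first (BGR) and zero-padded to image_size
        img_chw = f['images'][i, :, :h, :w]
    return img_chw.transpose(1, 2, 0)  # back to HWC for display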
def main(args):
im_metadata = json.load(open(args.metadata_input))
h5_fn = 'imdb_' + str(args.image_size) + '.h5'
# write the h5 file
h5_file = os.path.join(args.imh5_dir, h5_fn)
f = h5py.File(h5_file, 'w')
# load images
    im_fns = add_images(im_metadata, f, args)
    # close the file so all datasets are flushed to disk
    f.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--image_dir', default='VG/images')
parser.add_argument('--image_size', default=1024, type=int)
parser.add_argument('--imh5_dir', default='.')
parser.add_argument('--num_workers', default=20, type=int)
parser.add_argument('--metadata_input', default='VG/image_data.json', type=str)
args = parser.parse_args()
main(args)
|
tcp_server.py
|
import socketserver as ss
import threading
from typing import cast, List, Optional, Tuple
from typing_extensions import Type
import netifaces
from ymmsl import Reference
from libmuscle.mcp.server import Server
from libmuscle.mcp.tcp_util import (recv_all, recv_int64, send_int64,
SocketClosed)
from libmuscle.post_office import PostOffice
class TcpServerImpl(ss.ThreadingMixIn, ss.TCPServer):
daemon_threads = True
def __init__(self, host_port_tuple: Tuple[str, int],
streamhandler: Type, tcp_server: 'TcpServer'
) -> None:
super().__init__(host_port_tuple, streamhandler)
self.tcp_server = tcp_server
class TcpHandler(ss.BaseRequestHandler):
"""Handler for MCP-over-TCP connections.
"""
def handle(self) -> None:
"""Handles requests on a socket
"""
receiver_id = self.receive_request()
while receiver_id is not None:
server = cast(TcpServerImpl, self.server).tcp_server
message = server.post_office.get_message(receiver_id)
send_int64(self.request, len(message))
self.request.sendall(message)
receiver_id = self.receive_request()
def receive_request(self) -> Optional[Reference]:
"""Receives a request (receiver id).
Returns:
The received receiver id.
"""
try:
length = recv_int64(self.request)
reqbuf = recv_all(self.request, length)
return Reference(reqbuf.decode('utf-8'))
except SocketClosed:
return None
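# Hedged client-side sketch (illustrative, not part of libmuscle): mirrors the
# length-prefixed exchange implemented by TcpHandler above, reusing the
# tcp_util helpers imported at the top of this module. Host, port and receiver
# are placeholders supplied by the caller.
def fetch_one_message_example(host: str, port: int, receiver: str) -> bytes:
    import socket
    with socket.create_connection((host, port)) as sock:
        request = str(Reference(receiver)).encode('utf-8')
        send_int64(sock, len(request))   # send the length, then the receiver id
        sock.sendall(request)
        length = recv_int64(sock)        # read the length, then the message
        return recv_all(sock, length)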
class TcpServer(Server):
"""A server that accepts MCP connections over TCP.
"""
def __init__(self, instance_id: Reference, post_office: PostOffice
) -> None:
"""Create a TCPServer.
Args:
instance_id: Id of the instance we're a server for.
post_office: A PostOffice to obtain data from.
"""
super().__init__(instance_id, post_office)
self._server = TcpServerImpl(('', 0), TcpHandler, self)
self._server_thread = threading.Thread(
target=self._server.serve_forever, daemon=True)
self._server_thread.start()
def get_location(self) -> str:
"""Returns the location this server listens on.
Returns:
A string containing the location.
"""
host, port = self._server.server_address
locs = list() # type: List[str]
for address in self._get_if_addresses():
locs.append('{}:{}'.format(address, port))
return 'tcp:{}'.format(','.join(locs))
def close(self) -> None:
"""Closes this server.
Stops the server listening, waits for existing clients to
disconnect, then frees any other resources.
"""
self._server.shutdown()
self._server_thread.join()
@property
def post_office(self) -> PostOffice:
"""Export this so the server thread can use it.
"""
return self._post_office
def _get_if_addresses(self) -> List[str]:
all_addresses = list() # type: List[str]
ifs = netifaces.interfaces()
for interface in ifs:
addrs = netifaces.ifaddresses(interface)
for props in addrs.get(netifaces.AF_INET, []):
all_addresses.append(props['addr'])
for props in addrs.get(netifaces.AF_INET6, []):
# filter out link-local addresses with a scope id
if '%' not in props['addr']:
all_addresses.append('[' + props['addr'] + ']')
return all_addresses
|
interface.py
|
import sys, os, time, json, threading
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap
from PyQt5.QtCore import pyqtSlot, pyqtSignal, QObject, QTimer, Qt, QModelIndex, qInstallMessageHandler, QPoint
from Interface.ui_classes import *
from Interface.problem_ui import *
from Interface.submission_ui import *
from Interface.accounts_edit_ui import *
from Interface.password_change_ui import *
from Interface.query_reply_ui import *
from Interface.new_accounts_ui import *
from Interface.ie_accounts_ui import *
from Interface.rejudge_problem_ui import *
from Interface.judge_view_ui import *
from Interface.generate_report_ui import *
from Interface.interface_updates import *
from init_server import initialize_server, save_status
from database_management import *
# This is to ignore some warnings which were thrown when the GUI exited and
# Python deleted some assets in the wrong order.
# Nothing critical :)
def handler(msg_type, msg_log_context, msg_string):
pass
qInstallMessageHandler(handler)
# This class handles the main interface window of server
class server_window(QMainWindow):
def __init__(self, data_changed_flags2, task_queue, log_queue, update_queue, db_list, lock):
super().__init__()
# Set app icon
self.setWindowIcon(QIcon('Elements/logo1.png'))
# Set window title
self.setWindowTitle('BitsOJ v1.0.1 [ SERVER ]')
# make shared objects accessible from the class methods
self.data_changed_flags = data_changed_flags2
self.task_queue = task_queue
self.log_queue = log_queue
self.update_queue = update_queue
self.db_list = db_list
self.lock = lock
# Make the app run full-screen
# Initialize status bar (Bottom Bar)
self.status = self.statusBar()
self.setMinimumSize(1366, 768)
# Timer to update GUI and broadcast scoreboard
self.timer = QTimer()
self.timer.timeout.connect(self.update_data)
self.timer.start(1000)
self.data_timer = QTimer()
self.data_timer.timeout.connect(self.update_tables)
self.data_timer.start(500)
self.scoreboard_timer = QTimer()
self.scoreboard_timer.timeout.connect(self.broadcast_scoreboard)
self.scoreboard_timer.start(300000)
self.log(' [ START ] Interface subprocess started.')
###########################################################
# Default leaderboard query
self.leaderboard_query = "SELECT * FROM scoreboard WHERE is_hidden = 'False' ORDER BY score DESC, total_time ASC"
###########################################################
self.config = initialize_server.read_config()
self.contest_set_time = self.config['Contest Set Time']
self.duration = self.config['Contest Duration']
self.contest_start_time = ''
###########################################################
self.admin_password = self.read_password()
###########################################################
# Define Sidebar Buttons and their actions
button_width = 200
button_height = 50
self.button_0 = QPushButton('Accounts', self)
self.button_0.setFixedSize(button_width, button_height)
self.button_0.clicked.connect(self.manage_accounts)
self.button_0.setObjectName("sidebar_button")
self.button_0.setAutoDefault(True)
self.button_1 = QPushButton('Submissions', self)
self.button_1.setFixedSize(button_width, button_height)
self.button_1.clicked.connect(self.view_submissions)
self.button_1.setObjectName("sidebar_button")
self.button_2 = QPushButton('Judges', self)
self.button_2.setFixedSize(button_width, button_height)
self.button_2.clicked.connect(self.manage_judges)
self.button_2.setObjectName("sidebar_button")
self.button_3 = QPushButton('Clients', self)
self.button_3.setFixedSize(button_width, button_height)
self.button_3.clicked.connect(self.manage_clients)
self.button_3.setObjectName("sidebar_button")
self.button_4 = QPushButton('Queries', self)
self.button_4.setFixedSize(button_width, button_height)
self.button_4.clicked.connect(self.manage_queries)
self.button_4.setObjectName("sidebar_button")
self.button_5 = QPushButton('Leaderboard', self)
self.button_5.setFixedSize(button_width, button_height)
self.button_5.clicked.connect(self.manage_leaderboard)
self.button_5.setObjectName("sidebar_button")
self.button_6 = QPushButton('Problems', self)
self.button_6.setFixedSize(button_width, button_height)
self.button_6.clicked.connect(self.manage_problems)
self.button_6.setObjectName("sidebar_button")
self.button_7 = QPushButton('Statistics', self)
self.button_7.setFixedSize(button_width, button_height)
self.button_7.clicked.connect(self.show_stats)
self.button_7.setObjectName("sidebar_button")
self.button_8 = QPushButton('Settings', self)
self.button_8.setFixedSize(button_width, button_height)
self.button_8.clicked.connect(self.contest_settings)
self.button_8.setObjectName("sidebar_button")
self.button_9 = QPushButton('About', self)
self.button_9.setFixedSize(button_width, button_height)
self.button_9.clicked.connect(self.show_about)
self.button_9.setObjectName("sidebar_button")
self.button_10 = QPushButton('Lock', self)
self.button_10.setFixedSize(button_width, button_height)
self.button_10.clicked.connect(self.set_lock)
self.button_10.setObjectName("sidebar_button")
###########################################################
###########################################################
# Manage tabs on the right window
# Each tab is an object returned by the respective function associated with its UI
# Tab UI are managed by interface_packages/ui_classes.py file
self.tab0, self.account_model, self.delete_account_button = ui_widgets.accounts_ui(self)
self.tab1, self.sub_model = ui_widgets.submissions_ui(self)
self.tab2, self.judge_model = ui_widgets.judge_ui(self)
self.tab3, self.client_model = ui_widgets.client_ui(self)
self.tab4, self.query_model = ui_widgets.query_ui(self)
self.tab5, self.score_model, self.scoring_type_label = ui_widgets.leaderboard_ui(self)
self.tab6, self.problem_model = ui_widgets.problem_ui(self)
self.tab7 = ui_widgets.stats_ui(self)
(
self.tab8, self.contest_time_entry, self.change_time_entry, self.set_button,
self.start_button, self.update_button, self.stop_button, self.account_reset_button,
self.submission_reset_button, self.query_reset_button, self.client_reset_button,
self.server_reset_button, self.timer_reset_button
) = ui_widgets.settings_ui(self)
self.tab9 = ui_widgets.about_us_ui(self)
self.tab10 = ui_widgets.lock_ui(self)
###########################################################
# Initialize GUI elements
server_window.init_UI(self)
# Load previous state in case of server restart
server_window.load_previous_state(self)
return
def log(self, message):
self.log_queue.put(message)
def init_UI(self):
self.set_status('SETUP')
# Define Layout for sidebar
side_bar_layout = QVBoxLayout()
# Shadow effect initialisation
shadow_effect = QGraphicsDropShadowEffect()
shadow_effect.setBlurRadius(5)
shadow_effect.setOffset(0)
shadow_effect.setColor(QColor(255, 255, 255, 255 * 0.3))
# Add buttons to our layout
side_bar_layout.addWidget(self.button_0)
side_bar_layout.addWidget(self.button_1)
side_bar_layout.addWidget(self.button_2)
side_bar_layout.addWidget(self.button_3)
side_bar_layout.addWidget(self.button_4)
side_bar_layout.addWidget(self.button_5)
side_bar_layout.addWidget(self.button_6)
side_bar_layout.addWidget(self.button_7)
side_bar_layout.addWidget(self.button_8)
side_bar_layout.addWidget(self.button_9)
# Add some spacing for lock button
side_bar_layout.addStretch(33)
side_bar_layout.addWidget(self.button_10)
# Set stretch and spacing
side_bar_layout.addStretch(1)
side_bar_layout.setSpacing(0)
# Define sidebar widget and set side_bar_layout to it.
side_bar_widget = QWidget()
side_bar_widget.setLayout(side_bar_layout)
side_bar_widget.setFixedWidth(215)
side_bar_widget.setObjectName("sidebar")
#Define top bar
logo = QLabel(self)
logo_image = QPixmap('Elements/header_2.png')
logo_image = logo_image.scaledToWidth(104)
logo.setPixmap(logo_image)
contest_theme = self.config['Contest Theme']
contest_name = QLabel(self.config['Contest Name'] + ' : ' + contest_theme)
contest_name.setObjectName('main_screen_sub_heading')
self.timer_widget = QLCDNumber()
self.timer_widget.setSegmentStyle(QLCDNumber.Flat)
self.timer_widget.setDigitCount(8)
self.timer_widget.display('00:00:00')
self.timer_widget.setFixedSize(150,50)
top_bar_layout = QHBoxLayout()
        top_bar_layout.setContentsMargins(15, 5, 20, 0)
top_bar_layout.addWidget(logo)
top_bar_layout.addStretch(10)
top_bar_layout.addWidget(contest_name)
top_bar_layout.addStretch(9)
top_bar_layout.addWidget(self.timer_widget)
top_bar_widget = QWidget()
top_bar_widget.setLayout(top_bar_layout)
top_bar_widget.setObjectName('top_bar')
top_bar_widget.setGraphicsEffect(shadow_effect)
# Define our right side screens corresponding to buttons on the sidebar
# Basically right screens are tab widgets whose tabs are hidden,
# and we map sidebar buttons to each tab switch :)
# Since sidebars are not natively supported by pyqt5
# tab names are '' because we don't want them to show up in our screen
self.right_widget = QTabWidget()
self.right_widget.addTab(self.tab0, '')
self.right_widget.addTab(self.tab1, '')
self.right_widget.addTab(self.tab2, '')
self.right_widget.addTab(self.tab3, '')
self.right_widget.addTab(self.tab4, '')
self.right_widget.addTab(self.tab5, '')
self.right_widget.addTab(self.tab6, '')
self.right_widget.addTab(self.tab7, '')
self.right_widget.addTab(self.tab8, '')
self.right_widget.addTab(self.tab9, '')
self.right_widget.addTab(self.tab10, '')
self.right_widget.setObjectName("main_tabs")
self.right_widget.setContentsMargins(0, 0, 0, 0)
# Screen 1 will be our initial screen
self.right_widget.setCurrentIndex(0)
# Define the combined layout for sidebar + right side screens
main_layout = QHBoxLayout()
main_layout.addWidget(side_bar_widget)
main_layout.addWidget(self.right_widget)
main_layout.setContentsMargins(0, 0, 22, 10)
main_layout.setStretch(0, 0)
main_layout.setStretch(1, 90)
# Define our main wideget = sidebar + windows
main_widget = QWidget()
main_widget.setObjectName("screen_widget")
main_widget.setLayout(main_layout)
#Define top_layout = top_bar + main_widget
top_layout = QVBoxLayout()
top_layout.addWidget(top_bar_widget)
top_layout.addWidget(main_widget)
top_layout.setContentsMargins(0, 0, 0, 0)
top_layout.setStretch(0, 10)
top_layout.setStretch(1, 90)
top_widget = QWidget()
top_widget.setLayout(top_layout)
top_widget.setObjectName("main_widget")
# top_widget.setGraphicsEffect(shadow_effect)
# Set top_widget as our central widget
self.setCentralWidget(top_widget)
return
def read_password(self):
return self.config['Admin Password']
@pyqtSlot()
def manage_accounts(self):
if self.data_changed_flags[24] != 1:
self.right_widget.setCurrentIndex(0)
@pyqtSlot()
def view_submissions(self):
if self.data_changed_flags[24] != 1:
self.data_changed_flags[0] = 0
self.button_1.setObjectName('sidebar_button')
self.button_1.setStyleSheet('')
self.button_1.update()
self.right_widget.setCurrentIndex(1)
@pyqtSlot()
def manage_judges(self):
if self.data_changed_flags[24] != 1:
self.right_widget.setCurrentIndex(2)
@pyqtSlot()
def manage_clients(self):
if self.data_changed_flags[24] != 1:
self.right_widget.setCurrentIndex(3)
@pyqtSlot()
def manage_queries(self):
if self.data_changed_flags[24] != 1:
self.data_changed_flags[1] = 0
self.button_4.setObjectName('sidebar_button')
self.button_4.setStyleSheet('')
self.button_4.update()
self.right_widget.setCurrentIndex(4)
@pyqtSlot()
def manage_leaderboard(self):
if self.data_changed_flags[24] != 1:
self.right_widget.setCurrentIndex(5)
@pyqtSlot()
def manage_problems(self):
if self.data_changed_flags[24] != 1:
self.right_widget.setCurrentIndex(6)
@pyqtSlot()
def show_stats(self):
if self.data_changed_flags[24] != 1:
self.right_widget.setCurrentIndex(7)
@pyqtSlot()
def contest_settings(self):
if self.data_changed_flags[24] != 1:
self.right_widget.setCurrentIndex(8)
@pyqtSlot()
def show_about(self):
if self.data_changed_flags[24] != 1:
self.right_widget.setCurrentIndex(9)
@pyqtSlot()
def set_lock(self):
print('[ GUI ][ LOCK ] Server GUI has been locked.')
self.log('[ GUI ][ LOCK ] Server GUI has been locked.')
self.data_changed_flags[24] = 1
self.right_widget.setCurrentIndex(10)
####################################################
# Functions related to GUI updates
def load_previous_state(self):
server_window.set_table_data(self)
if self.config["Contest Status"] == "RUNNING":
server_window.set_button_behavior(self, 'RUNNING')
elif self.config["Contest Status"] == "STOPPED":
server_window.set_button_behavior(self, 'STOPPED')
elif self.config["Contest Status"] == "SETUP":
server_window.set_button_behavior(self, 'SETUP')
# TODO: When server restarts, pop up a new notification about contest status
return
def broadcast_scoreboard(self):
# If scoreboard broadcast is allowed and contest is running
if self.data_changed_flags[15] == 1 and self.data_changed_flags[10] == 1:
# Just set this flag, update data will take care of it
self.data_changed_flags[18] = 1
print('[ EVENT ] Scoreboard broadcast to clients ( Time Out )')
self.log('[ EVENT ] Scoreboard broadcast to clients ( Time Out )')
def update_data(self):
try:
# Update contest clock
if self.data_changed_flags[10] == 1:
# Find time elapsed since contest start
total_time = self.contest_set_time
current_time = time.time()
time_difference = total_time - current_time
remaining_time = time.strftime('%H:%M:%S', time.gmtime(time_difference))
# When remaining time is less than 0, contest has ended
if time_difference < 0:
# Contest time ended
self.timer_widget.display('00:00:00')
self.process_event('STOP')
return
# Update timer
self.timer_widget.display(remaining_time)
if self.data_changed_flags[26] == 1:
self.data_changed_flags[26] = 2
# Connection failure!
info_box = QMessageBox()
info_box.setIcon(QMessageBox.Critical)
info_box.setWindowTitle('CRITICAL')
info_box.setText('Connection to RabbitMQ broker lost!\nRestart Server after resolving the issue.')
info_box.setStandardButtons(QMessageBox.Ok)
info_box.exec_()
self.data_changed_flags[7] = 1
self.log_queue.put("[ EXIT ] ABNORMAL SYSTEM EXIT")
self.close()
# New Submission : Show indication
            if self.data_changed_flags[0] == 1 and self.right_widget.currentIndex() != 1:
self.button_1.setStyleSheet('border-right : 10px solid #FFBF00;}')
self.button_1.update()
# New query : Show indication
            if self.data_changed_flags[1] == 1 and self.right_widget.currentIndex() != 4:
self.button_4.setStyleSheet('border-right : 10px solid #FFBF00;}')
self.button_4.update()
# System EXIT
if self.data_changed_flags[7] == 1:
if self.data_changed_flags[5] == 1:
print('[ UI ][ ERROR ] Cannot exit : Verdicts are being released')
return
print('[ UI ] EXIT')
sys.exit()
if self.data_changed_flags[19] == 1:
self.data_changed_flags[19] = 0
# Broadcast UPDATE signal
total_time = self.contest_set_time
current_time = time.time()
remaining_time = time.strftime('%H:%M:%S', time.gmtime(total_time - current_time ))
message = {
'Code' : 'UPDATE',
'Time' : remaining_time
}
message = json.dumps(message)
self.task_queue.put(message)
# Broadcast scoreboard now
if self.data_changed_flags[18] == 1:
print('[ SCOREBOARD ][ BROADCAST ]')
self.data_changed_flags[18] = 0
# Broadcast scoreboard
scoreboard = scoreboard_management.get_scoreboard()
data = str(scoreboard)
message = {
'Code' : 'SCRBD',
'Data' : data
}
message = json.dumps(message)
self.task_queue.put(message)
# If manual reviews have been turned OFF
if self.data_changed_flags[25] == 1:
print('[ UI ] Sending responses for held submissions... ')
self.data_changed_flags[25] = 0
release_thread = threading.Thread( target = self.release_held_verdicts )
self.data_changed_flags[5] = 1
release_thread.start()
print('[ UI ] All held responses sent!')
except Exception as error:
            print('[ ERROR ] Interface update error: ' + str(error))
            self.log('[ ERROR ] Interface update error: ' + str(error))
return
def release_held_verdicts(self):
# Get the data of all those run ids whose status is REVIEW:
print('[ UI ] Release process started.')
submissions = submissions_management.get_held_submissions()
for submission in submissions:
run_id = submission[0]
client_id = submission[1]
local_run_id = submission[2]
source_file = submission[3]
problem_code = submission[4]
verdict = submission[5]
judge = submission[6]
timestamp = submission[7]
client_username = client_authentication.get_client_username(client_id)
print('[ UI ] Releasing Run ', run_id)
# Get message of submission
try:
filename = './Client_Submissions/' + str(run_id) + '_latest.info'
with open(filename) as file:
data = file.read()
if data != '':
error_data = data
else:
error_data = 'No Error data received!'
except:
error_data = 'No Error data received!'
# For each run id, send response
message = {
'Code' : 'VRDCT',
'Receiver' : client_username,
'Local Run ID' : local_run_id,
'Run ID' : run_id,
'Status' : verdict,
'Message' : error_data,
'Judge' : judge,
'Client ID' : client_id,
'Problem Code' : problem_code,
'Timestamp' : timestamp
}
message = json.dumps(message)
self.task_queue.put(message)
print('[ UI ][ RESPONSE ] Sent Verdict for Run ', run_id)
# All verdicts are released
self.data_changed_flags[5] = 0
return
def convert_to_seconds(time_str):
h, m, s = time_str.split(':')
return int(h) * 3600 + int(m) * 60 + int(s)
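    # Example (illustrative): convert_to_seconds('01:30:05') == 1*3600 + 30*60 + 5 == 5405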
def update_tables(self):
try:
interface_updates.update_table_contained(self)
except Exception as e:
            print('[ UI ] Table update error: ', e)
finally:
return
def set_table_data(self):
account_data = self.db_list[0]
sub_data = self.db_list[1]
client_data = self.db_list[2]
judge_data = self.db_list[3]
query_data = self.db_list[4]
score_data = self.db_list[5]
problem_data = self.db_list[6]
self.account_model.setRowCount(len(account_data))
for i in range (len(account_data)):
for j in range(len(account_data[i])):
self.account_model.setItem(i, j, QTableWidgetItem(account_data[i][j]))
# Enable sorting in the table view
self.account_model.setSortingEnabled(True)
self.sub_model.setRowCount(len(sub_data))
for i in range (len(sub_data)):
for j in range(len(sub_data[i])):
self.sub_model.setItem(i, j, QTableWidgetItem(str(sub_data[i][j])))
self.client_model.setRowCount(len(client_data))
for i in range (len(client_data)):
for j in range(len(client_data[i])):
self.client_model.setItem(i, j, QTableWidgetItem(str(client_data[i][j])))
self.judge_model.setRowCount(len(judge_data))
for i in range (len(judge_data)):
for j in range(len(judge_data[i])):
self.judge_model.setItem(i, j, QTableWidgetItem(str(judge_data[i][j])))
self.query_model.setRowCount(len(query_data))
for i in range (len(query_data)):
for j in range(len(query_data[i])):
self.query_model.setItem(i, j, QTableWidgetItem(str(query_data[i][j])))
self.score_model.setRowCount(len(score_data))
for i in range (len(score_data)):
for j in range(len(score_data[i])):
self.score_model.setItem(i, j, QTableWidgetItem(str(score_data[i][j])))
self.problem_model.setRowCount(len(problem_data))
for i in range (len(problem_data)):
for j in range(len(problem_data[0])):
self.problem_model.setItem(i, j, QTableWidgetItem(str(problem_data[i][j])))
def set_button_behavior(self, status):
if status == "SETUP":
self.set_status('SETUP')
self.setWindowTitle('BitsOJ v1.0.1 [ SERVER ][ SETUP ]')
self.data_changed_flags[10] = 0
contest_duration = '00:00:00'
self.timer_widget.display(contest_duration)
self.contest_time_entry.setReadOnly(0)
self.contest_time_entry.setToolTip(
'Set Contest duration. Press Set to confirm.' +
'\nYou will not be able to edit this when contest starts.'
)
self.change_time_entry.setReadOnly(False)
self.change_time_entry.setToolTip('You will be able to use it when contest is STARTED')
self.set_button.setEnabled(True)
self.set_button.setToolTip('Set contest time.\nThis does NOT broadcast to clients.')
self.start_button.setEnabled(True)
self.start_button.setToolTip('START the contest and broadcast to all clients.')
self.stop_button.setEnabled(False)
self.stop_button.setToolTip('STOP the contest and broadcast to all clients.\nDisabled until contest Starts')
self.update_button.setEnabled(False)
self.update_button.setToolTip('UPDATE contest time and broadcast to all clients.\nDisabled until contest Starts')
self.server_reset_button.setEnabled(True)
self.server_reset_button.setToolTip('RESET the server.')
self.account_reset_button.setEnabled(True)
self.submission_reset_button.setEnabled(True)
self.query_reset_button.setEnabled(True)
self.client_reset_button.setEnabled(True)
self.delete_account_button.setEnabled(True)
self.timer_reset_button.setEnabled(False)
self.timer_reset_button.setToolTip('You can reset timer when contest is STOPPED.')
if status == "RUNNING":
self.set_status('RUNNING')
self.setWindowTitle('BitsOJ v1.0.1 [ SERVER ][ RUNNING ]')
self.data_changed_flags[10] = 1
self.contest_time_entry.setReadOnly(1)
self.contest_time_entry.setToolTip('Contest has STARTED.\nYou can\'t edit this value now.')
self.change_time_entry.setReadOnly(False)
self.change_time_entry.setToolTip('Extend/Shorten contest (in minutes).\nPress UPDATE to confirm.')
self.set_button.setEnabled(False)
self.set_button.setToolTip('Contest has STARTED.\nYou can not set time now!')
self.start_button.setEnabled(False)
self.start_button.setToolTip('Contest is already running!')
self.stop_button.setEnabled(True)
self.stop_button.setToolTip('STOP the contest.')
self.update_button.setEnabled(True)
self.update_button.setToolTip('Update the contest.')
self.server_reset_button.setEnabled(False)
self.server_reset_button.setToolTip('RESET the server.\nCan only be used when contest\nis not RUNNING.')
self.account_reset_button.setEnabled(False)
self.submission_reset_button.setEnabled(False)
self.query_reset_button.setEnabled(False)
self.client_reset_button.setEnabled(True)
self.delete_account_button.setEnabled(False)
self.timer_reset_button.setEnabled(False)
elif status == "STOPPED":
self.set_status('STOPPED')
self.setWindowTitle('BitsOJ v1.0.1 [ SERVER ][ STOPPED ]')
self.data_changed_flags[10] = 2
self.contest_time_entry.setReadOnly(1)
self.contest_time_entry.setToolTip('Contest has STOPPED.\nYou can\'t edit this value now.')
self.update_button.setEnabled(False)
self.update_button.setToolTip('Contest has STOPPED.\nYou can not extend it now.')
self.stop_button.setEnabled(False)
self.stop_button.setToolTip('Contest has already STOPPED!')
self.start_button.setEnabled(False)
self.start_button.setToolTip('Contest has STOPPED!')
self.set_button.setEnabled(False)
self.set_button.setToolTip('Contest has STOPPED!')
self.change_time_entry.setReadOnly(True)
self.change_time_entry.setToolTip('Contest has STOPPED.\nYou can not change time now!')
self.server_reset_button.setEnabled(True)
self.server_reset_button.setToolTip('RESET the server.')
self.account_reset_button.setEnabled(True)
self.submission_reset_button.setEnabled(True)
self.query_reset_button.setEnabled(True)
self.client_reset_button.setEnabled(True)
self.delete_account_button.setEnabled(True)
self.timer_reset_button.setEnabled(True)
self.timer_reset_button.setToolTip('Reset Contest timer')
return
def process_event(self, data, extra_data = 'None'):
if data == 'SET':
print('[ SET ] Contest Duration : ' + str(extra_data))
self.log('[ SET ] Contest Duration : ' + str(extra_data))
save_status.update_entry('Contest Duration', str(extra_data))
contest_start_time = time.strftime("%H:%M:%S", time.localtime(time.time()))
save_status.update_entry('Contest Start Time', contest_start_time)
self.timer_widget.display(extra_data)
self.duration = str(extra_data)
elif data == 'START':
current_time = time.localtime()
self.contest_start_time = time.time()
self.contest_duration_seconds = server_window.convert_to_seconds(self.duration)
self.contest_set_time = self.contest_duration_seconds + self.contest_start_time
contest_end_time = time.strftime("%H:%M:%S", time.localtime(self.contest_set_time))
contest_start_time = time.strftime("%H:%M:%S", time.localtime(self.contest_start_time))
message = {
'Code' : 'START',
'Duration' : extra_data,
'Start Time' : contest_start_time,
'End Time' : contest_end_time,
'Receiver' : 'All'
}
message = json.dumps(message)
# Put START message in task_queue so that it is broadcasted to all the clients.
self.task_queue.put(message)
# Update GUI
server_window.set_button_behavior(self, 'RUNNING')
# Update config file
current_time = str(time.strftime("%H:%M:%S", current_time))
save_status.update_entry('Contest Start Time', current_time)
save_status.update_entry('Contest Status', 'RUNNING')
save_status.update_entry('Contest Duration', extra_data)
save_status.update_entry('Contest Set Time', self.contest_set_time)
save_status.update_entry('Contest End Time', contest_end_time)
# Broadcast Scoreboard
self.data_changed_flags[18] = 1
elif data == 'STOP':
current_time = time.localtime()
message = {
'Code' : 'STOP'
}
message = json.dumps(message)
self.task_queue.put(message)
# Update GUI
self.set_button_behavior('STOPPED')
# Update config file
current_time = str(time.strftime("%H:%M:%S", current_time))
save_status.update_entry('Contest End Time', current_time)
save_status.update_entry('Contest Status', 'STOPPED')
elif data == 'UPDATE':
# Send UPDATE signal
print('[ UPDATE ] Contest time ' + str(extra_data))
self.log('[ UPDATE ] Contest time ' + str(extra_data))
self.contest_set_time = self.contest_set_time + int(extra_data) * 60
# self.data_changed_flags[19] = 1
message = {
'Code' : 'EXTND',
'Time' : extra_data
}
message = json.dumps(message)
self.task_queue.put(message)
prev_duration = self.duration
new_duration = server_window.convert_to_seconds(prev_duration) + (int(extra_data) * 60)
new_duration = server_window.convert_to_hhmmss(new_duration)
new_end_time = time.strftime("%H:%M:%S", time.localtime(self.contest_set_time))
save_status.update_entry('Contest Set Time', self.contest_set_time)
save_status.update_entry('Contest End Time', new_end_time)
save_status.update_entry('Contest Duration', new_duration)
return
    def convert_to_hhmmss(seconds):
        seconds = int(seconds)
        h, remainder = divmod(seconds, 3600)
        m, s = divmod(remainder, 60)
        return '{:02d}:{:02d}:{:02d}'.format(h, m, s)
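    # Example (illustrative): convert_to_hhmmss(5405) -> '01:30:05'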
def allow_login_handler(self, state):
if(state == Qt.Checked):
# Allow logins
self.set_flags(2, 1)
else:
# Stop logins
self.set_flags(2, 0)
return
def allow_ip_change_handler(self, state):
if(state == Qt.Checked):
# Allow logins
self.set_flags(27, 1)
print('[ SET ] IP address change allowed.')
self.log_queue.put('[ SET ] IP address change allowed.')
else:
# Stop logins
self.set_flags(27, 0)
print('[ SET ] IP address change not allowed.')
self.log_queue.put('[ SET ] IP address change not allowed.')
return
def allow_same_ip_handler(self, state):
if(state == Qt.Checked):
# Allow logins
self.set_flags(14, 1)
print('[ SET ] Same IP not allowed.')
self.log_queue.put('[ SET ] Same IP not allowed.')
else:
# Stop logins
self.set_flags(14, 0)
print('[ SET ] Same IP allowed.')
self.log_queue.put('[ SET ] Same IP allowed.')
return
def allow_judge_login_handler(self, state):
if(state == Qt.Checked):
# Allow logins
self.set_flags(12, 1)
else:
# Stop logins
self.set_flags(12, 0)
return
def allow_submissions_handler(self, state):
if(state == Qt.Checked):
# Allow submissions
self.set_flags(3, 1)
else:
# Stop submissions
self.set_flags(3, 0)
return
def manual_reviews_handler(self, state):
if(state == Qt.Checked):
# Allow submissions review
self.set_flags(20, 1)
else:
self.set_flags(20, 0)
buttonReply = QMessageBox.question(
self,
'Manual Review Turn OFF',
'Release all under REVIEW verdicts?',
QMessageBox.Yes | QMessageBox.No, # Button Options
QMessageBox.No # Default button
)
if buttonReply == QMessageBox.No:
return
self.set_flags(25, 1)
return
def allow_scoreboard_update_handler(self, state):
if(state == Qt.Checked):
# Allow scoreboard update
self.set_flags(15, 1)
else:
# Stop scoreboard update
self.set_flags(15, 0)
return
def check_login_allowed(self):
if self.data_changed_flags[2] == 1:
return True
return False
def check_judge_login_allowed(self):
if self.data_changed_flags[12] == 1:
return True
return False
def check_submission_allowed(self):
if self.data_changed_flags[3] == 1:
return True
return False
def check_scoreboard_update_allowed(self):
if self.data_changed_flags[15] == 1:
return True
return False
def check_manual_review_allowed(self):
if self.data_changed_flags[20] == 1:
return True
return False
def set_flags(self, index, value):
self.data_changed_flags[index] = value
return
@pyqtSlot()
def manual_broadcast_scoreboard(self):
# Set broadcast scoreboard flag
self.data_changed_flags[18] = 1
info_box = QMessageBox()
info_box.setIcon(QMessageBox.Information)
info_box.setWindowTitle('Alert')
info_box.setText('Scoreboard broadcasted!')
info_box.setStandardButtons(QMessageBox.Ok)
info_box.exec_()
return
@pyqtSlot()
def manage_submission(self, selected_row):
try:
# Close any previous sub-window
self.window.close()
except:
pass
try:
# Get data from selected row
# run_id, client_id, problem_code, language, timestamp, verdict, sent_status
run_id = self.sub_model.selectedIndexes()[0].data()
client_id = self.sub_model.selectedIndexes()[1].data()
problem_code = self.sub_model.selectedIndexes()[2].data()
language = self.sub_model.selectedIndexes()[3].data()
timestamp = self.sub_model.selectedIndexes()[4].data()
verdict = self.sub_model.selectedIndexes()[5].data()
sent_status = self.sub_model.selectedIndexes()[6].data()
if client_id == None:
pass
else:
self.window = manage_submission_ui(
self.data_changed_flags,
self.task_queue,
self.log_queue,
run_id,
client_id,
problem_code,
language,
timestamp,
verdict,
sent_status
)
self.window.show()
except Exception as error:
print('[ ERROR ] : ' + str(error))
self.log('[ ERROR ] : ' + str(error))
finally:
return
@pyqtSlot()
def query_reply(self, selected_row):
try:
# Close any previous sub-window
self.window.close()
except:
pass
try:
# Get data from selected row
query_id = self.query_model.selectedIndexes()[0].data()
client_id = self.query_model.selectedIndexes()[1].data()
query = self.query_model.selectedIndexes()[2].data()
reply = self.query_model.selectedIndexes()[3].data()
if query_id == None:
query_id = -1
mode = 0
if client_id == '0':
mode = 1
self.window = query_reply_ui(self.data_changed_flags,self.task_queue, query, reply, client_id, query_id, self.log_queue, mode)
self.window.show()
except Exception as error:
print('[ ERROR ] : ' + str(error))
self.log('[ ERROR ] : ' + str(error))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
finally:
return
@pyqtSlot()
def announcement(self):
try:
# Close any previous sub-window
self.window.close()
except:
pass
try:
query = 'Announcement'
reply = ''
client_id = -1
query_id = -1
self.window = query_reply_ui(self.data_changed_flags,self.task_queue ,query, reply, client_id, query_id, self.log_queue)
self.window.show()
except Exception as error:
print('[ UI ][ ERROR ] : ' + str(error))
self.log('[ UI ][ ERROR ] : ' + str(error))
finally:
return
@pyqtSlot()
def create_accounts(self):
try:
# Close any previous sub-window
self.window.close()
except:
pass
try:
self.window = new_accounts_ui(self.data_changed_flags, self.task_queue, self.log_queue)
self.window.show()
except Exception as error:
print('[ UI ][ ERROR ] : ' + str(error))
self.log('[ UI ][ ERROR ] : ' + str(error))
finally:
return
@pyqtSlot()
def rejudge_problem(self):
try:
# Close any previous sub-window
self.window.close()
except:
pass
try:
codes = self.config['Problem Codes']
client_list = []
client_list = client_authentication.get_connected_clients()
self.window = rejudge_problem_ui(
self.data_changed_flags,
self.task_queue,
self.log_queue,
codes,
client_list
)
self.window.show()
except Exception as error:
print('[ UI ][ ERROR ] : ' + str(error))
self.log('[ UI ][ ERROR ] : ' + str(error))
finally:
return
@pyqtSlot()
def import_export_accounts(self):
try:
# Close any previous sub-window
self.window.close()
except:
pass
try:
self.window = ie_accounts_ui(
self.data_changed_flags,
self.task_queue,
self.log_queue
)
self.window.show()
except Exception as error:
print('[ UI ][ ERROR ] : ' + str(error))
self.log('[ UI ][ ERROR ] : ' + str(error))
finally:
return
@pyqtSlot()
def password_verification(self):
password = self.admin_password
input_dialog = QInputDialog()
input_dialog.setFixedSize(600, 400)
user_input, button_pressed_flag = input_dialog.getText(
self, "Authentication", "Enter Contest Password: ", QLineEdit.Password, ""
)
if button_pressed_flag:
if self.validate_password(user_input):
return 1
else:
self.log('[ SECURITY ] Password verification failed.')
return 0
return 2
def validate_password(self, input):
if input == self.admin_password:
return 1
return 0
@pyqtSlot()
def edit_client(self, selected_row):
try:
# Close any previous sub-window
self.window.close()
except:
pass
# If no row is selected, return
try:
client_id = self.client_model.selectedIndexes()[0].data()
username = self.client_model.selectedIndexes()[1].data()
password = self.client_model.selectedIndexes()[2].data()
ip = self.client_model.selectedIndexes()[3].data()
state = self.client_model.selectedIndexes()[4].data()
if username == None or client_id == None or password == None or state == None:
pass
else:
self.window = account_edit_ui(self.data_changed_flags, self.task_queue, self.log_queue, client_id, username, password, state, ip)
self.window.show()
except Exception as error:
print('[ ERROR ]' + str(error))
self.log('[ ERROR ]' + str(error))
finally:
return
@pyqtSlot()
def view_judge(self, selected_row):
try:
# Close any previous sub-window
self.window.close()
except:
pass
# If no row is selected, return
try:
judge_id = self.judge_model.selectedIndexes()[0].data()
username = self.judge_model.selectedIndexes()[1].data()
password = self.judge_model.selectedIndexes()[2].data()
ip = self.judge_model.selectedIndexes()[3].data()
state = self.judge_model.selectedIndexes()[4].data()
if username == None or judge_id == None or password == None or state == None:
pass
else:
self.window = judge_view_ui(
self.data_changed_flags,
self.task_queue,
self.log_queue,
judge_id,
username,
password,
state,
ip
)
self.window.show()
except Exception as error:
print('[ UI ][ ERROR ]' + str(error))
self.log('[ UI ][ ERROR ]' + str(error))
finally:
return
@pyqtSlot()
def edit_account(self, selected_row):
try:
# Close any previous sub-window
self.window.close()
except:
pass
# If no row is selected, return
try:
username = self.account_model.selectedIndexes()[0].data()
password = self.account_model.selectedIndexes()[1].data()
ctype = self.account_model.selectedIndexes()[2].data()
if username == None or password == None or ctype == None:
pass
else:
# print("Sending ", username, password, ctype)
self.window = password_change_ui(self.data_changed_flags, self.task_queue, self.log_queue, username, password, ctype)
self.window.show()
except Exception as error:
print('[ UI ][ ERROR ][ EDIT ] ' + str(error))
self.log('[ UI ][ ERROR ][ EDIT ] ' + str(error))
finally:
return
@pyqtSlot()
def generate_report(self):
try:
# Close any previous sub-window
self.window.close()
except:
pass
if self.data_changed_flags[10] != 2:
info_box = QMessageBox()
info_box.setIcon(QMessageBox.Information)
info_box.setWindowTitle('Alert')
info_box.setText('Reports can only be generated when contest has Stopped.')
info_box.setStandardButtons(QMessageBox.Ok)
info_box.exec_()
return
try:
contest_name = self.config['Contest Name']
            contest_theme = self.config['Contest Theme']
except:
contest_name = 'BitsOJ Contest'
contest_theme = ' '
try:
self.window = generate_report_ui(
self.data_changed_flags,
self.task_queue,
self.log_queue,
self.config
)
self.window.show()
except Exception as error:
print('[ UI ][ ERROR ][ REPORT ] ' + str(error))
self.log('[ UI ][ ERROR ][ REPORT ] ' + str(error))
finally:
return
@pyqtSlot()
def view_problem(self, selected_row):
try:
# Close any previous sub-window
self.window.close()
except:
pass
try:
problem = self.problem_model.selectedIndexes()[0].data()
code = self.problem_model.selectedIndexes()[1].data()
test_files = int(self.problem_model.selectedIndexes()[2].data())
time_limit = int(self.problem_model.selectedIndexes()[3].data())
if problem == None:
pass
else:
self.window = problem_edit_ui(
self.data_changed_flags,
self.task_queue,
self.log_queue,
problem,
code,
test_files,
time_limit
)
self.window.show()
except Exception as error:
print('[ UI ][ ERROR ]' + str(error))
self.log('[ UI ][ ERROR ]' + str(error))
finally:
return
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Code related to database reset
@pyqtSlot()
def delete_account(self, selected_row):
if self.data_changed_flags[6] == 0:
# Set critical flag
self.data_changed_flags[6] = 1
else:
# If one data deletion window is already opened, process it first.
return
# If no row is selected, return
try:
username = str(self.account_model.selectedIndexes()[0].data())
except:
# Reset data_changed_flag for deletion of account
self.data_changed_flags[6] = 0
return
message = "Are you sure you want to delete : " + username + " ? "
custom_box = QMessageBox()
custom_box.setIcon(QMessageBox.Critical)
custom_box.setWindowTitle('Confirm Deletion')
custom_box.setText(message)
custom_box.setStandardButtons(QMessageBox.Yes|QMessageBox.No)
custom_box.setDefaultButton(QMessageBox.No)
button_yes = custom_box.button(QMessageBox.Yes)
button_yes.setText('Yes')
button_no = custom_box.button(QMessageBox.No)
button_no.setText('No')
button_yes.setObjectName("close_button_yes")
button_no.setObjectName("close_button_no")
button_yes.setStyleSheet(open('Interface/style.qss', "r").read())
button_no.setStyleSheet(open('Interface/style.qss', "r").read())
custom_box.exec_()
if custom_box.clickedButton() == button_yes:
# Delete from accounts table and connected clients table
message = {
'Code' : 'DelUsr',
'Client' : username
}
message = json.dumps(message)
self.task_queue.put(message)
# Broadcast this user disconnection
message = {
'Code' : 'DSCNT',
'Mode' : 1,
'Client' : username
}
message = json.dumps(message)
self.task_queue.put(message)
elif custom_box.clickedButton() == button_no :
pass
# Reset critical flag
self.data_changed_flags[6] = 0
return
def reset_accounts(self):
if self.data_changed_flags[6] == 0:
# Set critical flag
self.data_changed_flags[6] = 1
else:
# If one data deletion window is already opened, process it first.
return
# If no row is selected, return
try:
message = "Are you sure you want to DELETE ALL accounts?"
message += "\nAll connected clients will also be disconnected!"
custom_close_box = QMessageBox()
custom_close_box.setIcon(QMessageBox.Critical)
custom_close_box.setWindowTitle('Confirm RESET')
custom_close_box.setText(message)
custom_close_box.setStandardButtons(QMessageBox.Yes|QMessageBox.No)
custom_close_box.setDefaultButton(QMessageBox.No)
button_yes = custom_close_box.button(QMessageBox.Yes)
button_yes.setText('Yes')
button_no = custom_close_box.button(QMessageBox.No)
button_no.setText('No')
button_yes.setObjectName("close_button_yes")
button_no.setObjectName("close_button_no")
button_yes.setStyleSheet(open('Interface/style.qss', "r").read())
button_no.setStyleSheet(open('Interface/style.qss', "r").read())
custom_close_box.exec_()
if custom_close_box.clickedButton() == button_yes:
print('[ EVENT ] All Client + Judge disconnection under progress...')
self.log('[ EVENT ] All Client + Judge disconnection under progress...')
message = {
'Code' : 'DSCNT',
'Mode' : 2
}
message = json.dumps(message)
self.task_queue.put(message)
print('[ EVENT ] Disconnecting all accounts...')
self.log('[ EVENT ] Disconnecting all accounts...')
message = {
'Code' : 'AccReset'
}
message = json.dumps(message)
self.task_queue.put(message)
self.account_model.clearContents()
self.account_model.setRowCount(0)
elif custom_close_box.clickedButton() == button_no :
pass
except:
print('[ UI ][ ERROR ] Could not reset database!')
self.log('[ UI ][ ERROR ] Could not reset database!')
finally:
# Reset critical flag
self.data_changed_flags[6] = 0
return
def disconnect_all(self):
if self.data_changed_flags[6] == 0:
# Set critical flag
self.data_changed_flags[6] = 1
else:
# If one data deletion window is already opened, process it first.
return
status = self.password_verification()
if status == 1:
pass
elif status == 2:
self.data_changed_flags[6] = 0
return
else:
self.data_changed_flags[6] = 0
QMessageBox.about(self, "Access Denied!", "Authentication failed!")
return
# Confirm with the user before disconnecting all clients and judges
try:
message = "Are you sure to DISCONNECT all Clients and Judges?\nClients will be able to login again if permitted."
custom_close_box = QMessageBox()
custom_close_box.setIcon(QMessageBox.Critical)
custom_close_box.setWindowTitle('Disconnect Clients')
custom_close_box.setText(message)
custom_close_box.setStandardButtons(QMessageBox.Yes|QMessageBox.No)
custom_close_box.setDefaultButton(QMessageBox.No)
button_yes = custom_close_box.button(QMessageBox.Yes)
button_yes.setText('Yes')
button_no = custom_close_box.button(QMessageBox.No)
button_no.setText('No')
button_yes.setObjectName("close_button_yes")
button_no.setObjectName("close_button_no")
button_yes.setStyleSheet(open('Interface/style.qss', "r").read())
button_no.setStyleSheet(open('Interface/style.qss', "r").read())
custom_close_box.exec_()
if custom_close_box.clickedButton() == button_yes:
print('[ EVENT ] All Client + Judge disconnection under progress...')
self.log('[ EVENT ] All Client + Judge disconnection under progress...')
message = {
'Code' : 'DSCNT',
'Mode' : 2
}
message = json.dumps(message)
self.task_queue.put(message)
self.client_model.clearContents()
self.client_model.setRowCount(0)
self.judge_model.clearContents()
self.judge_model.setRowCount(0)
elif custom_close_box.clickedButton() == button_no :
pass
except Exception as error:
print('[ UI ][ ERROR ] Could not disconnect clients : ' + str(error))
self.log('[ UI ][ ERROR ] Could not disconnect clients : ' + str(error))
finally:
# Reset critical flag
self.data_changed_flags[6] = 0
return
def reset_submissions(self):
if self.data_changed_flags[6] == 0:
# Set critical flag
self.data_changed_flags[6] = 1
else:
# If one data deletion window is already opened, process it first.
return
# Confirm with the user before wiping all submissions
try:
message = "Are you sure you want to DELETE ALL submissions?"
custom_close_box = QMessageBox()
custom_close_box.setIcon(QMessageBox.Critical)
custom_close_box.setWindowTitle('Confirm RESET')
custom_close_box.setText(message)
custom_close_box.setStandardButtons(QMessageBox.Yes|QMessageBox.No)
custom_close_box.setDefaultButton(QMessageBox.No)
button_yes = custom_close_box.button(QMessageBox.Yes)
button_yes.setText('Yes')
button_no = custom_close_box.button(QMessageBox.No)
button_no.setText('No')
button_yes.setObjectName("close_button_yes")
button_no.setObjectName("close_button_no")
button_yes.setStyleSheet(open('Interface/style.qss', "r").read())
button_no.setStyleSheet(open('Interface/style.qss', "r").read())
custom_close_box.exec_()
if custom_close_box.clickedButton() == button_yes:
print('[ EVENT ] Submission Reset...')
self.log('[ EVENT ] Submission Reset...')
message = {
'Code' : 'SubReset'
}
message = json.dumps(message)
self.task_queue.put(message)
self.sub_model.clearContents()
self.sub_model.setRowCount(0)
elif custom_close_box.clickedButton() == button_no :
pass
except Exception as error:
print('[ UI ][ ERROR ] Could not reset database : ' + str(error))
self.log('[ UI ][ ERROR ] Could not reset database : ' + str(error))
finally:
# Reset critical flag
self.data_changed_flags[6] = 0
return
def reset_queries(self):
if self.data_changed_flags[6] == 0:
# Set critical flag
self.data_changed_flags[6] = 1
else:
# If one data deletion window is already opened, process it first.
return
# Confirm with the user before wiping all queries
try:
message = "Are you sure you want to DELETE ALL queries?"
custom_close_box = QMessageBox()
custom_close_box.setIcon(QMessageBox.Critical)
custom_close_box.setWindowTitle('Confirm RESET')
custom_close_box.setText(message)
custom_close_box.setStandardButtons(QMessageBox.Yes|QMessageBox.No)
custom_close_box.setDefaultButton(QMessageBox.No)
button_yes = custom_close_box.button(QMessageBox.Yes)
button_yes.setText('Yes')
button_no = custom_close_box.button(QMessageBox.No)
button_no.setText('No')
button_yes.setObjectName("close_button_yes")
button_no.setObjectName("close_button_no")
button_yes.setStyleSheet(open('Interface/style.qss', "r").read())
button_no.setStyleSheet(open('Interface/style.qss', "r").read())
custom_close_box.exec_()
if custom_close_box.clickedButton() == button_yes:
print('[ EVENT ] Queries Reset...')
self.log('[ EVENT ] Queries Reset...')
message = {
'Code' : 'QueryReset'
}
message = json.dumps(message)
self.task_queue.put(message)
self.query_model.clearContents()
self.query_model.setRowCount(0)
elif custom_close_box.clickedButton() == button_no :
pass
except Exception as error:
print('[ UI ][ ERROR ] Could not reset database : ' + str(error))
self.log('[ UI ][ ERROR ] Could not reset database : ' + str(error))
finally:
# Reset critical flag
self.data_changed_flags[6] = 0
return
def reset_server(self):
if self.data_changed_flags[6] == 0:
# Set critical flag
self.data_changed_flags[6] = 1
else:
# If one data deletion window is already opened, process it first.
return
# Confirm with the user before resetting the whole server state
try:
message = "Are you sure to RESET the server?\nContest Information will be lost"
extra_data = "You should create the contest report first!"
custom_close_box = QMessageBox()
custom_close_box.setIcon(QMessageBox.Critical)
custom_close_box.setWindowTitle('SERVER RESET')
custom_close_box.setText(message)
custom_close_box.setInformativeText(extra_data)
custom_close_box.setStandardButtons(QMessageBox.Yes|QMessageBox.No)
custom_close_box.setDefaultButton(QMessageBox.No)
button_yes = custom_close_box.button(QMessageBox.Yes)
button_yes.setText('Yes')
button_no = custom_close_box.button(QMessageBox.No)
button_no.setText('No')
button_yes.setObjectName("close_button_yes")
button_no.setObjectName("close_button_no")
button_yes.setStyleSheet(open('Interface/style.qss', "r").read())
button_no.setStyleSheet(open('Interface/style.qss', "r").read())
custom_close_box.exec_()
if custom_close_box.clickedButton() == button_yes:
print('[ EVENT ] SERVER RESET TRIGGERED')
self.log('[ EVENT ] SERVER RESET TRIGGERED')
print('[ RESET ] Disconnecting all Connected Clients...')
self.log('[ RESET ] Disconnecting all Connected Clients...')
message = {
'Code' : 'DSCNT',
'Mode' : 2
}
message = json.dumps(message)
self.task_queue.put(message)
self.client_model.clearContents()
self.client_model.setRowCount(0)
print('[ RESET ] Disconnecting all Connected Judges...')
self.log('[ RESET ] Disconnecting all Connected Judges...')
message = {
"Code" : 'JDSCNT',
"Judge" : '__ALL__'
}
message = json.dumps(message)
self.task_queue.put(message)
self.judge_model.clearContents()
self.judge_model.setRowCount(0)
# Reset accounts
print('[ RESET ] Deleting all User Accounts...')
self.log('[ RESET ] Deleting all User Accounts...')
message = {
'Code' : 'AccReset'
}
message = json.dumps(message)
self.task_queue.put(message)
self.account_model.clearContents()
self.account_model.setRowCount(0)
# Reset submissions
print('[ RESET ] Resetting Submissions...')
self.log('[ RESET ] Resetting Submissions...')
# Reset Scoreboard
print('[ RESET ] Clearing scoreboard...')
self.log('[ RESET ] Clearing scoreboard...')
message = {
'Code' : 'SubReset'
}
message = json.dumps(message)
self.task_queue.put(message)
self.sub_model.clearContents()
self.sub_model.setRowCount(0)
# Update Queries View
print('[ RESET ] Resetting Queries...')
self.log('[ RESET ] Resetting Queries...')
message = {
'Code' : 'QueryReset'
}
message = json.dumps(message)
self.task_queue.put(message)
self.query_model.clearContents()
self.query_model.setRowCount(0)
print('[ RESET ] Reset environment...')
self.log('[ RESET ] Reset environment...')
server_window.set_button_behavior(self, 'SETUP')
save_status.update_entry('Contest Duration', '00:00:00')
save_status.update_entry('Contest Status', 'SETUP')
save_status.update_entry('Contest Start Time', '00:00:00')
save_status.update_entry('Contest End Time', '00:00:00')
save_status.update_entry('Contest Set Time', 0)
elif custom_close_box.clickedButton() == button_no :
pass
except Exception as error:
print('[ CLOSE ][ ERROR ] Could not reset server : ' + str(error))
self.log('[ CLOSE ][ ERROR ] Could not reset server : ' + str(error))
finally:
# Reset critical flag
self.data_changed_flags[6] = 0
return
def reset_timer(self):
if self.data_changed_flags[6] == 0:
# Set critical flag
self.data_changed_flags[6] = 1
else:
# If one data deletion window is already opened, process it first.
return
# Confirm with the user before resetting the contest timer
try:
message = "Are you sure to RESET the timer?"
custom_close_box = QMessageBox()
custom_close_box.setIcon(QMessageBox.Critical)
custom_close_box.setWindowTitle('TIMER RESET')
custom_close_box.setText(message)
custom_close_box.setStandardButtons(QMessageBox.Yes|QMessageBox.No)
custom_close_box.setDefaultButton(QMessageBox.No)
button_yes = custom_close_box.button(QMessageBox.Yes)
button_yes.setText('Yes')
button_no = custom_close_box.button(QMessageBox.No)
button_no.setText('No')
button_yes.setObjectName("close_button_yes")
button_no.setObjectName("close_button_no")
button_yes.setStyleSheet(open('Interface/style.qss', "r").read())
button_no.setStyleSheet(open('Interface/style.qss', "r").read())
custom_close_box.exec_()
if custom_close_box.clickedButton() == button_yes:
print('[ EVENT ] TIMER RESET TRIGGERED')
self.log('[ EVENT ] TIMER RESET TRIGGERED')
print('[ RESET ] Reset environment...')
self.log('[ RESET ] Reset environment...')
server_window.set_button_behavior(self, 'SETUP')
save_status.update_entry('Contest Duration', '00:00:00')
save_status.update_entry('Contest Status', 'SETUP')
save_status.update_entry('Contest Start Time', '00:00:00')
save_status.update_entry('Contest End Time', '00:00:00')
save_status.update_entry('Contest Set Time', 0)
elif custom_close_box.clickedButton() == button_no :
pass
except Exception as error:
print('[ ERROR ] Could not reset timer : ' + str(error))
self.log('[ ERROR ] Could not reset timer : ' + str(error))
finally:
# Reset critical flag
self.data_changed_flags[6] = 0
return
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
###################################################
def set_status(self, message = 'SETUP'):
self.status.showMessage(' BitsOJ Server > ' + message)
def closeEvent(self, event):
# If server exit is called, accept
if self.data_changed_flags[23] == 1:
event.accept()
sys.exit()
# If lock is set, ignore
if self.data_changed_flags[24] == 1:
QMessageBox.about(self, "Access Denied", "Server is locked!")
self.log('[ SECURITY ] Server Close attempt -> Denied: Server was locked.')
event.ignore()
return
# If contest is running,
if self.data_changed_flags[10] == 1:
status = self.password_verification()
if status == 1:
pass
elif status == 2:
event.ignore()
return
else:
QMessageBox.about(self, "Access Denied", "Authentication failed!")
self.log('[ SECURITY ] Server Close attempt -> Denied: Password mismatch.')
event.ignore()
return
message = "Pressing 'Yes' will SHUT the Server."
info_message = (
"No requests will be processed while it is closed.\n" +
"Are you sure you want to exit?"
)
detail_message =(
"Server will resume the contest when restarted.\n"
)
custom_close_box = QMessageBox()
custom_close_box.setIcon(QMessageBox.Critical)
custom_close_box.setWindowTitle('Warning!')
custom_close_box.setText(message)
custom_close_box.setInformativeText(info_message)
custom_close_box.setDetailedText(detail_message)
custom_close_box.setStandardButtons(QMessageBox.Yes|QMessageBox.No)
custom_close_box.setDefaultButton(QMessageBox.No)
button_yes = custom_close_box.button(QMessageBox.Yes)
button_yes.setText('Yes')
button_no = custom_close_box.button(QMessageBox.No)
button_no.setText('No')
button_yes.setObjectName("close_button_yes")
button_no.setObjectName("close_button_no")
button_yes.setStyleSheet(open('Interface/style.qss', "r").read())
button_no.setStyleSheet(open('Interface/style.qss', "r").read())
custom_close_box.exec_()
if custom_close_box.clickedButton() == button_yes:
self.data_changed_flags[7] = 1
self.log('[ SECURITY ] Server Close attempt -> Accepted.')
event.accept()
elif custom_close_box.clickedButton() == button_no :
event.ignore()
class init_gui(server_window):
def __init__(self, data_changed_flags, task_queue, log_queue, update_queue, db_list, lock):
# Create the Qt application instance
app = QApplication(sys.argv)
app.setStyle("Fusion")
app.setStyleSheet(open('Interface/style.qss', "r").read())
# If user is about to close window
app.aboutToQuit.connect(self.closeEvent)
server_app = server_window(data_changed_flags, task_queue, log_queue, update_queue, db_list, lock)
server_app.showMaximized()
# Splash ends
# Execute the app mainloop
app.exec_()
return
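# Every handler above builds a dict, serializes it with json.dumps() and pushes it onto
# task_queue. A minimal helper sketching that pattern ('_send_task' is illustrative only
# and not part of the original code base):
def _send_task(task_queue, code, **fields):
    # Serialize a task message and hand it to the server core via the shared queue.
    message = {'Code': code}
    message.update(fields)
    task_queue.put(json.dumps(message))
# Example: _send_task(task_queue, 'DSCNT', Mode=2) mirrors the broadcast disconnection
# message used in disconnect_all() and reset_server().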
|
_utils.py
|
"""
Various helpers. For internal use only.
"""
import collections
import traceback
import sys
import queue
import logging
import threading
from ._importer import logger, get_debug_level, set_debug_level
# Export utilities implemented in importer module
logger = logger
get_debug_level = get_debug_level
set_debug_level = set_debug_level
class _AtomicCounter:
def __init__(self, start=0):
self._value = start
self._lock = threading.Lock()
def __next__(self):
with self._lock:
value = self._value
self._value += 1
return value
def __iter__(self):
return self
atomic_count = _AtomicCounter
class IdentityAdapter(logging.LoggerAdapter):
def __init__(self, logger, identity):
super().__init__(logger, {})
self._prefix = '[{}] '.format(identity)
def process(self, msg, kwargs):
return self._prefix + msg, kwargs
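# Minimal usage sketch for IdentityAdapter; 'worker-1' below is an arbitrary example identity.
def _identity_adapter_example():
    log = IdentityAdapter(logger, 'worker-1')
    log.debug('connection established')  # emitted as "[worker-1] connection established"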
# Lightweight version of concurrent.futures.Future
class Future:
def __init__(self):
self._done = threading.Event()
self._result = None
self._exc = None
def result(self):
self._done.wait()
if self._exc is not None:
raise self._exc
return self._result
def set_result(self, result):
self._result = result
self._done.set()
def set_exception(self, exc):
self._exc = exc
self._done.set()
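# Minimal usage sketch for Future: complete it from a worker thread, then block on the result.
def _future_example():
    fut = Future()
    threading.Thread(target=lambda: fut.set_result(42), daemon=True).start()
    return fut.result()  # blocks until set_result()/set_exception() is called, then returns 42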
# Runs functions in worker threads.
class Executor:
def __init__(self, idle_timeout=10.0):
self._lock = threading.Lock()
self._tasks = collections.deque()
self._idle_condition = threading.Condition(self._lock)
self._num_idle_threads = 0
self._num_threads = 0
self._idle_timeout = idle_timeout
self._joinable_thread = None
self._closing = False
self._close_condition = threading.Condition(self._lock)
def submit(self, func, *args):
with self._lock:
if self._closing:
raise ValueError('Executor is closed')
if self._num_idle_threads > 0:
self._tasks.append((func, args))
self._num_idle_threads -= 1
self._idle_condition.notify()
else:
thread = threading.Thread(target=self._thread_func, args=(func, args), daemon=True)
thread.start()
self._num_threads += 1
def _thread_func(self, func, args):
while True:
try:
func(*args)
# All exceptions from threading.Thread.run() method are ignored, here we emulate that behavior.
except BaseException:
ignore_exception_at(func)
with self._lock:
if self._closing:
break
self._num_idle_threads += 1
while not self._tasks and not self._closing:
if not self._idle_condition.wait(self._idle_timeout) and self._num_idle_threads > 0:
self._num_idle_threads -= 1
break
if not self._tasks:
break
func, args = self._tasks.popleft()
with self._lock:
self._num_threads -= 1
if self._closing and self._num_threads == 0:
self._close_condition.notify_all()
if self._joinable_thread:
self._joinable_thread.join()
self._joinable_thread = threading.current_thread()
def close(self):
with self._lock:
self._closing = True
self._num_idle_threads = 0
self._idle_condition.notify_all()
while self._num_threads > 0:
self._close_condition.wait()
if self._joinable_thread:
self._joinable_thread.join()
self._joinable_thread = None
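# Minimal usage sketch for Executor: submit a few fire-and-forget tasks, then shut it down.
def _executor_example():
    executor = Executor(idle_timeout=1.0)
    for i in range(3):
        executor.submit(print, 'task', i)  # submit(func, *args); exceptions are reported and ignored
    executor.close()                       # blocks until all worker threads have finished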
# based on PyErr_WriteUnraisable
def ignore_exception_at(obj):
try:
print('Exception ignored in: {!r}'.format(obj), file=sys.stderr)
traceback.print_exc()
except OSError:
pass
# Variant of traceback.format_exception_only that really formats only the exception. For SyntaxError,
# traceback.format_exception_only outputs also some additional lines that pretend to be a part of the traceback.
def format_exception_only(error):
if isinstance(error, SyntaxError):
return 'SyntaxError: {}'.format(error)
return ''.join(traceback.format_exception_only(type(error), error)).strip()
# Synchronized queue implementation that can be safely used from within __del__ methods, as opposed to
# queue.Queue (https://bugs.python.org/issue14976). In Python 3.7, queue.SimpleQueue can be used instead.
#
# This class was written with CPython in mind, and may not work correctly with other Python implementations.
class SimpleQueue:
def __init__(self):
self.__lock = threading.Lock()
self.__waiter = threading.Lock()
self.__items = collections.deque()
# Create bound method objects for later use in get()/put() - creating such objects may trigger GC,
# which must not happen inside get()/put(). These methods are all written in C, and are expected not to
# allocate GC memory internally (e.g. with PyObject_GC_New).
self.__lock_acquire = self.__lock.acquire
self.__lock_release = self.__lock.release
self.__waiter_acquire = self.__waiter.acquire
self.__waiter_release = self.__waiter.release
self.__items_popleft = self.__items.popleft
self.__items_append = self.__items.append
self.__waiter_acquire()
# Wait until a queue becomes non-empty, and retrieve a single element from the queue. This function may be
# called only from a single thread at a time.
def get(self, timeout=None):
if timeout is None:
self.__waiter_acquire()
elif not self.__waiter_acquire(timeout=timeout):
raise queue.Empty
self.__lock_acquire()
item = self.__items_popleft()
if self.__items:
self.__waiter_release()
self.__lock_release()
return item
# Insert a single element at the end of the queue. This method may be safely called from multiple threads,
# and from __del__ methods.
def put(self, item):
self.__lock_acquire()
if not self.__items:
self.__waiter_release()
self.__items_append(item)
self.__lock_release()
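# Minimal usage sketch for SimpleQueue: put() may be called from any thread (even from __del__
# methods), while get() must only be called from one consumer thread at a time.
def _simplequeue_example():
    q = SimpleQueue()
    q.put('item')
    try:
        return q.get(timeout=0.1)  # returns 'item'
    except queue.Empty:
        return None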
|
gps_main.py
|
""" This file defines the main object that runs experiments. """
import logging
import imp
import os
import os.path
import sys
import copy
import argparse
import threading
import time
import traceback
import matplotlib as mpl
sys.path.append('/'.join(str.split(__file__, '/')[:-2]))
# Add gps/python to path so that imports work.
from gps.utility.data_logger import DataLogger
from gps.sample.sample_list import SampleList
from gps.gui.gps_training_gui import GPSTrainingGUI
mpl.use('Qt4Agg')
class GPSMain(object):
""" Main class to run algorithms and experiments. """
def __init__(self, config, quit_on_end=False):
"""
Initialize GPSMain
Args:
config: Hyperparameters for experiment
quit_on_end: When true, quit automatically on completion
"""
self._quit_on_end = quit_on_end
self._hyperparams = config
self._conditions = config['common']['conditions']
#self._condition = 1
if 'train_conditions' in config['common']:
# (evaluates to False for this experiment's hyperparams, so this branch is skipped)
self._train_idx = config['common']['train_conditions']
self._test_idx = config['common']['test_conditions']
else:
self._train_idx = range(self._conditions)
config['common']['train_conditions'] = config['common']['conditions']
# create a 'train_conditions' key in the 'common' dict, mirroring 'conditions'
self._hyperparams = config
# re-assign the hyperparameters because the config dict was just modified
self._test_idx = self._train_idx
# without explicit test conditions, test on the training conditions
self._data_files_dir = config['common']['data_files_dir']
# directory for data files, taken from the 'common' dict
self.agent = config['agent']['type'](config['agent'])
# instantiate the agent class specified in the hyperparams (an agent from the gps/agent directory)
#print(self.agent,'self.agent')
self.data_logger = DataLogger()
# the GUI is optional: only created when 'gui_on' is set in the hyperparams
self.gui = GPSTrainingGUI(config['common']) if config['gui_on'] else None
# the config dict is modified again: the agent object is added so the algorithm can use it
config['algorithm']['agent'] = self.agent
self.algorithm = config['algorithm']['type'](config['algorithm'])
#print(config['algorithm']['type'](config['algorithm']),'self.algo')
# e.g. gps.algorithm.algorithm_traj_opt.AlgorithmTrajOpt is the algorithm used in this setup
def run(self, itr_load=None):
"""
Run training by iteratively sampling and taking an iteration.
Args:
itr_load: If specified, loads algorithm state from that
iteration, and resumes training at the next iteration.
Returns: None
"""
# this is the entry point called from main()
try:
# _initialize() opens the GUI (if enabled) and returns the starting iteration;
# when itr_load is supplied, training resumes from the iteration after it
itr_start = self._initialize(itr_load)
#print(itr_start,'iteration start',self._initialize,'this is to initialize some')
for itr in range(itr_start, self._hyperparams['iterations']):
# iterate from the starting iteration up to the 'iterations' value set in the
# experiment's hyperparams file (5 in this setup)
for cond in self._train_idx:
# loop over the training conditions (initial states / target points)
for i in range(self._hyperparams['num_samples']):
# take 'num_samples' samples per condition (5 in this setup)
self._take_sample(itr, cond, i)
traj_sample_lists = [
# get_samples() is implemented in the Agent base class and returns a SampleList
# holding the most recent 'num_samples' samples for each condition
self.agent.get_samples(cond, -self._hyperparams['num_samples'])
for cond in self._train_idx
]
# Clear agent samples (also implemented in the Agent base class).
self.agent.clear_samples()
self._take_iteration(itr, traj_sample_lists)
pol_sample_lists = self._take_policy_samples()
self._log_data(itr, traj_sample_lists, pol_sample_lists)
except Exception as e:
traceback.print_exception(*sys.exc_info())
finally:
self._end()
def test_policy(self, itr, N):
"""
Take N policy samples of the algorithm state at iteration itr,
for testing the policy to see how it is behaving.
(Called directly from the command line --policy flag).
Args:
itr: the iteration from which to take policy samples
N: the number of policy samples to take
Returns: None
"""
algorithm_file = self._data_files_dir + 'algorithm_itr_%02d.pkl' % itr
self.algorithm = self.data_logger.unpickle(algorithm_file)
if self.algorithm is None:
print("Error: cannot find '%s.'" % algorithm_file)
os._exit(1) # called instead of sys.exit(), since this is in a thread
traj_sample_lists = self.data_logger.unpickle(self._data_files_dir +
('traj_sample_itr_%02d.pkl' % itr))
pol_sample_lists = self._take_policy_samples(N)
self.data_logger.pickle(
self._data_files_dir + ('pol_sample_itr_%02d.pkl' % itr),
copy.copy(pol_sample_lists)
)
if self.gui:
self.gui.update(itr, self.algorithm, self.agent,
traj_sample_lists, pol_sample_lists)
self.gui.set_status_text(('Took %d policy sample(s) from ' +
'algorithm state at iteration %d.\n' +
'Saved to: data_files/pol_sample_itr_%02d.pkl.\n') % (N, itr, itr))
def _initialize(self, itr_load):
"""
Initialize from the specified iteration.
Args:
itr_load: If specified, loads algorithm state from that
iteration, and resumes training at the next iteration.
Returns:
itr_start: Iteration to start from.
"""
if itr_load is None:
if self.gui:
self.gui.set_status_text('Press \'go\' to begin.')
return 0
else:
algorithm_file = self._data_files_dir + 'algorithm_itr_%02d.pkl' % itr_load
self.algorithm = self.data_logger.unpickle(algorithm_file)
if self.algorithm is None:
print("Error: cannot find '%s.'" % algorithm_file)
os._exit(1) # called instead of sys.exit(), since this is in a thread
if self.gui:
traj_sample_lists = self.data_logger.unpickle(self._data_files_dir +
('traj_sample_itr_%02d.pkl' % itr_load))
if self.algorithm.cur[0].pol_info:
pol_sample_lists = self.data_logger.unpickle(self._data_files_dir +
('pol_sample_itr_%02d.pkl' % itr_load))
else:
pol_sample_lists = None
self.gui.set_status_text(
('Resuming training from algorithm state at iteration %d.\n' +
'Press \'go\' to begin.') % itr_load)
return itr_load + 1
def _take_sample(self, itr, cond, i):
"""
Collect a sample from the agent.
Args:
itr: Iteration number.
cond: Condition number.
i: Sample number.
Returns: None
"""
if self.algorithm._hyperparams['sample_on_policy'] \
and self.algorithm.iteration_count > 0:
# on-policy sampling: use the global policy once at least one iteration has run
pol = self.algorithm.policy_opt.policy
else:
#print(self.algorithm.iteration_count)
# otherwise sample from the per-condition local trajectory distribution
pol = self.algorithm.cur[cond].traj_distr
#print(self.algorithm.cur,'this is the pol',cond,'cond')
if self.gui:
self.gui.set_image_overlays(cond) # Must call for each new cond.
redo = True
while redo:
while self.gui.mode in ('wait', 'request', 'process'):
if self.gui.mode in ('wait', 'process'):
time.sleep(0.01)
continue
# 'request' mode.
if self.gui.request == 'reset':
try:
self.agent.reset(cond)
except NotImplementedError:
self.gui.err_msg = 'Agent reset unimplemented.'
elif self.gui.request == 'fail':
self.gui.err_msg = 'Cannot fail before sampling.'
self.gui.process_mode() # Complete request.
self.gui.set_status_text(
'Sampling: iteration %d, condition %d, sample %d.' %
(itr, cond, i)
)
# sampling is delegated to the agent (agent_box2d in this setup) via Agent.sample()
self.agent.sample(
pol, cond,
verbose=(i < self._hyperparams['verbose_trials'])
)
if self.gui.mode == 'request' and self.gui.request == 'fail':
redo = True
self.gui.process_mode()
self.agent.delete_last_sample(cond)
else:
redo = False
else:
self.agent.sample(
pol, cond,
verbose=(i < self._hyperparams['verbose_trials'])
)
def _take_iteration(self, itr, sample_lists):
"""
Take an iteration of the algorithm.
Args:
itr: Iteration number.
Returns: None
"""
if self.gui:
self.gui.set_status_text('Calculating.')
self.gui.start_display_calculating()
# iteration() is implemented by the Algorithm classes in the gps directory;
# sample_lists holds the data collected by running the simulation this iteration
self.algorithm.iteration(sample_lists)
if self.gui:
self.gui.stop_display_calculating()
def _take_policy_samples(self, N=None):
"""
Take samples from the policy to see how it's doing.
Args:
N : number of policy samples to take per condition
Returns: None
"""
if 'verbose_policy_trials' not in self._hyperparams:
# AlgorithmTrajOpt
return None
verbose = self._hyperparams['verbose_policy_trials']
if self.gui:
self.gui.set_status_text('Taking policy samples.')
pol_samples = [[None] for _ in range(len(self._test_idx))]
# Since this isn't noisy, just take one sample.
# TODO: Make this noisy? Add hyperparam?
# TODO: Take at all conditions for GUI?
for cond in range(len(self._test_idx)):
pol_samples[cond][0] = self.agent.sample(
self.algorithm.policy_opt.policy, self._test_idx[cond],
verbose=verbose, save=False, noisy=False)
return [SampleList(samples) for samples in pol_samples]
def _log_data(self, itr, traj_sample_lists, pol_sample_lists=None):
"""
Log data and algorithm, and update the GUI.
Args:
itr: Iteration number.
traj_sample_lists: trajectory samples as SampleList object
pol_sample_lists: policy samples as SampleList object
Returns: None
"""
if self.gui:
self.gui.set_status_text('Logging data and updating GUI.')
self.gui.update(itr, self.algorithm, self.agent,
traj_sample_lists, pol_sample_lists)
self.gui.save_figure(
self._data_files_dir + ('figure_itr_%02d.png' % itr)
)
if 'no_sample_logging' in self._hyperparams['common']:
return
self.data_logger.pickle(
self._data_files_dir + ('algorithm_itr_%02d.pkl' % itr),
copy.copy(self.algorithm)
)
self.data_logger.pickle(
self._data_files_dir + ('traj_sample_itr_%02d.pkl' % itr),
copy.copy(traj_sample_lists)
)
if pol_sample_lists:
self.data_logger.pickle(
self._data_files_dir + ('pol_sample_itr_%02d.pkl' % itr),
copy.copy(pol_sample_lists)
)
def _end(self):
""" Finish running and exit. """
if self.gui:
self.gui.set_status_text('Training complete.')
self.gui.end_mode()
if self._quit_on_end:
# Quit automatically (for running sequential expts)
os._exit(1)
def main():
""" Main function to be run. """
parser = argparse.ArgumentParser(description='Run the Guided Policy Search algorithm.')
parser.add_argument('experiment', type=str,
help='experiment name')
parser.add_argument('-n', '--new', action='store_true',
help='create new experiment')
parser.add_argument('-t', '--targetsetup', action='store_true',
help='run target setup')
parser.add_argument('-r', '--resume', metavar='N', type=int,
help='resume training from iter N')
parser.add_argument('-p', '--policy', metavar='N', type=int,
help='take N policy samples (for BADMM/MDGPS only)')
parser.add_argument('-s', '--silent', action='store_true',
help='silent debug print outs')
parser.add_argument('-q', '--quit', action='store_true',
help='quit GUI automatically when finished')
args = parser.parse_args()
# args holds the command-line arguments parsed above
exp_name = args.experiment
resume_training_itr = args.resume
test_policy_N = args.policy
from gps import __file__ as gps_filepath
# locate the installed gps package (this resolves to the package's __init__.py)
gps_filepath = os.path.abspath(gps_filepath)
gps_dir = '/'.join(str.split(gps_filepath, '/')[:-3]) + '/'
# the project root: three directories above the package __init__.py
exp_dir = gps_dir + 'experiments/' + exp_name + '/'
hyperparams_file = exp_dir + 'hyperparams.py'
# experiments live under <project root>/experiments/<experiment name>/,
# each with its own hyperparams.py file defining the hyperparameters
if args.silent:
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
else:
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
if args.new:
from shutil import copy
if os.path.exists(exp_dir):
sys.exit("Experiment '%s' already exists.\nPlease remove '%s'." %
(exp_name, exp_dir))
os.makedirs(exp_dir)
prev_exp_file = '.previous_experiment'
prev_exp_dir = None
try:
with open(prev_exp_file, 'r') as f:
prev_exp_dir = f.readline()
copy(prev_exp_dir + 'hyperparams.py', exp_dir)
if os.path.exists(prev_exp_dir + 'targets.npz'):
copy(prev_exp_dir + 'targets.npz', exp_dir)
except IOError as e:
with open(hyperparams_file, 'w') as f:
f.write('# To get started, copy over hyperparams from another experiment.\n' +
'# Visit rll.berkeley.edu/gps/hyperparams.html for documentation.')
with open(prev_exp_file, 'w') as f:
f.write(exp_dir)
exit_msg = ("Experiment '%s' created.\nhyperparams file: '%s'" %
(exp_name, hyperparams_file))
if prev_exp_dir and os.path.exists(prev_exp_dir):
exit_msg += "\ncopied from : '%shyperparams.py'" % prev_exp_dir
sys.exit(exit_msg)
if not os.path.exists(hyperparams_file):
sys.exit("Experiment '%s' does not exist.\nDid you create '%s'?" %
(exp_name, hyperparams_file))
hyperparams = imp.load_source('hyperparams', hyperparams_file)
if args.targetsetup:
try:
import matplotlib.pyplot as plt
from gps.agent.ros.agent_ros import AgentROS
from gps.gui.target_setup_gui import TargetSetupGUI
agent = AgentROS(hyperparams.config['agent'])
TargetSetupGUI(hyperparams.config['common'], agent)
plt.ioff()
plt.show()
except ImportError:
sys.exit('ROS required for target setup.')
elif test_policy_N:
import random
import numpy as np
import matplotlib.pyplot as plt
seed = hyperparams.config.get('random_seed', 0)
random.seed(seed)
np.random.seed(seed)
data_files_dir = exp_dir + 'data_files/'
data_filenames = os.listdir(data_files_dir)
algorithm_prefix = 'algorithm_itr_'
algorithm_filenames = [f for f in data_filenames if f.startswith(algorithm_prefix)]
current_algorithm = sorted(algorithm_filenames, reverse=True)[0]
current_itr = int(current_algorithm[len(algorithm_prefix):len(algorithm_prefix)+2])
gps = GPSMain(hyperparams.config)
if hyperparams.config['gui_on']:
test_policy = threading.Thread(
target=lambda: gps.test_policy(itr=current_itr, N=test_policy_N)
)
test_policy.daemon = True
test_policy.start()
plt.ioff()
plt.show()
else:
gps.test_policy(itr=current_itr, N=test_policy_N)
else:
import random
import numpy as np
import matplotlib.pyplot as plt
seed = hyperparams.config.get('random_seed', 0)
random.seed(seed)
np.random.seed(seed)
# GPSMain is driven entirely by the hyperparams dict; see the hyperparams.py file
# in the experiment folder and the documentation at
# https://github.com/cbfinn/gps/blob/master/docs/hyperparams.md
gps = GPSMain(hyperparams.config, args.quit)
if hyperparams.config['gui_on']:
run_gps = threading.Thread(
target=lambda: gps.run(itr_load=resume_training_itr)
)
run_gps.daemon = True
run_gps.start()
plt.ioff()
plt.show()
else:
gps.run(itr_load=resume_training_itr)
if __name__ == "__main__":
main()
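# Typical invocations of this script (the experiment name 'box2d_example' is hypothetical
# and must match a directory under experiments/):
#   python gps_main.py box2d_example          # run training
#   python gps_main.py box2d_example -n       # create a new experiment skeleton
#   python gps_main.py box2d_example -r 3     # resume training from iteration 3
#   python gps_main.py box2d_example -p 2     # take 2 policy samples (BADMM/MDGPS only)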
|
bundlecontroller.py
|
#
#*******************************************************************************
#* Copyright (C) 2018, International Business Machines Corporation.           *
#* All Rights Reserved.                                                        *
#*******************************************************************************
#
# WML specific imports
from ibm_watson_machine_learning import APIClient
# REST handler specific imports
from .bundleresthandler import BundleRestHandler
# standard python imports
import logging
import json
import time
import threading
import pickle
import sys
# define tracer and logger:
# logger is for errors that should (and can!) be handled by an administrator,
# tracer is for all other events of interest to developers
tracer = logging.getLogger(__name__)
logger = logging.getLogger("com.ibm.streams.log")
class BundleController():
"""
"""
def __init__(self,
expected_load = 0, # deprecated
queue_size = None,
threads_per_node = None,
single_output = None,
node_count = None,
handler_class = None,
field_mapping = None,
output_function = None,
bundle_size = None
):
tracer.debug("__init__ called")
#############################################################
# Parameters
############################################################
self._expected_load = expected_load
self._max_queue_size = queue_size
self._threads_per_node = threads_per_node
self._single_output = single_output
self._node_count = node_count
self._max_request_size = bundle_size if expected_load == 0 else int(expected_load/self._threads_per_node/self._node_count)
self._handler_class = handler_class
############################################################
# internal variables
############################################################
self._input_queue = list([])
self._sending_threads = []
self._lock = threading.Condition() # changed to condition
self._output_lock = threading.Lock()
self._thread_finish_counter = 0
assert(self._handler_class is not None)
############################################################
# Configure the handler class to be used
# with the handler base classes class parameters
############################################################
self._handler_class.max_copy_size = self._max_request_size
self._handler_class.input_list_lock = self._lock
self._handler_class.source_data_list = self._input_queue
self._handler_class.single_output = self._single_output
self._handler_class.field_mapping = json.loads(field_mapping)
self._handler_class.output_function = output_function
tracer.debug("__init__ finished")
return
def process_data(self, input_data):
"""It is called for every single input data
It will be just stored in the input queue. On max queue size processing
blocks and backpressure on the up-stream/sending_thread happens.
"""
with self._lock:
self._lock.wait_for(lambda : len(self._input_queue) <= self._max_queue_size)
self._input_queue.append(input_data)
self._lock.notify()
def prepare(self):
self._create_sending_threads()
def run(self):
self._start_sending_threads()
def stop(self):
self._end_sending_threads()
def finish(self):
self._join_sending_threads()
def _change_thread_number(self,delta):
return
def _create_sending_threads(self):
for count in range(self._threads_per_node * self._node_count):
tracer.debug("Create thread")
handler_instance = self._handler_class(count)
thread_control = {'index':count,'run':True, 'handler':handler_instance}
thread_control['thread'] = threading.Thread(target = handler_instance.run)
self._sending_threads.append(thread_control)
tracer.debug("Thread data: %s",str(thread_control))
def _start_sending_threads(self):
for thread_control in self._sending_threads:
tracer.debug("Start sending thread %s",str(thread_control))
thread_control['thread'].start()
def _end_sending_threads(self):
for thread_control in self._sending_threads:
#thread_control['run'] = False
thread_control['handler'].stop()
def _join_sending_threads(self):
tracer.debug("_join_sending_threads called during processing of operator stop.")
# trigger threads to signal that they are ready
# each will decrement by 1 if all are ready it's again 0
#self._thread_finish_counter = len(self._sending_threads)
#tracer.debug("Wait for %d threads to finish processing of buffers", len(self._sending_threads))
# wait that the trigger becomes 0 and all threads left their task func
#while self._thread_finish_counter > 0 : time.sleep(1.0)
#tracer.debug("All threads finished processing of buffers")
for thread_control in self._sending_threads:
thread_control['thread'].join()
tracer.debug("Thread %d joined.", thread_control['index'])
|
test_web_backtest.py
|
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
"""
@author: yanqiong
@file: test_web_backtest.py
@create_on: 2020/2/12
@description: "Users/yanqiong/Documents/geckodriver-v0.26.0-macos.tar.gz"
"""
import os
import sys
import time
import unittest
import multiprocessing as mp
from selenium import webdriver
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from datetime import date
from tqsdk import TqApi, TqBacktest, TargetPosTask
from tqsdk.exceptions import BacktestFinished
# Code to be executed in the child process
def run_tianqin_code(port):
try:
api = TqApi(backtest=TqBacktest(start_dt=date(2018, 5, 5), end_dt=date(2018, 5, 10)), web_gui="127.0.0.1:" + port)
klines = api.get_kline_serial("DCE.m1901", 5 * 60, data_length=15)
target_pos = TargetPosTask(api, "DCE.m1901")
while True:
api.wait_update()
if api.is_changing(klines):
ma = sum(klines.close.iloc[-15:]) / 15
if klines.close.iloc[-1] > ma:
target_pos.set_target_volume(5)
elif klines.close.iloc[-1] < ma:
target_pos.set_target_volume(0)
except BacktestFinished as e:
while True:
api.wait_update()
except Exception as e:
api.close()
class WebBacktestTestOnChrome(unittest.TestCase):
def setUp(self):
ctx = mp.get_context('spawn')
self.port = "8082"
self.tq_process = ctx.Process(target=run_tianqin_code, args=(self.port,))
self.tq_process.start()
def tearDown(self):
self.tq_process.terminate()
@unittest.skipIf(not sys.platform.startswith("win"), "test on win")
def test_on_win(self):
chromedriver_path = os.path.join(os.getenv("ChromeWebDriver"), "chromedriver.exe")
run_for_driver(webdriver.Chrome(executable_path=chromedriver_path), self)
@unittest.skipIf(not sys.platform.startswith("linux"), "test on linux")
def test_on_linux(self):
exe_path = os.path.join(os.getenv("CHROMEWEBDRIVER"), "chromedriver")
opts = ChromeOptions()
opts.headless = True
driver = webdriver.Chrome(executable_path=exe_path, options=opts)
run_for_driver(driver, self)
@unittest.skipIf(not sys.platform.startswith("darwin"), "test on macos")
def test_on_macos(self):
run_for_driver(webdriver.Chrome(), self)
class WebBacktestTestOnFirefox(unittest.TestCase):
def setUp(self):
ctx = mp.get_context('spawn')
self.port = "8081"
self.tq_process = ctx.Process(target=run_tianqin_code, args=(self.port,))
self.tq_process.start()
def tearDown(self):
self.tq_process.terminate()
@unittest.skipIf(not sys.platform.startswith("win"), "test on win")
def test_on_win(self):
geckodriver_path = os.path.join(os.getenv("GeckoWebDriver"), "geckodriver.exe")
run_for_driver(webdriver.Firefox(executable_path=geckodriver_path), self)
@unittest.skipIf(not sys.platform.startswith("linux"), "test on linux")
def test_on_linux(self):
exe_path = os.path.join(os.getenv("GECKOWEBDRIVER"), "geckodriver")
opts = FirefoxOptions()
opts.headless = True
driver = webdriver.Firefox(executable_path=exe_path, options=opts)
run_for_driver(driver, self)
@unittest.skipIf(not sys.platform.startswith("darwin"), "test on macos")
def test_on_macos(self):
run_for_driver(webdriver.Firefox(), self)
def run_for_driver(driver, test):
time.sleep(10)
driver.implicitly_wait(30)
driver.get("http://127.0.0.1:" + test.port)
wait = WebDriverWait(driver, 30)
wait.until(EC.title_is("tqsdk-python-web")) # k线图显示
logo = driver.find_element_by_tag_name("img")
test.assertEqual("Tianqin", logo.get_attribute("alt"))
# check that the candlestick chart shows trade arrows
chart_main_marks = driver.find_element_by_css_selector("svg.tqchart>g.root g.main.marks")
trade_arrow_paths = chart_main_marks.find_element_by_css_selector("g.tradearrow")
wait = WebDriverWait(driver, 30)
wait.until(element_has_child(trade_arrow_paths, "path"))
# check that the trades table is displayed
trades_table = driver.find_element_by_css_selector("div.reports.trades-table>table")
wait = WebDriverWait(driver, 30)
wait.until(element_has_child(trades_table, "tbody>tr"))
driver.close()
class element_has_child(object):
def __init__(self, element, css_selector):
self.element = element
self.css_selector = css_selector
def __call__(self, driver):
children = self.element.find_element_by_css_selector(self.css_selector)
print("children", children)
if not children:
return False
return True
if __name__ == "__main__":
unittest.main()
|
datasets.py
|
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Dataloaders and dataset utils
"""
import glob
import hashlib
import json
import logging
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool, Pool
from pathlib import Path
from threading import Thread
from zipfile import ZipFile
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from PIL import Image, ImageOps, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
from utils.general import check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, \
xywh2xyxy, xywhn2xyxy, xyxy2xywhn, xyn2xy
from utils.torch_utils import torch_distributed_zero_first
# Parameters
HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes
VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
NUM_THREADS = min(8, os.cpu_count()) # number of multiprocessing threads
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(paths):
# Returns a single hash value of a list of paths (files or dirs)
size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
h = hashlib.md5(str(size).encode()) # hash sizes
h.update(''.join(paths).encode()) # hash paths
return h.hexdigest() # return hash
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
def exif_transpose(image):
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
:param image: The image to transpose.
:return: An image.
"""
exif = image.getexif()
orientation = exif.get(0x0112, 1) # default 1
if orientation > 1:
method = {2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90,
}.get(orientation)
if method is not None:
image = image.transpose(method)
del exif[0x0112]
image.info["exif"] = exif.tobytes()
return image
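# Minimal usage sketch for exif_transpose; 'path' is any image file supplied by the caller.
def _exif_transpose_example(path):
    im = Image.open(path)
    return exif_transpose(im)  # rotated per the EXIF Orientation tag (no-op if the tag is 1 or absent)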
def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0,
rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''):
# Make sure only the first process in DDP scans the dataset first, so the following ones can use its cache
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
# Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
dataloader = loader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
return dataloader, dataset
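# Illustrative sketch of create_dataloader (the dataset path below is hypothetical): build a
# validation-style loader and pull one batch. train.py additionally passes the full
# hyperparameter dict and augment=True.
def _create_dataloader_example():
    dataloader, dataset = create_dataloader('../datasets/coco128/images/train2017', imgsz=640,
                                            batch_size=16, stride=32, rect=True, rank=-1,
                                            workers=4, prefix='example: ')
    imgs, targets, paths, shapes = next(iter(dataloader))
    # imgs: uint8 tensor of shape (batch, 3, h, w); targets: (n, 6) = [image_index, class, x, y, w, h]
    return dataset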
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages:
# YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
def __init__(self, path, img_size=640, stride=32, auto=True):
p = str(Path(path).resolve()) # os-agnostic absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
self.auto = auto
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print(f'image {self.count}/{self.nf} {path}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
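# Minimal usage sketch for LoadImages (the source path 'data/images' is an assumption):
# iterate over a folder of images/videos the same way detect.py does.
def _load_images_example():
    dataset = LoadImages('data/images', img_size=640, stride=32)
    for path, img, im0s, vid_cap in dataset:
        # img: letterboxed CHW RGB array ready for the model; im0s: the original BGR image
        pass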
class LoadWebcam: # for inference
# YOLOv5 local webcam dataloader, i.e. `python detect.py --source 0`
def __init__(self, pipe='0', img_size=640, stride=32):
self.img_size = img_size
self.stride = stride
self.pipe = eval(pipe) if pipe.isnumeric() else pipe
self.cap = cv2.VideoCapture(self.pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
print(f'webcam {self.count}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams:
# YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
self.auto = auto
for i, s in enumerate(sources): # index, source
# Start thread to read frames from video stream
print(f'{i + 1}/{n}: {s}... ', end='')
if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video
check_requirements(('pafy', 'youtube_dl'))
import pafy
s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL
s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam
cap = cv2.VideoCapture(s)
assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback
self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback
_, self.imgs[i] = cap.read() # guarantee first frame
self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)
print(f" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
self.threads[i].start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, i, cap, stream):
# Read stream `i` frames in daemon thread
n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame
while cap.isOpened() and n < f:
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n % read == 0:
success, im = cap.retrieve()
if success:
self.imgs[i] = im
else:
print('WARNING: Video stream unresponsive, please check your IP camera connection.')
self.imgs[i] *= 0
cap.open(stream) # re-open stream if signal was lost
time.sleep(1 / self.fps[i]) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img0 = self.imgs.copy()
img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths]
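# Minimal sketch of the images -> labels path convention implemented above
# (the dataset layout is a hypothetical example):
def _img2label_paths_example():
    imgs = ['datasets/coco128/images/train2017/000000000009.jpg']
    return img2label_paths(imgs)  # -> ['datasets/coco128/labels/train2017/000000000009.txt']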
class LoadImagesAndLabels(Dataset):
# YOLOv5 train_loader/val_loader, loads images and labels for training and validation
cache_version = 0.6 # dataset labels *.cache version
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
self.path = path
self.albumentations = Albumentations() if augment else None
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('*.*')) # pathlib
elif p.is_file(): # file
with open(p, 'r') as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS])
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}')
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
try:
cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict
assert cache['version'] == self.cache_version # same version
assert cache['hash'] == get_hash(self.label_files + self.img_files) # same hash
except:
cache, exists = self.cache_labels(cache_path, prefix), False # cache
# Display cache
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total
if exists:
d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results
if cache['msgs']:
logging.info('\n'.join(cache['msgs'])) # display warnings
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'
# Read cache
[cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items
labels, shapes, self.segments = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Update labels
include_class = [] # filter labels to include only these classes (optional)
include_class_array = np.array(include_class).reshape(1, -1)
for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
if include_class:
j = (label[:, 0:1] == include_class_array).any(1)
self.labels[i] = label[j]
if segment:
self.segments[i] = segment[j]
if single_cls: # single-class training, merge all classes into 0
self.labels[i][:, 0] = 0
if segment:
self.segments[i][:, 0] = 0
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs, self.img_npy = [None] * n, [None] * n
if cache_images:
if cache_images == 'disk':
self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy')
self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files]
self.im_cache_dir.mkdir(parents=True, exist_ok=True)
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
if cache_images == 'disk':
if not self.img_npy[i].exists():
np.save(self.img_npy[i].as_posix(), x[0])
gb += self.img_npy[i].stat().st_size
else:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})'
pbar.close()
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages
desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..."
with Pool(NUM_THREADS) as pool:
pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))),
desc=desc, total=len(self.img_files))
for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
nm += nm_f
nf += nf_f
ne += ne_f
nc += nc_f
if im_file:
x[im_file] = [l, shape, segments]
if msg:
msgs.append(msg)
pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
pbar.close()
if msgs:
logging.info('\n'.join(msgs))
if nf == 0:
logging.info(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = nf, nm, ne, nc, len(self.img_files)
x['msgs'] = msgs # warnings
x['version'] = self.cache_version # cache version
try:
np.save(path, x) # save cache for next time
path.with_suffix('.cache.npy').rename(path) # remove .npy suffix
logging.info(f'{prefix}New cache created: {path}')
except Exception as e:
logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp augmentation
if random.random() < hyp['mixup']:
img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1)))
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
nl = len(labels) # number of labels
if nl:
labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)
if self.augment:
# Albumentations
img, labels = self.albumentations(img, labels)
nl = len(labels) # update after albumentations
# HSV color-space
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nl:
labels[:, 2] = 1 - labels[:, 2]
# Flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nl:
labels[:, 1] = 1 - labels[:, 1]
# Cutouts
# labels = cutout(img, labels, p=0.5)
labels_out = torch.zeros((nl, 6))
if nl:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
0].type(img[i].type())
l = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
img4.append(im)
label4.append(l)
for i, l in enumerate(label4):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
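# Minimal DataLoader sketch (added illustration; the dataset path is hypothetical).
# collate_fn stacks images into a BCHW tensor and concatenates labels, writing the
# batch index into column 0 so build_targets() can map boxes back to their image.
def _example_val_loader(path='../datasets/coco128/images/train2017'):
    dataset = LoadImagesAndLabels(path, img_size=640, batch_size=16, augment=False)
    return torch.utils.data.DataLoader(dataset,
                                       batch_size=16,
                                       shuffle=False,
                                       collate_fn=LoadImagesAndLabels.collate_fn)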
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, i):
# loads 1 image from dataset index 'i', returns im, original hw, resized hw
im = self.imgs[i]
if im is None: # not cached in ram
npy = self.img_npy[i]
if npy and npy.exists(): # load npy
im = np.load(npy)
else: # read image
path = self.img_files[i]
im = cv2.imread(path) # BGR
assert im is not None, 'Image Not Found ' + path
h0, w0 = im.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # ratio
if r != 1: # if sizes are not equal
im = cv2.resize(im, (int(w0 * r), int(h0 * r)),
interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized
else:
return self.imgs[i], self.img_hw0[i], self.img_hw[i] # im, hw_original, hw_resized
def load_mosaic(self, index):
# YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
labels4, segments4 = [], []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
random.shuffle(indices)
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
segments4.extend(segments)
# Concat/clip labels
labels4 = np.concatenate(labels4, 0)
for x in (labels4[:, 1:], *segments4):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
img4, labels4 = random_perspective(img4, labels4, segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def load_mosaic9(self, index):
# YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic
labels9, segments9 = [], []
s = self.img_size
indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
random.shuffle(indices)
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img9
if i == 0: # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 9 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
c = np.array([xc, yc]) # centers
segments9 = [x - c for x in segments9]
for x in (labels9[:, 1:], *segments9):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9 = random_perspective(img9, labels9, segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../datasets/coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; extract_boxes()
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in IMG_FORMATS:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file, 'r') as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.datasets import *; autosplit()
Arguments
path: Path to images directory
weights: Train, val, test weights (list, tuple)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sorted([x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS]) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path.parent / txt[i], 'a') as f:
f.write('./' + img.relative_to(path.parent).as_posix() + '\n') # add image to txt file
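# Usage sketch (added illustration, hypothetical path): running autosplit on an images
# directory writes autosplit_train.txt, autosplit_val.txt and autosplit_test.txt next
# to it, each listing relative image paths for that split.
def _example_autosplit():
    autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0),
              annotated_only=True)  # keep only images that already have a label file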
def verify_image_label(args):
# Verify one image-label pair
im_file, lb_file, prefix = args
nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
if im.format.lower() in ('jpg', 'jpeg'):
with open(im_file, 'rb') as f:
f.seek(-2, 2)
if f.read() != b'\xff\xd9': # corrupt JPEG
ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved'
# verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
with open(lb_file, 'r') as f:
l = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any([len(x) > 8 for x in l]): # is segment
classes = np.array([x[0] for x in l], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)
l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
l = np.array(l, dtype=np.float32)
nl = len(l)
if nl:
assert l.shape[1] == 5, f'labels require 5 columns, {l.shape[1]} columns detected'
assert (l >= 0).all(), f'negative label values {l[l < 0]}'
assert (l[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {l[:, 1:][l[:, 1:] > 1]}'
l = np.unique(l, axis=0) # remove duplicate rows
if len(l) < nl:
segments = np.unique(segments, axis=0)
msg = f'{prefix}WARNING: {im_file}: {nl - len(l)} duplicate labels removed'
else:
ne = 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm = 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
return im_file, l, shape, segments, nm, nf, ne, nc, msg
except Exception as e:
nc = 1
msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}'
return [None, None, None, None, nm, nf, ne, nc, msg]
def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False):
""" Return dataset statistics dictionary with images and instances counts per split per class
To run in parent directory: export PYTHONPATH="$PWD/yolov5"
Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True)
Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip')
Arguments
path: Path to data.yaml or data.zip (with data.yaml inside data.zip)
autodownload: Attempt to download dataset if not found locally
        verbose: Print stats dictionary
        profile: Profile the time needed to save and re-read the stats file (npy and json)
        hub: Resize images and export stats.json for web/app viewing
    """
def round_labels(labels):
        # Update labels to integer class and 4 decimal place floats
return [[int(c), *[round(x, 4) for x in points]] for c, *points in labels]
def unzip(path):
# Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/'
if str(path).endswith('.zip'): # path is data.zip
assert Path(path).is_file(), f'Error unzipping {path}, file not found'
ZipFile(path).extractall(path=path.parent) # unzip
dir = path.with_suffix('') # dataset directory == zip name
return True, str(dir), next(dir.rglob('*.yaml')) # zipped, data_dir, yaml_path
else: # path is data.yaml
return False, None, path
def hub_ops(f, max_dim=1920):
# HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing
f_new = im_dir / Path(f).name # dataset-hub image filename
try: # use PIL
im = Image.open(f)
r = max_dim / max(im.height, im.width) # ratio
if r < 1.0: # image too large
im = im.resize((int(im.width * r), int(im.height * r)))
im.save(f_new, quality=75) # save
except Exception as e: # use OpenCV
print(f'WARNING: HUB ops PIL failure {f}: {e}')
im = cv2.imread(f)
im_height, im_width = im.shape[:2]
r = max_dim / max(im_height, im_width) # ratio
if r < 1.0: # image too large
im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_LINEAR)
cv2.imwrite(str(f_new), im)
zipped, data_dir, yaml_path = unzip(Path(path))
with open(check_yaml(yaml_path), errors='ignore') as f:
data = yaml.safe_load(f) # data dict
if zipped:
data['path'] = data_dir # TODO: should this be dir.resolve()?
check_dataset(data, autodownload) # download dataset if missing
hub_dir = Path(data['path'] + ('-hub' if hub else ''))
stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary
for split in 'train', 'val', 'test':
if data.get(split) is None:
stats[split] = None # i.e. no test set
continue
x = []
dataset = LoadImagesAndLabels(data[split]) # load dataset
for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):
x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc']))
x = np.array(x) # shape(128x80)
stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
'per_class': (x > 0).sum(0).tolist()},
'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in
zip(dataset.img_files, dataset.labels)]}
if hub:
im_dir = hub_dir / 'images'
im_dir.mkdir(parents=True, exist_ok=True)
for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'):
pass
# Profile
stats_path = hub_dir / 'stats.json'
if profile:
for _ in range(1):
file = stats_path.with_suffix('.npy')
t1 = time.time()
np.save(file, stats)
t2 = time.time()
x = np.load(file, allow_pickle=True)
print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
file = stats_path.with_suffix('.json')
t1 = time.time()
with open(file, 'w') as f:
json.dump(stats, f) # save stats *.json
t2 = time.time()
with open(file, 'r') as f:
                x = json.load(f)  # load stats dict
print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write')
# Save, print and return
if hub:
print(f'Saving {stats_path.resolve()}...')
with open(stats_path, 'w') as f:
json.dump(stats, f) # save stats.json
if verbose:
print(json.dumps(stats, indent=2, sort_keys=False))
return stats
|
plugin.py
|
import time
from threading import Thread
import helper
from brewtils.plugin import RemotePlugin
from brewtils.decorators import system, parameter
thread_map = {}
def start_plugin(plugin, client):
global thread_map
t = Thread(target=plugin.run)
t.daemon = True
t.start()
t.join(1)
if t.is_alive():
thread_map[plugin.unique_name] = {'thread': t, 'plugin': plugin}
else:
raise Exception("Could not start plugin %s" % plugin.unique_name)
wait_for_status(client, plugin.instance.id)
def wait_for_status(client, instance_id, timeout=5, max_delay=1):
instance = helper.get_instance(client, instance_id)
delay_time = 0.01
total_wait_time = 0
while instance.status not in ['RUNNING', 'STOPPED', 'DEAD']:
if timeout and total_wait_time > timeout:
raise Exception("Timed out waiting for instance to start")
time.sleep(delay_time)
total_wait_time += delay_time
delay_time = min(delay_time * 2, max_delay)
instance = helper.get_instance(client, instance.id)
return instance
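# Illustrative note (added sketch): wait_for_status polls with exponential backoff,
# doubling the sleep each iteration (0.01s, 0.02s, 0.04s, ...) up to max_delay, and
# raises once total_wait_time exceeds timeout. The client/plugin objects below are
# assumed to come from the surrounding helpers.
def _example_wait(client, plugin):
    instance = wait_for_status(client, plugin.instance.id, timeout=10, max_delay=0.5)
    return instance.status  # one of 'RUNNING', 'STOPPED', 'DEAD'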
def stop_plugin(plugin):
if plugin.unique_name in thread_map:
p = thread_map[plugin.unique_name]['plugin']
t = thread_map[plugin.unique_name]['thread']
p._stop('request')
t.join(2)
if t.is_alive():
raise Exception("Could not stop plugin: %s" % plugin.unique_name)
def create_plugin(name, version, clazz, **kwargs):
config = helper.get_config()
return RemotePlugin(client=clazz(), name=name, version=version,
bg_host=config.bg_host, bg_port=config.bg_port,
ssl_enabled=config.ssl_enabled, **kwargs)
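# End-to-end lifecycle sketch (added illustration; assumes a reachable beer-garden
# configured via helper.get_config() and a client object usable by helper.get_instance):
def _example_plugin_lifecycle(client):
    plugin = create_plugin('test', '1.0.0', TestPluginV1)
    start_plugin(plugin, client)  # run the plugin in a daemon thread
    stop_plugin(plugin)           # request a stop and join the thread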
@system
class TestPluginV1(object):
@parameter(key="x", type="Integer")
@parameter(key="y", type="Integer")
def add(self, x, y):
"""Add"""
return x + y
@system
class TestPluginV2(object):
@parameter(key="x", type="Integer")
@parameter(key="y", type="Integer")
def add(self, x, y):
"""Add"""
return x + y
@parameter(key="x", type="Integer")
@parameter(key="y", type="Integer")
def subtract(self, x, y):
"""Add"""
return x - y
@system
class TestPluginV1BetterDescriptions(object):
@parameter(key="x", type="Integer",
description="X, which represents an integer")
@parameter(key="y", type="Integer",
description="Y, will be added to X (also an integer)")
def add(self, x, y):
"""Add two numbers together, this description is much better"""
return x + y
|
plotting.py
|
"""
vtki plotting module
"""
import collections
import ctypes
import logging
import os
import time
from threading import Thread
from subprocess import PIPE, Popen
import imageio
import numpy as np
import vtk
from vtk.util import numpy_support as VN
import vtki
from vtki.export import export_plotter_vtkjs
from vtki.utilities import (get_scalar, is_vtki_obj, numpy_to_texture, wrap,
_raise_not_matching)
_ALL_PLOTTERS = {}
def close_all():
"""Close all open/active plotters"""
for key, p in _ALL_PLOTTERS.items():
p.close()
_ALL_PLOTTERS.clear()
return True
MAX_N_COLOR_BARS = 10
PV_BACKGROUND = [82/255., 87/255., 110/255.]
FONT_KEYS = {'arial': vtk.VTK_ARIAL,
'courier': vtk.VTK_COURIER,
'times': vtk.VTK_TIMES}
log = logging.getLogger(__name__)
log.setLevel('CRITICAL')
rcParams = {
'background' : [0.3, 0.3, 0.3],
'camera' : {
'position' : [1, 1, 1],
'viewup' : [0, 0, 1],
},
'window_size' : [1024, 768],
'font' : {
'family' : 'courier',
'size' : 12,
'title_size': None,
'label_size' : None,
'color' : [1, 1, 1],
'fmt' : None,
},
'cmap' : 'jet',
'color' : 'white',
'nan_color' : 'darkgray',
'outline_color' : 'white',
'colorbar_orientation' : 'horizontal',
'colorbar_horizontal' : {
'width' : 0.60,
'height' : 0.08,
'position_x' : 0.35,
'position_y' : 0.02,
},
'colorbar_vertical' : {
'width' : 0.1,
'height' : 0.8,
'position_x' : 0.85,
'position_y' : 0.1,
},
'show_scalar_bar' : True,
'show_edges' : False,
'lighting' : True,
'interactive' : False,
'render_points_as_spheres' : False,
'use_panel' : True,
}
DEFAULT_THEME = dict(rcParams)
def set_plot_theme(theme):
"""Set the plotting parameters to a predefined theme"""
if theme.lower() in ['paraview', 'pv']:
rcParams['background'] = PV_BACKGROUND
rcParams['cmap'] = 'coolwarm'
rcParams['font']['family'] = 'arial'
rcParams['font']['label_size'] = 16
rcParams['show_edges'] = False
elif theme.lower() in ['document', 'doc', 'paper', 'report']:
rcParams['background'] = 'white'
rcParams['cmap'] = 'viridis'
rcParams['font']['size'] = 18
rcParams['font']['title_size'] = 18
rcParams['font']['label_size'] = 18
rcParams['font']['color'] = 'black'
rcParams['show_edges'] = False
rcParams['color'] = 'tan'
rcParams['outline_color'] = 'black'
elif theme.lower() in ['night', 'dark']:
rcParams['background'] = 'black'
rcParams['cmap'] = 'viridis'
rcParams['font']['color'] = 'white'
rcParams['show_edges'] = False
rcParams['color'] = 'tan'
rcParams['outline_color'] = 'white'
elif theme.lower() in ['default']:
for k,v in DEFAULT_THEME.items():
rcParams[k] = v
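# Usage sketch (added illustration): switch the module-level rcParams to a predefined
# theme and restore the defaults afterwards.
def _example_themes():
    set_plot_theme('document')  # white background, viridis, larger fonts
    set_plot_theme('default')   # restore the values captured in DEFAULT_THEME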
def run_from_ipython():
""" returns True when run from IPython """
try:
py = __IPYTHON__
return True
except NameError:
return False
def opacity_transfer_function(key, n_colors):
"""Get the opacity transfer function results: range from 0 to 255
"""
transfer_func = {
'linear': np.linspace(0, 255, n_colors, dtype=np.uint8),
'linear_r': np.linspace(0, 255, n_colors, dtype=np.uint8)[::-1],
'geom': np.geomspace(1e-6, 255, n_colors, dtype=np.uint8),
'geom_r': np.geomspace(255, 1e-6, n_colors, dtype=np.uint8),
}
try:
return transfer_func[key]
except KeyError:
        raise KeyError('opacity transfer function ({}) unknown.'.format(key))
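# Illustrative note (added sketch): the returned uint8 array is applied alongside the
# colormap table, so 'linear' with five colors ramps opacity from 0 to 255.
def _example_opacity():
    tf = opacity_transfer_function('linear', 5)
    # array([  0,  63, 127, 191, 255], dtype=uint8)
    return tf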
def plot(var_item, off_screen=None, full_screen=False, screenshot=None,
interactive=True, cpos=None, window_size=None,
show_bounds=False, show_axes=True, notebook=None, background=None,
text='', return_img=False, eye_dome_lighting=False, use_panel=None,
**kwargs):
"""
Convenience plotting function for a vtk or numpy object.
Parameters
----------
    var_item : vtk or numpy object
VTK object or numpy array to be plotted.
off_screen : bool
Plots off screen when True. Helpful for saving screenshots
without a window popping up.
full_screen : bool, optional
Opens window in full screen. When enabled, ignores window_size.
Default False.
screenshot : str or bool, optional
Saves screenshot to file when enabled. See:
help(vtkinterface.Plotter.screenshot). Default disabled.
When True, takes screenshot and returns numpy array of image.
window_size : list, optional
Window size in pixels. Defaults to [1024, 768]
show_bounds : bool, optional
Shows mesh bounds when True. Default False. Alias ``show_grid`` also
accepted.
notebook : bool, optional
        When True, the resulting plot is placed inline in a jupyter notebook.
Assumes a jupyter console is active.
show_axes : bool, optional
Shows a vtk axes widget. Enabled by default.
text : str, optional
Adds text at the bottom of the plot.
**kwargs : optional keyword arguments
See help(Plotter.add_mesh) for additional options.
Returns
-------
cpos : list
List of camera position, focal point, and view up.
img : numpy.ndarray
Array containing pixel RGB and alpha. Sized:
[Window height x Window width x 3] for transparent_background=False
[Window height x Window width x 4] for transparent_background=True
Returned only when screenshot enabled
"""
if notebook is None:
if run_from_ipython():
try:
notebook = type(get_ipython()).__module__.startswith('ipykernel.')
except NameError:
pass
if notebook:
off_screen = notebook
plotter = Plotter(off_screen=off_screen, notebook=notebook)
if show_axes:
plotter.add_axes()
plotter.set_background(background)
if isinstance(var_item, list):
if len(var_item) == 2: # might be arrows
isarr_0 = isinstance(var_item[0], np.ndarray)
isarr_1 = isinstance(var_item[1], np.ndarray)
if isarr_0 and isarr_1:
plotter.add_arrows(var_item[0], var_item[1])
else:
for item in var_item:
plotter.add_mesh(item, **kwargs)
else:
for item in var_item:
plotter.add_mesh(item, **kwargs)
else:
plotter.add_mesh(var_item, **kwargs)
if text:
plotter.add_text(text)
if show_bounds or kwargs.get('show_grid', False):
if kwargs.get('show_grid', False):
plotter.show_grid()
else:
plotter.show_bounds()
if cpos is None:
cpos = plotter.get_default_cam_pos()
plotter.camera_position = cpos
plotter.camera_set = False
else:
plotter.camera_position = cpos
if eye_dome_lighting:
plotter.enable_eye_dome_lighting()
result = plotter.show(window_size=window_size,
auto_close=False,
interactive=interactive,
full_screen=full_screen,
screenshot=screenshot,
return_img=return_img,
use_panel=use_panel)
# close and return camera position and maybe image
plotter.close()
# Result will be handled by plotter.show(): cpos or [cpos, img]
return result
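# Minimal usage sketch (added illustration; assumes vtki.Sphere() is available as an
# example mesh and that writing 'sphere.png' in the working directory is acceptable):
def _example_plot():
    sphere = vtki.Sphere()
    cpos = plot(sphere, show_bounds=True, text='example sphere',
                off_screen=True, interactive=False, screenshot='sphere.png')
    return cpos  # camera position (plus an image array when return_img=True)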
def plot_arrows(cent, direction, **kwargs):
"""
Plots arrows as vectors
Parameters
----------
cent : np.ndarray
Accepts a single 3d point or array of 3d points.
    direction : np.ndarray
Accepts a single 3d point or array of 3d vectors.
Must contain the same number of items as cent.
**kwargs : additional arguments, optional
See help(vtki.Plot)
Returns
-------
Same as Plot. See help(vtki.Plot)
"""
return plot([cent, direction], **kwargs)
def system_supports_plotting():
"""
Check if x server is running
Returns
-------
system_supports_plotting : bool
        True when an X server is reachable (or when the ALLOW_PLOTTING
        environment variable is set to 'True'); False otherwise.
"""
try:
if os.environ['ALLOW_PLOTTING'] == 'True':
return True
except KeyError:
pass
try:
p = Popen(["xset", "-q"], stdout=PIPE, stderr=PIPE)
p.communicate()
return p.returncode == 0
    except Exception:
return False
class BasePlotter(object):
"""
To be used by the Plotter and QtInteractor classes.
Parameters
----------
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one renderer.
border : bool, optional
Draw a border around each render window. Default False.
border_color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
border_width : float, optional
Width of the border in pixels when enabled.
"""
def __new__(cls, *args, **kwargs):
if cls is BasePlotter:
raise TypeError("vtki.BasePlotter is an abstract class and may not be instantiated.")
return object.__new__(cls)
def __init__(self, shape=(1, 1), border=None, border_color='k',
border_width=1.0):
""" Initialize base plotter """
self.image_transparent_background = False
# by default add border for multiple plots
if border is None:
if shape != (1, 1):
border = True
else:
border = False
# add render windows
self.renderers = []
self._active_renderer_index = 0
assert_str = '"shape" should be a list or tuple'
        assert isinstance(shape, collections.abc.Iterable), assert_str
assert shape[0] > 0, '"shape" must be positive'
assert shape[1] > 0, '"shape" must be positive'
self.shape = shape
for i in reversed(range(shape[0])):
for j in range(shape[1]):
renderer = vtki.Renderer(self, border, border_color, border_width)
x0 = i/shape[0]
y0 = j/shape[1]
x1 = (i+1)/shape[0]
y1 = (j+1)/shape[1]
renderer.SetViewport(y0, x0, y1, x1)
self.renderers.append(renderer)
# This is a private variable to keep track of how many colorbars exist
# This allows us to keep adding colorbars without overlapping
self._scalar_bar_slots = set(range(MAX_N_COLOR_BARS))
self._scalar_bar_slot_lookup = {}
# This keeps track of scalar names already plotted and their ranges
self._scalar_bar_ranges = {}
self._scalar_bar_mappers = {}
self._scalar_bar_actors = {}
self._scalar_bar_widgets = {}
self._actors = {}
# track if the camera has been setup
# self.camera_set = False
self._first_time = True
# Keep track of the scale
self._labels = []
# Set default style
self._style = vtk.vtkInteractorStyleRubberBandPick()
# Add self to open plotters
_ALL_PLOTTERS[str(hex(id(self)))] = self
# lighting style
self.lighting = vtk.vtkLightKit()
# self.lighting.SetHeadLightWarmth(1.0)
# self.lighting.SetHeadLightWarmth(1.0)
for renderer in self.renderers:
self.lighting.AddLightsToRenderer(renderer)
renderer.LightFollowCameraOn()
def update_style(self):
if not hasattr(self, '_style'):
self._style = vtk.vtkInteractorStyleTrackballCamera()
if hasattr(self, 'iren'):
return self.iren.SetInteractorStyle(self._style)
def enable_trackball_style(self):
""" sets the interactive style to trackball - the default syle """
self._style = vtk.vtkInteractorStyleTrackballCamera()
return self.update_style()
def enable_image_style(self):
""" sets the interactive style to image
Controls:
- Left Mouse button triggers window level events
- CTRL Left Mouse spins the camera around its view plane normal
- SHIFT Left Mouse pans the camera
- CTRL SHIFT Left Mouse dollys (a positional zoom) the camera
- Middle mouse button pans the camera
- Right mouse button dollys the camera.
- SHIFT Right Mouse triggers pick events
"""
self._style = vtk.vtkInteractorStyleImage()
return self.update_style()
def enable_joystick_style(self):
""" sets the interactive style to joystick
allows the user to move (rotate, pan, etc.) the camera, the point of
view for the scene. The position of the mouse relative to the center of
the scene determines the speed at which the camera moves, and the speed
of the mouse movement determines the acceleration of the camera, so the
        camera continues to move even if the mouse is not moving.
For a 3-button mouse, the left button is for rotation, the right button
for zooming, the middle button for panning, and ctrl + left button for
spinning. (With fewer mouse buttons, ctrl + shift + left button is
for zooming, and shift + left button is for panning.)
"""
self._style = vtk.vtkInteractorStyleJoystickCamera()
return self.update_style()
def enable_zoom_style(self):
""" sets the interactive style to rubber band zoom
This interactor style allows the user to draw a rectangle in the render
window using the left mouse button. When the mouse button is released,
the current camera zooms by an amount determined from the shorter side
of the drawn rectangle.
"""
self._style = vtk.vtkInteractorStyleRubberBandZoom()
return self.update_style()
def enable_terrain_style(self):
""" sets the interactive style to terrain
Used to manipulate a camera which is viewing a scene with a natural
view up, e.g., terrain. The camera in such a scene is manipulated by
specifying azimuth (angle around the view up vector) and elevation
(the angle from the horizon).
"""
self._style = vtk.vtkInteractorStyleTerrain()
return self.update_style()
def enable_rubber_band_style(self):
""" sets the interactive style to rubber band picking
This interactor style allows the user to draw a rectangle in the render
window by hitting 'r' and then using the left mouse button.
When the mouse button is released, the attached picker operates on the
pixel in the center of the selection rectangle. If the picker happens to
be a vtkAreaPicker it will operate on the entire selection rectangle.
When the 'p' key is hit the above pick operation occurs on a 1x1
rectangle. In other respects it behaves the same as its parent class.
"""
self._style = vtk.vtkInteractorStyleRubberBandPick()
return self.update_style()
def set_focus(self, point):
""" sets focus to a point """
if isinstance(point, np.ndarray):
if point.ndim != 1:
point = point.ravel()
self.camera.SetFocalPoint(point)
self._render()
def set_position(self, point, reset=False):
""" sets camera position to a point """
if isinstance(point, np.ndarray):
if point.ndim != 1:
point = point.ravel()
self.camera.SetPosition(point)
if reset:
self.reset_camera()
self.camera_set = True
self._render()
def set_viewup(self, vector):
""" sets camera viewup vector """
if isinstance(vector, np.ndarray):
if vector.ndim != 1:
vector = vector.ravel()
self.camera.SetViewUp(vector)
self._render()
def _render(self):
""" redraws render window if the render window exists """
if hasattr(self, 'ren_win'):
if hasattr(self, 'render_trigger'):
self.render_trigger.emit()
elif not self._first_time:
self.render()
def add_axes(self, interactive=None, color=None):
""" Add an interactive axes widget """
if interactive is None:
interactive = rcParams['interactive']
if hasattr(self, 'axes_widget'):
self.axes_widget.SetInteractive(interactive)
self._update_axes_color(color)
return
self.axes_actor = vtk.vtkAxesActor()
self.axes_widget = vtk.vtkOrientationMarkerWidget()
self.axes_widget.SetOrientationMarker(self.axes_actor)
if hasattr(self, 'iren'):
self.axes_widget.SetInteractor(self.iren)
self.axes_widget.SetEnabled(1)
self.axes_widget.SetInteractive(interactive)
# Set the color
self._update_axes_color(color)
def hide_axes(self):
"""Hide the axes orientation widget"""
if hasattr(self, 'axes_widget'):
self.axes_widget.EnabledOff()
def show_axes(self):
"""Show the axes orientation widget"""
if hasattr(self, 'axes_widget'):
self.axes_widget.EnabledOn()
else:
self.add_axes()
def key_press_event(self, obj, event):
""" Listens for key press event """
key = self.iren.GetKeySym()
log.debug('Key %s pressed' % key)
if key == 'q':
self.q_pressed = True
elif key == 'b':
self.observer = self.iren.AddObserver('LeftButtonPressEvent',
self.left_button_down)
elif key == 'v':
self.isometric_view_interactive()
def left_button_down(self, obj, event_type):
"""Register the event for a left button down click"""
# Get 2D click location on window
click_pos = self.iren.GetEventPosition()
# Get corresponding click location in the 3D plot
picker = vtk.vtkWorldPointPicker()
picker.Pick(click_pos[0], click_pos[1], 0, self.renderer)
self.pickpoint = np.asarray(picker.GetPickPosition()).reshape((-1, 3))
if np.any(np.isnan(self.pickpoint)):
self.pickpoint[:] = 0
def isometric_view_interactive(self):
""" sets the current interactive render window to isometric view """
interactor = self.iren.GetInteractorStyle()
renderer = interactor.GetCurrentRenderer()
renderer.view_isometric()
def update(self, stime=1, force_redraw=True):
"""
        Update the window, redraw, and process pending messages.
Parameters
----------
stime : int, optional
            Duration of the timer that interrupts vtkRenderWindowInteractor, in
milliseconds.
force_redraw : bool, optional
Call vtkRenderWindowInteractor.Render() immediately.
"""
if stime <= 0:
stime = 1
curr_time = time.time()
if Plotter.last_update_time > curr_time:
Plotter.last_update_time = curr_time
if not hasattr(self, 'iren'):
return
update_rate = self.iren.GetDesiredUpdateRate()
if (curr_time - Plotter.last_update_time) > (1.0/update_rate):
self.right_timer_id = self.iren.CreateRepeatingTimer(stime)
self.iren.Start()
self.iren.DestroyTimer(self.right_timer_id)
self._render()
Plotter.last_update_time = curr_time
else:
if force_redraw:
self.iren.Render()
def add_mesh(self, mesh, color=None, style=None, scalars=None,
rng=None, stitle=None, show_edges=None,
point_size=5.0, opacity=1.0, line_width=None,
flip_scalars=False, lighting=None, n_colors=256,
interpolate_before_map=False, cmap=None, label=None,
reset_camera=None, scalar_bar_args=None,
multi_colors=False, name=None, texture=None,
render_points_as_spheres=None,
render_lines_as_tubes=False, edge_color='black',
ambient=0.0, show_scalar_bar=None, nan_color=None,
nan_opacity=1.0, loc=None, backface_culling=False,
rgb=False, categories=False, **kwargs):
"""
Adds a unstructured, structured, or surface mesh to the
plotting object.
Also accepts a 3D numpy.ndarray
Parameters
----------
mesh : vtk unstructured, structured, polymesh, or 3D numpy.ndarray
A vtk unstructured, structured, or polymesh to plot.
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
Color will be overridden when scalars are input.
style : string, optional
            Visualization style of the vtk mesh. One of the following:
style='surface'
style='wireframe'
style='points'
Defaults to 'surface'
scalars : numpy array, optional
Scalars used to "color" the mesh. Accepts an array equal
to the number of cells or the number of points in the
mesh. Array should be sized as a single vector. If both
color and scalars are None, then the active scalars are
used
rng : 2 item list, optional
Range of mapper for scalars. Defaults to minimum and
maximum of scalars array. Example: ``[-1, 2]``. ``clim``
is also an accepted alias for this.
stitle : string, optional
Scalar title. By default there is no scalar legend bar.
Setting this creates the legend bar and adds a title to
it. To create a bar with no title, use an empty string
(i.e. '').
show_edges : bool, optional
Shows the edges of a mesh. Does not apply to a wireframe
representation.
point_size : float, optional
Point size. Applicable when style='points'. Default 5.0
opacity : float, optional
Opacity of mesh. Should be between 0 and 1. Default 1.0.
A string option can also be specified to map the scalar range
to the opacity. Options are: linear, linear_r, geom, geom_r
line_width : float, optional
Thickness of lines. Only valid for wireframe and surface
representations. Default None.
flip_scalars : bool, optional
Flip direction of cmap.
lighting : bool, optional
            Enable or disable view direction lighting. Defaults to the
            ``lighting`` rcParam (``True``).
n_colors : int, optional
Number of colors to use when displaying scalars. Default
256.
interpolate_before_map : bool, optional
Enabling makes for a smoother scalar display. Default
False
cmap : str, optional
            cmap string. See available matplotlib cmaps. Only
            applicable when displaying scalars. Defaults to the ``cmap``
            rcParam when matplotlib is available; otherwise VTK's default
            rainbow lookup table is used.
multi_colors : bool, optional
If a ``MultiBlock`` dataset is given this will color each
block by a solid color using matplotlib's color cycler.
name : str, optional
The name for the added mesh/actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
texture : vtk.vtkTexture or np.ndarray or boolean, optional
A texture to apply if the input mesh has texture
coordinates. This will not work with MultiBlock
            datasets. If set to ``True``, the first available texture
on the object will be used. If a string name is given, it
will pull a texture with that name associated to the input
mesh.
ambient : float, optional
When lighting is enabled, this is the amount of light from
0 to 1 that reaches the actor when not directed at the
            light source emitted from the viewer. Default 0.0.
nan_color : string or 3 item list, optional, defaults to gray
The color to use for all NaN values in the plotted scalar
array.
nan_opacity : float, optional
Opacity of NaN values. Should be between 0 and 1.
Default 1.0
        backface_culling : bool, optional
Does not render faces that should not be visible to the
plotter. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Default False.
rgb : bool, optional
            If a 2 dimensional array is passed as the scalars, plot those
            values as RGB(A) colors. ``rgba`` is also an accepted alias for this.
categories : bool, optional
If fetching a colormap from matplotlib, this is the number of
categories to use in that colormap. If set to ``True``, then
the number of unique values in the scalar array will be used.
Returns
-------
actor: vtk.vtkActor
VTK actor of the mesh.
"""
# fixes lighting issue when using precalculated normals
if isinstance(mesh, vtk.vtkPolyData):
if mesh.GetPointData().HasArray('Normals'):
mesh.point_arrays['Normals'] = mesh.point_arrays.pop('Normals')
if scalar_bar_args is None:
scalar_bar_args = {}
if isinstance(mesh, np.ndarray):
mesh = vtki.PolyData(mesh)
style = 'points'
        # Convert the VTK data object to a vtki wrapped object if necessary
if not is_vtki_obj(mesh):
mesh = wrap(mesh)
if show_edges is None:
show_edges = rcParams['show_edges']
if show_scalar_bar is None:
show_scalar_bar = rcParams['show_scalar_bar']
if lighting is None:
lighting = rcParams['lighting']
if rng is None:
rng = kwargs.get('clim', None)
if render_points_as_spheres is None:
render_points_as_spheres = rcParams['render_points_as_spheres']
if name is None:
name = '{}({})'.format(type(mesh).__name__, str(hex(id(mesh))))
if isinstance(mesh, vtki.MultiBlock):
self.remove_actor(name, reset_camera=reset_camera)
            # first check the scalars
if rng is None and scalars is not None:
# Get the data range across the array for all blocks
# if scalar specified
if isinstance(scalars, str):
rng = mesh.get_data_range(scalars)
else:
# TODO: an array was given... how do we deal with
# that? Possibly a 2D arrays or list of
# arrays where first index corresponds to
# the block? This could get complicated real
# quick.
raise RuntimeError('Scalar array must be given as a string name for multiblock datasets.')
if multi_colors:
# Compute unique colors for each index of the block
try:
import matplotlib as mpl
from itertools import cycle
cycler = mpl.rcParams['axes.prop_cycle']
colors = cycle(cycler)
except ImportError:
multi_colors = False
logging.warning('Please install matplotlib for color cycles')
# Now iteratively plot each element of the multiblock dataset
actors = []
for idx in range(mesh.GetNumberOfBlocks()):
if mesh[idx] is None:
continue
# Get a good name to use
next_name = '{}-{}'.format(name, idx)
# Get the data object
if not is_vtki_obj(mesh[idx]):
data = wrap(mesh.GetBlock(idx))
if not is_vtki_obj(mesh[idx]):
continue # move on if we can't plot it
else:
data = mesh.GetBlock(idx)
if data is None:
# Note that a block can exist but be None type
continue
# Now check that scalars is available for this dataset
if isinstance(data, vtk.vtkMultiBlockDataSet) or get_scalar(data, scalars) is None:
ts = None
else:
ts = scalars
if multi_colors:
color = next(colors)['color']
a = self.add_mesh(data, color=color, style=style,
scalars=ts, rng=rng, stitle=stitle,
show_edges=show_edges,
point_size=point_size, opacity=opacity,
line_width=line_width,
flip_scalars=flip_scalars,
lighting=lighting, n_colors=n_colors,
interpolate_before_map=interpolate_before_map,
cmap=cmap, label=label,
scalar_bar_args=scalar_bar_args,
reset_camera=reset_camera, name=next_name,
texture=None,
render_points_as_spheres=render_points_as_spheres,
render_lines_as_tubes=render_lines_as_tubes,
edge_color=edge_color,
show_scalar_bar=show_scalar_bar, nan_color=nan_color,
nan_opacity=nan_opacity,
loc=loc, rgb=rgb, **kwargs)
actors.append(a)
if (reset_camera is None and not self.camera_set) or reset_camera:
cpos = self.get_default_cam_pos()
self.camera_position = cpos
self.camera_set = False
self.reset_camera()
return actors
if nan_color is None:
nan_color = rcParams['nan_color']
        nanr, nang, nanb = parse_color(nan_color)  # parse_color returns (r, g, b)
        nan_color = nanr, nang, nanb, nan_opacity
if color is True:
color = rcParams['color']
if mesh.n_points < 1:
raise RuntimeError('Empty meshes cannot be plotted. Input mesh has zero points.')
# set main values
self.mesh = mesh
self.mapper = vtk.vtkDataSetMapper()
self.mapper.SetInputData(self.mesh)
if isinstance(scalars, str):
self.mapper.SetArrayName(scalars)
actor, prop = self.add_actor(self.mapper,
reset_camera=reset_camera,
name=name, loc=loc, culling=backface_culling)
# Try to plot something if no preference given
if scalars is None and color is None and texture is None:
# Prefer texture first
if len(list(mesh.textures.keys())) > 0:
texture = True
# If no texture, plot any active scalar
else:
# Make sure scalar components are not vectors/tuples
scalars = mesh.active_scalar
                if scalars is None:  # or scalars.ndim != 1:
scalars = None
else:
if stitle is None:
stitle = mesh.active_scalar_info[1]
        if texture is True or isinstance(texture, (str, int)):
texture = mesh._activate_texture(texture)
if texture:
if isinstance(texture, np.ndarray):
texture = numpy_to_texture(texture)
if not isinstance(texture, (vtk.vtkTexture, vtk.vtkOpenGLTexture)):
raise TypeError('Invalid texture type ({})'.format(type(texture)))
if mesh.GetPointData().GetTCoords() is None:
raise AssertionError('Input mesh does not have texture coordinates to support the texture.')
actor.SetTexture(texture)
# Set color to white by default when using a texture
if color is None:
color = 'white'
if scalars is None:
show_scalar_bar = False
self.mapper.SetScalarModeToUsePointFieldData()
# Scalar formatting ===================================================
if cmap is None: # grab alias for cmaps: colormap
cmap = kwargs.get('colormap', None)
        if cmap is None: # Set default map if matplotlib is available
try:
import matplotlib
cmap = rcParams['cmap']
except ImportError:
pass
title = 'Data' if stitle is None else stitle
if scalars is not None:
# if scalars is a string, then get the first array found with that name
append_scalars = True
if isinstance(scalars, str):
title = scalars
scalars = get_scalar(mesh, scalars,
preference=kwargs.get('preference', 'cell'), err=True)
if stitle is None:
stitle = title
#append_scalars = False
if not isinstance(scalars, np.ndarray):
scalars = np.asarray(scalars)
if rgb is False or rgb is None:
rgb = kwargs.get('rgba', False)
if rgb:
if scalars.ndim != 2 or scalars.shape[1] < 3 or scalars.shape[1] > 4:
raise ValueError('RGB array must be n_points/n_cells by 3/4 in shape.')
if scalars.ndim != 1:
if rgb:
pass
elif scalars.ndim == 2 and (scalars.shape[0] == mesh.n_points or scalars.shape[0] == mesh.n_cells):
scalars = np.linalg.norm(scalars.copy(), axis=1)
title = '{}-normed'.format(title)
else:
scalars = scalars.ravel()
            if scalars.dtype == bool:
                scalars = scalars.astype(float)
# Scalar interpolation approach
if scalars.shape[0] == mesh.n_points:
self.mesh._add_point_scalar(scalars, title, append_scalars)
self.mapper.SetScalarModeToUsePointData()
self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
if interpolate_before_map:
self.mapper.InterpolateScalarsBeforeMappingOn()
elif scalars.shape[0] == mesh.n_cells:
self.mesh._add_cell_scalar(scalars, title, append_scalars)
self.mapper.SetScalarModeToUseCellData()
self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
if interpolate_before_map:
self.mapper.InterpolateScalarsBeforeMappingOn()
else:
_raise_not_matching(scalars, mesh)
# Set scalar range
if rng is None:
rng = [np.nanmin(scalars), np.nanmax(scalars)]
elif isinstance(rng, float) or isinstance(rng, int):
rng = [-rng, rng]
if np.any(rng) and not rgb:
self.mapper.SetScalarRange(rng[0], rng[1])
# Flip if requested
table = self.mapper.GetLookupTable()
table.SetNanColor(nan_color)
if cmap is not None:
try:
from matplotlib.cm import get_cmap
except ImportError:
cmap = None
logging.warning('Please install matplotlib for color maps.')
if cmap is not None:
try:
from matplotlib.cm import get_cmap
except ImportError:
raise Exception('cmap requires matplotlib')
if isinstance(cmap, str):
if categories:
if categories is True:
categories = len(np.unique(scalars))
cmap = get_cmap(cmap, categories)
else:
cmap = get_cmap(cmap)
# ELSE: assume cmap is callable
ctable = cmap(np.linspace(0, 1, n_colors))*255
ctable = ctable.astype(np.uint8)
                # Set opacities
if isinstance(opacity, str):
ctable[:,-1] = opacity_transfer_function(opacity, n_colors)
if flip_scalars:
ctable = np.ascontiguousarray(ctable[::-1])
table.SetTable(VN.numpy_to_vtk(ctable))
else: # no cmap specified
if flip_scalars:
table.SetHueRange(0.0, 0.66667)
else:
table.SetHueRange(0.66667, 0.0)
else:
self.mapper.SetScalarModeToUseFieldData()
# select view style
if not style:
style = 'surface'
style = style.lower()
if style == 'wireframe':
prop.SetRepresentationToWireframe()
if color is None:
color = rcParams['outline_color']
elif style == 'points':
prop.SetRepresentationToPoints()
elif style == 'surface':
prop.SetRepresentationToSurface()
else:
raise Exception('Invalid style. Must be one of the following:\n' +
'\t"surface"\n' +
'\t"wireframe"\n' +
'\t"points"\n')
prop.SetPointSize(point_size)
prop.SetAmbient(ambient)
# edge display style
if show_edges:
prop.EdgeVisibilityOn()
rgb_color = parse_color(color)
prop.SetColor(rgb_color)
if isinstance(opacity, (float, int)):
prop.SetOpacity(opacity)
prop.SetEdgeColor(parse_color(edge_color))
if render_points_as_spheres:
prop.SetRenderPointsAsSpheres(render_points_as_spheres)
if render_lines_as_tubes:
prop.SetRenderLinesAsTubes(render_lines_as_tubes)
# legend label
if label:
if not isinstance(label, str):
raise AssertionError('Label must be a string')
geom = single_triangle()
if scalars is not None:
geom = vtki.Box()
rgb_color = parse_color('black')
self._labels.append([geom, label, rgb_color])
# lighting display style
if not lighting:
prop.LightingOff()
# set line thickness
if line_width:
prop.SetLineWidth(line_width)
# Add scalar bar if available
if stitle is not None and show_scalar_bar and not rgb:
self.add_scalar_bar(stitle, **scalar_bar_args)
return actor
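    # Usage sketch (added illustration, not executed): with the concrete Plotter
    # subclass defined later in this module and an example mesh such as
    # vtki.Sphere(), add_mesh is typically driven like this; the scalar title and
    # options below are illustrative only:
    #     p = Plotter(off_screen=True)
    #     mesh = vtki.Sphere()
    #     p.add_mesh(mesh, scalars=mesh.points[:, 2], stitle='z height',
    #                cmap='viridis', show_edges=True, opacity='linear')
    #     p.show(auto_close=True)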
def update_scalar_bar_range(self, clim, name=None):
"""Update the value range of the active or named scalar bar.
Parameters
----------
        clim : 2 item list
            The new range of the scalar bar. Example: ``[-1, 2]``.
name : str, optional
The title of the scalar bar to update
"""
if isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
if len(clim) != 2:
raise TypeError('clim argument must be a length 2 iterable of values: (min, max).')
if name is None:
if not hasattr(self, 'mapper'):
raise RuntimeError('This plotter does not have an active mapper.')
return self.mapper.SetScalarRange(*clim)
# Use the name to find the desired actor
def update_mapper(mapper):
return mapper.SetScalarRange(*clim)
try:
for m in self._scalar_bar_mappers[name]:
update_mapper(m)
except KeyError:
            raise KeyError('Name ({}) not valid/not found in this plotter.'.format(name))
return
@property
def camera_set(self):
""" Returns if the camera of the active renderer has been set """
return self.renderer.camera_set
def get_default_cam_pos(self):
""" Return the default camera position of the active renderer """
return self.renderer.get_default_cam_pos()
@camera_set.setter
def camera_set(self, is_set):
""" Sets if the camera has been set on the active renderer"""
self.renderer.camera_set = is_set
@property
def renderer(self):
""" simply returns the active renderer """
return self.renderers[self._active_renderer_index]
@property
def bounds(self):
""" Returns the bounds of the active renderer """
return self.renderer.bounds
@property
def center(self):
""" Returns the center of the active renderer """
return self.renderer.center
def update_bounds_axes(self):
""" Update the bounds of the active renderer """
return self.renderer.update_bounds_axes()
def clear(self):
""" Clears plot by removing all actors and properties """
for renderer in self.renderers:
renderer.RemoveAllViewProps()
self._scalar_bar_slots = set(range(MAX_N_COLOR_BARS))
self._scalar_bar_slot_lookup = {}
self._scalar_bar_ranges = {}
self._scalar_bar_mappers = {}
self._scalar_bar_actors = {}
self._scalar_bar_widgets = {}
def remove_actor(self, actor, reset_camera=False):
"""
Removes an actor from the Plotter.
Parameters
----------
actor : vtk.vtkActor
Actor that has previously added to the Renderer.
reset_camera : bool, optional
Resets camera so all actors can be seen.
Returns
-------
success : bool
True when actor removed. False when actor has not been
removed.
"""
for renderer in self.renderers:
renderer.remove_actor(actor, reset_camera)
return True
def add_actor(self, uinput, reset_camera=False, name=None, loc=None,
culling=False):
"""
Adds an actor to render window. Creates an actor if input is
a mapper.
Parameters
----------
uinput : vtk.vtkMapper or vtk.vtkActor
vtk mapper or vtk actor to be added.
reset_camera : bool, optional
Resets the camera when true.
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
        culling : bool, optional
Does not render faces that should not be visible to the
plotter. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Default False.
Returns
-------
actor : vtk.vtkActor
The actor.
actor_properties : vtk.Properties
Actor properties.
"""
# add actor to the correct render window
self._active_renderer_index = self.loc_to_index(loc)
renderer = self.renderers[self._active_renderer_index]
return renderer.add_actor(uinput, reset_camera, name, culling)
def loc_to_index(self, loc):
"""
Return index of the render window given a location index.
Parameters
----------
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``.
Returns
-------
idx : int
Index of the render window.
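Examples
--------
A short sketch, assuming a 2 by 2 subplot layout:
>>> import vtki
>>> plotter = vtki.Plotter(shape=(2, 2))
>>> plotter.loc_to_index((1, 1))
3
>>> plotter.loc_to_index(2)
2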
"""
if loc is None:
return self._active_renderer_index
elif isinstance(loc, int):
return loc
elif isinstance(loc, collections.Iterable):
assert len(loc) == 2, '"loc" must contain two items'
# row-major indexing, consistent with index_to_loc: index = row * n_columns + column
return loc[0] * self.shape[1] + loc[1]
def index_to_loc(self, index):
"""Convert a 1D index location to the 2D location on the plotting grid
"""
sz = int(self.shape[0] * self.shape[1])
idxs = np.array([i for i in range(sz)], dtype=int).reshape(self.shape)
args = np.argwhere(idxs == index)
if len(args) < 1:
raise RuntimeError('Index ({}) is out of range.'.format(index))
return args[0]
@property
def camera(self):
""" The active camera of the active renderer """
return self.renderer.camera
def add_axes_at_origin(self, loc=None):
"""
Add axes actor at the origin of a render window.
Parameters
----------
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. When None, defaults to the
active render window.
Returns
--------
marker_actor : vtk.vtkAxesActor
vtkAxesActor actor
"""
self._active_renderer_index = self.loc_to_index(loc)
return self.renderers[self._active_renderer_index].add_axes_at_origin()
def show_bounds(self, mesh=None, bounds=None, show_xaxis=True,
show_yaxis=True, show_zaxis=True, show_xlabels=True,
show_ylabels=True, show_zlabels=True, italic=False,
bold=True, shadow=False, font_size=None,
font_family=None, color=None,
xlabel='X Axis', ylabel='Y Axis', zlabel='Z Axis',
use_2d=False, grid=None, location='closest', ticks=None,
all_edges=False, corner_factor=0.5, fmt=None,
minor_ticks=False, loc=None, padding=0.0):
"""
Adds bounds axes. Shows the bounds of the most recent input
mesh unless mesh is specified.
Parameters
----------
mesh : vtkPolydata or unstructured grid, optional
Input mesh to draw bounds axes around
bounds : list or tuple, optional
Bounds to override mesh bounds.
[xmin, xmax, ymin, ymax, zmin, zmax]
show_xaxis : bool, optional
Makes x axis visible. Default True.
show_yaxis : bool, optional
Makes y axis visible. Default True.
show_zaxis : bool, optional
Makes z axis visible. Default True.
show_xlabels : bool, optional
Shows x labels. Default True.
show_ylabels : bool, optional
Shows y labels. Default True.
show_zlabels : bool, optional
Shows z labels. Default True.
italic : bool, optional
Italicises axis labels and numbers. Default False.
bold : bool, optional
Bolds axis labels and numbers. Default True.
shadow : bool, optional
Adds a black shadow to the text. Default False.
font_size : float, optional
Sets the size of the label font. Defaults to 16.
font_family : string, optional
Font family. Must be either courier, times, or arial.
color : string or 3 item list, optional
Color of all labels and axis titles. Default white.
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
xlabel : string, optional
Title of the x axis. Default "X Axis"
ylabel : string, optional
Title of the y axis. Default "Y Axis"
zlabel : string, optional
Title of the z axis. Default "Z Axis"
use_2d : bool, optional
A bug with vtk 6.3 on Windows seems to cause this function
to crash. Enabling this option can give smoother plotting in
other environments.
grid : bool or str, optional
Add grid lines to the backface (``True``, ``'back'``, or
``'backface'``) or to the frontface (``'front'``,
``'frontface'``) of the axes actor.
location : str, optional
Set how the axes are drawn: either static (``'all'``),
closest triad (``'front'``), furthest triad (``'back'``),
static closest to the origin (``'origin'``), or outer
edges (``'outer'``) in relation to the camera
position. Options include: ``'all', 'front', 'back',
'origin', 'outer'``
ticks : str, optional
Set how the ticks are drawn on the axes grid. Options include:
``'inside', 'outside', 'both'``
all_edges : bool, optional
Adds an unlabeled and unticked box at the boundaries of
plot. Useful for when wanting to plot outer grids while
still retaining all edges of the boundary.
corner_factor : float, optional
If ``all_edges`` is ``True``, this is the factor along each axis to
draw the default box. Default is 0.5 to show the full box.
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
padding : float, optional
An optional percent padding along each axial direction to cushion
the datasets in the scene from the axes annotations. Defaults to
have no padding
Returns
-------
cube_axes_actor : vtk.vtkCubeAxesActor
Bounds actor
Examples
--------
>>> import vtki
>>> from vtki import examples
>>> mesh = vtki.Sphere()
>>> plotter = vtki.Plotter()
>>> _ = plotter.add_mesh(mesh)
>>> _ = plotter.show_bounds(grid='front', location='outer', all_edges=True)
>>> plotter.show() # doctest:+SKIP
"""
kwargs = locals()
_ = kwargs.pop('self')
_ = kwargs.pop('loc')
self._active_renderer_index = self.loc_to_index(loc)
renderer = self.renderers[self._active_renderer_index]
renderer.show_bounds(**kwargs)
def add_bounds_axes(self, *args, **kwargs):
"""Deprecated"""
logging.warning('`add_bounds_axes` is deprecated. Use `show_bounds` or `show_grid`.')
return self.show_bounds(*args, **kwargs)
def add_bounding_box(self, color=None, corner_factor=0.5, line_width=None,
opacity=1.0, render_lines_as_tubes=False, lighting=None,
reset_camera=None, loc=None):
"""
Adds an unlabeled and unticked box at the boundaries of
plot. Useful for when wanting to plot outer grids while
still retaining all edges of the boundary.
Parameters
----------
corner_factor : float, optional
This is the factor along each axis to draw the default box.
Default is 0.5 to show the full box.
line_width : float, optional
Thickness of lines.
opacity : float, optional
Opacity of mesh. Should be between 0 and 1. Default 1.0
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
"""
kwargs = locals()
_ = kwargs.pop('self')
_ = kwargs.pop('loc')
self._active_renderer_index = self.loc_to_index(loc)
renderer = self.renderers[self._active_renderer_index]
return renderer.add_bounding_box(**kwargs)
def remove_bounding_box(self, loc=None):
"""
Removes bounding box from the active renderer.
Parameters
----------
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
"""
self._active_renderer_index = self.loc_to_index(loc)
renderer = self.renderers[self._active_renderer_index]
renderer.remove_bounding_box()
def remove_bounds_axes(self, loc=None):
"""
Removes bounds axes from the active renderer.
Parameters
----------
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
"""
self._active_renderer_index = self.loc_to_index(loc)
renderer = self.renderers[self._active_renderer_index]
renderer.remove_bounds_axes()
def subplot(self, index_x, index_y):
"""
Sets the active subplot.
Parameters
----------
index_x : int
Index of the subplot to activate in the x direction.
index_y : int
Index of the subplot to activate in the y direction.
"""
self._active_renderer_index = self.loc_to_index((index_x, index_y))
def show_grid(self, **kwargs):
"""
A wrapped implementation of ``show_bounds`` that changes the default
behaviour to use grid lines and to show the axes labels on the outer
edges. This is intended to be similar to ``matplotlib``'s ``grid``
function.
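Examples
--------
A brief sketch of the wrapped call:
>>> import vtki
>>> plotter = vtki.Plotter()
>>> _ = plotter.add_mesh(vtki.Sphere())
>>> _ = plotter.show_grid()
>>> plotter.show() # doctest:+SKIP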
"""
kwargs.setdefault('grid', 'back')
kwargs.setdefault('location', 'outer')
kwargs.setdefault('ticks', 'both')
return self.show_bounds(**kwargs)
def set_scale(self, xscale=None, yscale=None, zscale=None, reset_camera=True):
"""
Scale all the datasets in the scene of the active renderer.
Scaling is performed independently on the X, Y and Z axes.
A scale of zero is illegal and will be replaced with one.
Parameters
----------
xscale : float, optional
Scaling of the x axis. Must be greater than zero.
yscale : float, optional
Scaling of the y axis. Must be greater than zero.
zscale : float, optional
Scaling of the z axis. Must be greater than zero.
reset_camera : bool, optional
Resets camera so all actors can be seen.
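Examples
--------
A minimal sketch, exaggerating the z axis of the scene by a factor of two:
>>> import vtki
>>> plotter = vtki.Plotter()
>>> _ = plotter.add_mesh(vtki.Sphere())
>>> plotter.set_scale(zscale=2.0)
>>> plotter.show() # doctest:+SKIP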
"""
self.renderer.set_scale(xscale, yscale, zscale, reset_camera)
@property
def scale(self):
""" The scaling of the active renderer. """
return self.renderer.scale
def _update_axes_color(self, color):
"""Internal helper to set the axes label color"""
prop_x = self.axes_actor.GetXAxisCaptionActor2D().GetCaptionTextProperty()
prop_y = self.axes_actor.GetYAxisCaptionActor2D().GetCaptionTextProperty()
prop_z = self.axes_actor.GetZAxisCaptionActor2D().GetCaptionTextProperty()
if color is None:
color = rcParams['font']['color']
color = parse_color(color)
for prop in [prop_x, prop_y, prop_z]:
prop.SetColor(color[0], color[1], color[2])
prop.SetShadow(False)
return
def add_scalar_bar(self, title=None, n_labels=5, italic=False,
bold=True, title_font_size=None,
label_font_size=None, color=None,
font_family=None, shadow=False, mapper=None,
width=None, height=None, position_x=None,
position_y=None, vertical=None,
interactive=False, fmt=None, use_opacity=True,
outline=False):
"""
Creates scalar bar using the ranges as set by the last input
mesh.
Parameters
----------
title : string, optional
Title of the scalar bar. Default None
n_labels : int, optional
Number of labels to use for the scalar bar.
italic : bool, optional
Italicises title and bar labels. Default False.
bold : bool, optional
Bolds title and bar labels. Default True
title_font_size : float, optional
Sets the size of the title font. Defaults to None and is sized
automatically.
label_font_size : float, optional
Sets the size of the label font. Defaults to None and is sized
automatically.
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
font_family : string, optional
Font family. Must be either courier, times, or arial.
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
width : float, optional
The percentage (0 to 1) width of the window for the colorbar
height : float, optional
The percentage (0 to 1) height of the window for the colorbar
position_x : float, optional
The percentage (0 to 1) along the window's horizontal
direction to place the bottom left corner of the colorbar
position_y : float, optional
The percentage (0 to 1) along the window's vertical
direction to place the bottom left corner of the colorbar
interactive : bool, optional
Use a widget to control the size and location of the scalar bar.
use_opacity : bool, optional
Optionally display the opacity mapping on the scalar bar
outline : bool, optional
Optionally outline the scalar bar to make opacity mappings more
obvious.
Notes
-----
Setting title_font_size or label_font_size disables automatic font
sizing for both the title and labels.
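Examples
--------
A minimal sketch, assuming a mesh with scalars has been added first
(the title and placement values are illustrative):
>>> import vtki
>>> sphere = vtki.Sphere()
>>> plotter = vtki.Plotter()
>>> _ = plotter.add_mesh(sphere, scalars=sphere.points[:, 2])
>>> plotter.add_scalar_bar(title='Elevation', vertical=True,
...                        position_x=0.85, position_y=0.05)
>>> plotter.show() # doctest:+SKIP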
"""
if font_family is None:
font_family = rcParams['font']['family']
if label_font_size is None:
label_font_size = rcParams['font']['label_size']
if title_font_size is None:
title_font_size = rcParams['font']['title_size']
if color is None:
color = rcParams['font']['color']
if fmt is None:
fmt = rcParams['font']['fmt']
if vertical is None:
if rcParams['colorbar_orientation'].lower() == 'vertical':
vertical = True
# Automatically choose size if not specified
if width is None:
if vertical:
width = rcParams['colorbar_vertical']['width']
else:
width = rcParams['colorbar_horizontal']['width']
if height is None:
if vertical:
height = rcParams['colorbar_vertical']['height']
else:
height = rcParams['colorbar_horizontal']['height']
# check if mapper exists
if mapper is None:
if not hasattr(self, 'mapper'):
raise Exception('Mapper does not exist. ' +
'Add a mesh with scalars first.')
mapper = self.mapper
if title:
# Check that this data hasn't already been plotted
if title in list(self._scalar_bar_ranges.keys()):
rng = list(self._scalar_bar_ranges[title])
newrng = mapper.GetScalarRange()
oldmappers = self._scalar_bar_mappers[title]
# get max for range and reset everything
if newrng[0] < rng[0]:
rng[0] = newrng[0]
if newrng[1] > rng[1]:
rng[1] = newrng[1]
for m in oldmappers:
m.SetScalarRange(rng[0], rng[1])
mapper.SetScalarRange(rng[0], rng[1])
self._scalar_bar_mappers[title].append(mapper)
self._scalar_bar_ranges[title] = rng
# Color bar already present and ready to be used so returning
return
# Automatically choose location if not specified
if position_x is None or position_y is None:
try:
slot = min(self._scalar_bar_slots)
self._scalar_bar_slots.remove(slot)
self._scalar_bar_slot_lookup[title] = slot
except:
raise RuntimeError('Maximum number of color bars reached.')
if position_x is None:
if vertical:
position_x = rcParams['colorbar_vertical']['position_x']
position_x -= slot * width
else:
position_x = rcParams['colorbar_horizontal']['position_x']
if position_y is None:
if vertical:
position_y = rcParams['colorbar_vertical']['position_y']
else:
position_y = rcParams['colorbar_horizontal']['position_y']
position_y += slot * height
# Adjust to make sure on the screen
if position_x + width > 1:
position_x -= width
if position_y + height > 1:
position_y -= height
# parse color
color = parse_color(color)
# Create scalar bar
self.scalar_bar = vtk.vtkScalarBarActor()
self.scalar_bar.SetLookupTable(mapper.GetLookupTable())
self.scalar_bar.SetNumberOfLabels(n_labels)
# edit the size of the colorbar
self.scalar_bar.SetHeight(height)
self.scalar_bar.SetWidth(width)
self.scalar_bar.SetPosition(position_x, position_y)
if fmt is not None:
self.scalar_bar.SetLabelFormat(fmt)
if vertical:
self.scalar_bar.SetOrientationToVertical()
else:
self.scalar_bar.SetOrientationToHorizontal()
if label_font_size is None or title_font_size is None:
self.scalar_bar.UnconstrainedFontSizeOn()
if n_labels:
label_text = self.scalar_bar.GetLabelTextProperty()
label_text.SetColor(color)
label_text.SetShadow(shadow)
# Set font
label_text.SetFontFamily(parse_font_family(font_family))
label_text.SetItalic(italic)
label_text.SetBold(bold)
if label_font_size:
label_text.SetFontSize(label_font_size)
# Set properties
if title:
rng = mapper.GetScalarRange()
self._scalar_bar_ranges[title] = rng
self._scalar_bar_mappers[title] = [mapper]
self.scalar_bar.SetTitle(title)
title_text = self.scalar_bar.GetTitleTextProperty()
title_text.SetJustificationToCentered()
title_text.SetItalic(italic)
title_text.SetBold(bold)
title_text.SetShadow(shadow)
if title_font_size:
title_text.SetFontSize(title_font_size)
# Set font
title_text.SetFontFamily(parse_font_family(font_family))
# set color
title_text.SetColor(color)
self._scalar_bar_actors[title] = self.scalar_bar
if interactive is None:
interactive = rcParams['interactive']
if self.shape != (1, 1):
interactive = False
elif interactive and self.shape != (1, 1):
err_str = 'Interactive scalar bars disabled for multi-renderer plots'
raise Exception(err_str)
if interactive and hasattr(self, 'iren'):
self.scalar_widget = vtk.vtkScalarBarWidget()
self.scalar_widget.SetScalarBarActor(self.scalar_bar)
self.scalar_widget.SetInteractor(self.iren)
self.scalar_widget.SetEnabled(1)
rep = self.scalar_widget.GetRepresentation()
# self.scalar_widget.On()
if vertical is True or vertical is None:
rep.SetOrientation(1) # 0 = Horizontal, 1 = Vertical
else:
rep.SetOrientation(0) # 0 = Horizontal, 1 = Vertical
self._scalar_bar_widgets[title] = self.scalar_widget
if use_opacity:
self.scalar_bar.SetUseOpacity(True)
if outline:
self.scalar_bar.SetDrawFrame(True)
frame_prop = self.scalar_bar.GetFrameProperty()
frame_prop.SetColor(color)
else:
self.scalar_bar.SetDrawFrame(False)
self.add_actor(self.scalar_bar, reset_camera=False)
def update_scalars(self, scalars, mesh=None, render=True):
"""
Updates the scalars of an object in the plotter.
Parameters
----------
scalars : np.ndarray
Scalars to replace existing scalars.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Forces an update to the render window. Default True.
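Examples
--------
A short sketch replacing the active scalars in place, e.g. between
rendered frames (the random values are illustrative):
>>> import numpy as np
>>> import vtki
>>> sphere = vtki.Sphere()
>>> plotter = vtki.Plotter()
>>> _ = plotter.add_mesh(sphere, scalars=sphere.points[:, 2])
>>> plotter.update_scalars(np.random.random(sphere.n_points), render=False)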
"""
if mesh is None:
mesh = self.mesh
if isinstance(mesh, (collections.Iterable, vtki.MultiBlock)):
# Recursive if need to update scalars on many meshes
for m in mesh:
self.update_scalars(scalars, mesh=m, render=False)
if render:
self.ren_win.Render()
return
if isinstance(scalars, str):
# Grab scalar array if name given
scalars = get_scalar(mesh, scalars)
if scalars is None:
if render:
self.ren_win.Render()
return
if scalars.shape[0] == mesh.GetNumberOfPoints():
data = mesh.GetPointData()
elif scalars.shape[0] == mesh.GetNumberOfCells():
data = mesh.GetCellData()
else:
_raise_not_matching(scalars, mesh)
vtk_scalars = data.GetScalars()
if vtk_scalars is None:
raise Exception('No active scalars')
s = VN.vtk_to_numpy(vtk_scalars)
s[:] = scalars
data.Modified()
try:
# Why are the points updated here? Not all datasets have points
# and only the scalar array is modified by this function...
mesh.GetPoints().Modified()
except:
pass
if render:
self.ren_win.Render()
def update_coordinates(self, points, mesh=None, render=True):
"""
Updates the points of an object in the plotter.
Parameters
----------
points : np.ndarray
Points to replace existing points.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Forces an update to the render window. Default True.
"""
if mesh is None:
mesh = self.mesh
mesh.points = points
if render:
self._render()
def close(self):
""" closes render window """
# must close out axes marker
if hasattr(self, 'axes_widget'):
del self.axes_widget
# reset scalar bar stuff
self._scalar_bar_slots = set(range(MAX_N_COLOR_BARS))
self._scalar_bar_slot_lookup = {}
self._scalar_bar_ranges = {}
self._scalar_bar_mappers = {}
if hasattr(self, 'ren_win'):
self.ren_win.Finalize()
del self.ren_win
if hasattr(self, '_style'):
del self._style
if hasattr(self, 'iren'):
self.iren.RemoveAllObservers()
del self.iren
if hasattr(self, 'textActor'):
del self.textActor
# end movie
if hasattr(self, 'mwriter'):
try:
self.mwriter.close()
except BaseException:
pass
def add_text(self, text, position=None, font_size=50, color=None,
font=None, shadow=False, name=None, loc=None):
"""
Adds text to plot object in the top left corner by default
Parameters
----------
text : str
The text to add to the rendering
position : tuple(float)
Length 2 tuple of the pixelwise position to place the bottom
left corner of the text box. Default is to find the top left corner
of the rendering window and place the text box there.
font : string, optional
Font name may be courier, times, or arial
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``.
Returns
-------
textActor : vtk.vtkTextActor
Text actor added to plot
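Examples
--------
A brief sketch (the text, size, and color are illustrative):
>>> import vtki
>>> plotter = vtki.Plotter()
>>> actor = plotter.add_text('My Title', font_size=30, color='blue')
>>> plotter.show() # doctest:+SKIP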
"""
if font is None:
font = rcParams['font']['family']
if font_size is None:
font_size = rcParams['font']['size']
if color is None:
color = rcParams['font']['color']
if position is None:
# Set the position of the text to the top left corner
window_size = self.window_size
x = (window_size[0] * 0.02) / self.shape[0]
y = (window_size[1] * 0.85) / self.shape[0]
position = [x, y]
self.textActor = vtk.vtkTextActor()
self.textActor.SetPosition(position)
self.textActor.GetTextProperty().SetFontSize(font_size)
self.textActor.GetTextProperty().SetColor(parse_color(color))
self.textActor.GetTextProperty().SetFontFamily(FONT_KEYS[font])
self.textActor.GetTextProperty().SetShadow(shadow)
self.textActor.SetInput(text)
self.add_actor(self.textActor, reset_camera=False, name=name, loc=loc)
return self.textActor
def open_movie(self, filename, framerate=24):
"""
Establishes a connection to the ffmpeg writer
Parameters
----------
filename : str
Filename of the movie to open. Filename should end in mp4,
but other filetypes may be supported. See ``imageio.get_writer``.
framerate : int, optional
Frames per second.
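Examples
--------
A minimal sketch of the movie workflow (the filename is illustrative and
writing mp4 requires an ffmpeg-capable imageio backend):
>>> import vtki
>>> plotter = vtki.Plotter()
>>> _ = plotter.add_mesh(vtki.Sphere())
>>> plotter.open_movie('orbit.mp4') # doctest:+SKIP
>>> plotter.show(auto_close=False) # doctest:+SKIP
>>> plotter.write_frame() # doctest:+SKIP
>>> plotter.close() # doctest:+SKIP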
"""
if isinstance(vtki.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(vtki.FIGURE_PATH, filename)
self.mwriter = imageio.get_writer(filename, fps=framerate)
def open_gif(self, filename):
"""
Open a gif file.
Parameters
----------
filename : str
Filename of the gif to open. Filename must end in gif.
"""
if filename[-3:] != 'gif':
raise Exception('Unsupported filetype. Must end in .gif')
if isinstance(vtki.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(vtki.FIGURE_PATH, filename)
self._gif_filename = os.path.abspath(filename)
self.mwriter = imageio.get_writer(filename, mode='I')
def write_frame(self):
""" Writes a single frame to the movie file """
if not hasattr(self, 'mwriter'):
raise AssertionError('This plotter has not opened a movie or GIF file.')
self.mwriter.append_data(self.image)
@property
def window_size(self):
""" returns render window size """
return list(self.ren_win.GetSize())
@window_size.setter
def window_size(self, window_size):
""" set the render window size """
self.ren_win.SetSize(window_size[0], window_size[1])
def _run_image_filter(self, ifilter):
# Update filter and grab pixels
ifilter.Modified()
ifilter.Update()
image = vtki.wrap(ifilter.GetOutput())
img_size = image.dimensions
img_array = vtki.utilities.point_scalar(image, 'ImageScalars')
# Reshape and write
tgt_size = (img_size[1], img_size[0], -1)
return img_array.reshape(tgt_size)[::-1]
@property
def image_depth(self):
""" Returns an image array of current render window """
ifilter = vtk.vtkWindowToImageFilter()
ifilter.SetInput(self.ren_win)
ifilter.ReadFrontBufferOff()
ifilter.SetInputBufferTypeToZBuffer()
return self._run_image_filter(ifilter)
@property
def image(self):
""" Returns an image array of current render window """
if not hasattr(self, 'ren_win') and hasattr(self, 'last_image'):
return self.last_image
ifilter = vtk.vtkWindowToImageFilter()
ifilter.SetInput(self.ren_win)
ifilter.ReadFrontBufferOff()
if self.image_transparent_background:
ifilter.SetInputBufferTypeToRGBA()
else:
ifilter.SetInputBufferTypeToRGB()
return self._run_image_filter(ifilter)
def enable_eye_dome_lighting(self):
"""Enable eye dome lighting (EDL) for active renderer"""
return self.renderer.enable_eye_dome_lighting()
def disable_eye_dome_lighting(self):
"""Disable eye dome lighting (EDL) for active renderer"""
return self.renderer.disable_eye_dome_lighting()
def add_lines(self, lines, color=(1, 1, 1), width=5, label=None, name=None):
"""
Adds lines to the plotting object.
Parameters
----------
lines : np.ndarray or vtki.PolyData
Points representing line segments. For example, two line segments
would be represented as:
np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
width : float, optional
Thickness of lines
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
Returns
-------
actor : vtk.vtkActor
Lines actor.
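Examples
--------
A short sketch drawing a single line segment (the endpoints are
illustrative):
>>> import numpy as np
>>> import vtki
>>> plotter = vtki.Plotter()
>>> actor = plotter.add_lines(np.array([[0, 0, 0], [1, 1, 1]]), color='y', width=3)
>>> plotter.show() # doctest:+SKIP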
"""
if not isinstance(lines, np.ndarray):
raise Exception('Input should be an array of point segments')
lines = vtki.lines_from_points(lines)
# Create mapper and add lines
mapper = vtk.vtkDataSetMapper()
mapper.SetInputData(lines)
rgb_color = parse_color(color)
# legend label
if label:
if not isinstance(label, str):
raise AssertionError('Label must be a string')
self._labels.append([lines, label, rgb_color])
# Create actor
self.scalar_bar = vtk.vtkActor()
self.scalar_bar.SetMapper(mapper)
self.scalar_bar.GetProperty().SetLineWidth(width)
self.scalar_bar.GetProperty().EdgeVisibilityOn()
self.scalar_bar.GetProperty().SetEdgeColor(rgb_color)
self.scalar_bar.GetProperty().SetColor(rgb_color)
self.scalar_bar.GetProperty().LightingOff()
# Add to renderer
self.add_actor(self.scalar_bar, reset_camera=False, name=name)
return self.scalar_bar
def remove_scalar_bar(self):
""" Removes scalar bar """
if hasattr(self, 'scalar_bar'):
self.remove_actor(self.scalar_bar, reset_camera=False)
def add_point_labels(self, points, labels, italic=False, bold=True,
font_size=None, text_color='k',
font_family=None, shadow=False,
show_points=True, point_color='k', point_size=5,
name=None):
"""
Creates a point actor with one label from list labels assigned to
each point.
Parameters
----------
points : np.ndarray
n x 3 numpy array of points.
labels : list
List of labels. Must be the same length as points.
italic : bool, optional
Italicises title and bar labels. Default False.
bold : bool, optional
Bolds title and bar labels. Default True
font_size : float, optional
Sets the size of the title font. Defaults to 16.
text_color : string or 3 item list, optional, defaults to black
Color of text.
Either a string, rgb list, or hex color string. For example:
text_color='white'
text_color='w'
text_color=[1, 1, 1]
text_color='#FFFFFF'
font_family : string, optional
Font family. Must be either courier, times, or arial.
shadow : bool, optional
Adds a black shadow to the text. Defaults to False
show_points : bool, optional
Controls if points are visible. Default True
point_color : string or 3 item list, optional, defaults to black
Color of points (if visible).
Either a string, rgb list, or hex color string. For example:
text_color='white'
text_color='w'
text_color=[1, 1, 1]
text_color='#FFFFFF'
point_size : float, optional
Size of points (if visible)
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
Returns
-------
labelMapper : vtk.vtkLabeledDataMapper
VTK label mapper. Can be used to change properties of the labels.
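Examples
--------
A minimal sketch labelling three points (points and labels are
illustrative):
>>> import numpy as np
>>> import vtki
>>> points = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 0.0], [2.0, 0.0, 0.0]])
>>> plotter = vtki.Plotter()
>>> mapper = plotter.add_point_labels(points, ['A', 'B', 'C'], point_size=10)
>>> plotter.show() # doctest:+SKIP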
"""
if font_family is None:
font_family = rcParams['font']['family']
if font_size is None:
font_size = rcParams['font']['size']
if len(points) != len(labels):
raise Exception('There must be one label for each point')
vtkpoints = vtki.PolyData(points)
vtklabels = vtk.vtkStringArray()
vtklabels.SetName('labels')
for item in labels:
vtklabels.InsertNextValue(str(item))
vtkpoints.GetPointData().AddArray(vtklabels)
# create label mapper
labelMapper = vtk.vtkLabeledDataMapper()
labelMapper.SetInputData(vtkpoints)
textprop = labelMapper.GetLabelTextProperty()
textprop.SetItalic(italic)
textprop.SetBold(bold)
textprop.SetFontSize(font_size)
textprop.SetFontFamily(parse_font_family(font_family))
textprop.SetColor(parse_color(text_color))
textprop.SetShadow(shadow)
labelMapper.SetLabelModeToLabelFieldData()
labelMapper.SetFieldDataName('labels')
labelActor = vtk.vtkActor2D()
labelActor.SetMapper(labelMapper)
# add points
if show_points:
style = 'points'
else:
style = 'surface'
self.add_mesh(vtkpoints, style=style, color=point_color,
point_size=point_size)
self.add_actor(labelActor, reset_camera=False, name=name)
return labelMapper
def add_points(self, points, **kwargs):
""" Add points to a mesh """
kwargs['style'] = 'points'
self.add_mesh(points, **kwargs)
def add_arrows(self, cent, direction, mag=1, **kwargs):
""" Adds arrows to plotting object """
direction = direction.copy()
if cent.ndim != 2:
cent = cent.reshape((-1, 3))
if direction.ndim != 2:
direction = direction.reshape((-1, 3))
direction[:,0] *= mag
direction[:,1] *= mag
direction[:,2] *= mag
pdata = vtki.vector_poly_data(cent, direction)
# Create arrow object
arrow = vtk.vtkArrowSource()
arrow.Update()
glyph3D = vtk.vtkGlyph3D()
glyph3D.SetSourceData(arrow.GetOutput())
glyph3D.SetInputData(pdata)
glyph3D.SetVectorModeToUseVector()
glyph3D.Update()
arrows = wrap(glyph3D.GetOutput())
return self.add_mesh(arrows, **kwargs)
@staticmethod
def _save_image(image, filename, return_img=None):
"""Internal helper for saving a NumPy image array"""
if not image.size:
raise Exception('Empty image. Have you run plot() first?')
# write screenshot to file
if isinstance(filename, str):
if isinstance(vtki.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(vtki.FIGURE_PATH, filename)
if not return_img:
return imageio.imwrite(filename, image)
imageio.imwrite(filename, image)
return image
def screenshot(self, filename=None, transparent_background=False,
return_img=None, window_size=None):
"""
Takes screenshot at current camera position
Parameters
----------
filename : str, optional
Location to write image to. If None, no image is written.
transparent_background : bool, optional
Makes the background transparent. Default False.
return_img : bool, optional
If a string filename is given and this is true, a NumPy array of
the image will be returned.
Returns
-------
img : numpy.ndarray
Array containing pixel RGB and alpha. Sized:
[Window height x Window width x 3] for transparent_background=False
[Window height x Window width x 4] for transparent_background=True
Examples
--------
>>> import vtki
>>> sphere = vtki.Sphere()
>>> plotter = vtki.Plotter()
>>> actor = plotter.add_mesh(sphere)
>>> plotter.screenshot('screenshot.png') # doctest:+SKIP
"""
if window_size is not None:
self.window_size = window_size
# configure image filter
self.image_transparent_background = transparent_background
# This if statement allows you to save screenshots of closed plotters
# This is needed for the sphinx-gallery work
if not hasattr(self, 'ren_win'):
# If plotter has been closed...
# check if last_image exists
if hasattr(self, 'last_image'):
# Save last image
return self._save_image(self.last_image, filename, return_img)
# Plotter hasn't been rendered or was improperly closed
raise AttributeError('This plotter is unable to save a screenshot.')
if isinstance(self, Plotter):
# TODO: we need a consistent rendering function
self.render()
else:
self._render()
# debug: this needs to be called twice for some reason,
img = self.image
img = self.image
return self._save_image(img, filename, return_img)
def add_legend(self, labels=None, bcolor=(0.5, 0.5, 0.5), border=False,
size=None, name=None):
"""
Adds a legend to render window. Entries must be a list
containing one string and color entry for each item.
Parameters
----------
labels : list, optional
When set to None, uses existing labels as specified by
- add_mesh
- add_lines
- add_points
List containing one entry for each item to be added to the
legend. Each entry must contain two strings, [label,
color], where label is the name of the item to add, and
color is the color of the label to add.
bcolor : list or string, optional
Background color, either a three item 0 to 1 RGB color
list, or a matplotlib color string (e.g. 'w' or 'white'
for a white color). If None, legend background is
disabled.
border : bool, optional
Controls if there will be a border around the legend.
Default False.
size : list, optional
Two float list, each float between 0 and 1. For example
[0.1, 0.1] would make the legend 10% the size of the
entire figure window.
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
Returns
-------
legend : vtk.vtkLegendBoxActor
Actor for the legend.
Examples
--------
>>> import vtki
>>> from vtki import examples
>>> mesh = examples.load_hexbeam()
>>> othermesh = examples.load_uniform()
>>> plotter = vtki.Plotter()
>>> _ = plotter.add_mesh(mesh, label='My Mesh')
>>> _ = plotter.add_mesh(othermesh, 'k', label='My Other Mesh')
>>> _ = plotter.add_legend()
>>> plotter.show() # doctest:+SKIP
Alternative manual example
>>> import vtki
>>> from vtki import examples
>>> mesh = examples.load_hexbeam()
>>> othermesh = examples.load_uniform()
>>> legend_entries = []
>>> legend_entries.append(['My Mesh', 'w'])
>>> legend_entries.append(['My Other Mesh', 'k'])
>>> plotter = vtki.Plotter()
>>> _ = plotter.add_mesh(mesh)
>>> _ = plotter.add_mesh(othermesh, 'k')
>>> _ = plotter.add_legend(legend_entries)
>>> plotter.show() # doctest:+SKIP
"""
self.legend = vtk.vtkLegendBoxActor()
if labels is None:
# use existing labels
if not self._labels:
raise Exception('No labels input.\n\n' +
'Add labels to individual items when adding them to ' +
'the plotting object with the "label=" parameter, ' +
'or enter them as the "labels" parameter.')
self.legend.SetNumberOfEntries(len(self._labels))
for i, (vtk_object, text, color) in enumerate(self._labels):
self.legend.SetEntry(i, vtk_object, text, parse_color(color))
else:
self.legend.SetNumberOfEntries(len(labels))
legendface = single_triangle()
for i, (text, color) in enumerate(labels):
self.legend.SetEntry(i, legendface, text, parse_color(color))
if size:
self.legend.SetPosition2(size[0], size[1])
if bcolor is None:
self.legend.UseBackgroundOff()
else:
self.legend.UseBackgroundOn()
self.legend.SetBackgroundColor(bcolor)
if border:
self.legend.BorderOn()
else:
self.legend.BorderOff()
# Add to renderer
self.add_actor(self.legend, reset_camera=False, name=name)
return self.legend
@property
def camera_position(self):
""" Returns camera position of the active render window """
return self.renderer.camera_position
@camera_position.setter
def camera_position(self, camera_location):
""" Set camera position of the active render window """
self.renderer.camera_position = camera_location
def reset_camera(self):
"""
Reset camera so it slides along the vector defined from camera
position to focal point until all of the actors can be seen.
"""
self.renderer.reset_camera()
self._render()
def isometric_view(self):
"""DEPRECATED: Please use ``view_isometric``"""
return self.view_isometric()
def view_isometric(self):
"""
Resets the camera to a default isometric view showing all the
actors in the scene.
"""
return self.renderer.view_isometric()
def view_vector(self, vector, viewup=None):
return self.renderer.view_vector(vector, viewup=viewup)
def view_xy(self, negative=False):
"""View the XY plane"""
return self.renderer.view_xy(negative=negative)
def view_xz(self, negative=False):
"""View the XZ plane"""
return self.renderer.view_xz(negative=negative)
def view_yz(self, negative=False):
"""View the YZ plane"""
return self.renderer.view_yz(negative=negative)
def disable(self):
"""Disable this renderer's camera from being interactive"""
return self.renderer.disable()
def enable(self):
"""Enable this renderer's camera to be interactive"""
return self.renderer.enable()
def set_background(self, color, loc='all'):
"""
Sets background color
Parameters
----------
color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
loc : int, tuple, list, or str, optional
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If ``loc='all'`` then all
render windows will have their background set.
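Examples
--------
A brief sketch:
>>> import vtki
>>> plotter = vtki.Plotter()
>>> plotter.set_background('black')
>>> plotter.show() # doctest:+SKIP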
"""
if color is None:
color = rcParams['background']
if isinstance(color, str):
if color.lower() in ['paraview', 'pv']:
# Use the default ParaView background color
color = PV_BACKGROUND
else:
color = vtki.string_to_rgb(color)
if loc == 'all':
for renderer in self.renderers:
renderer.SetBackground(color)
else:
renderer = self.renderers[self.loc_to_index(loc)]
renderer.SetBackground(color)
@property
def background_color(self):
""" Returns background color of the first render window """
return self.renderers[0].GetBackground()
@background_color.setter
def background_color(self, color):
""" Sets the background color of all the render windows """
self.set_background(color)
def remove_legend(self):
""" Removes legend actor """
if hasattr(self, 'legend'):
self.remove_actor(self.legend, reset_camera=False)
self._render()
def enable_cell_picking(self, mesh=None, callback=None):
"""
Enables picking of cells. Press "r" to enable rectangle-based
selection. Press "r" again to turn it off. Selection will be
saved to self.picked_cells.
Uses last input mesh for input
Parameters
----------
mesh : vtk.UnstructuredGrid, optional
UnstructuredGrid grid to select cells from. Uses last
input grid by default.
callback : function, optional
When input, calls this function after a selection is made.
The picked_cells are input as the first parameter to this function.
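Examples
--------
A short sketch using a callback to report how many cells were picked
(the lambda callback is illustrative):
>>> import vtki
>>> from vtki import examples
>>> grid = examples.load_hexbeam()
>>> plotter = vtki.Plotter()
>>> _ = plotter.add_mesh(grid)
>>> plotter.enable_cell_picking(callback=lambda cells: print(cells.n_cells)) # doctest:+SKIP
>>> plotter.show() # doctest:+SKIP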
"""
if mesh is None:
if not hasattr(self, 'mesh'):
raise Exception('Input a mesh into the Plotter class first '
+ 'or set it in this function')
mesh = self.mesh
def pick_call_back(picker, event_id):
extract = vtk.vtkExtractGeometry()
mesh.cell_arrays['orig_extract_id'] = np.arange(mesh.n_cells)
extract.SetInputData(mesh)
extract.SetImplicitFunction(picker.GetFrustum())
extract.Update()
self.picked_cells = vtki.wrap(extract.GetOutput())
if callback is not None:
callback(self.picked_cells)
area_picker = vtk.vtkAreaPicker()
area_picker.AddObserver(vtk.vtkCommand.EndPickEvent, pick_call_back)
self.enable_rubber_band_style()
self.iren.SetPicker(area_picker)
def generate_orbital_path(self, factor=3., n_points=20, viewup=None, z_shift=None):
"""Genrates an orbital path around the data scene
Parameters
----------
factor : float
A scaling factor when building the orbital extent
n_points : int
number of points on the orbital path
viewup : list(float)
the normal to the orbital plane
z_shift : float, optional
shift the plane up/down from the center of the scene by this amount
"""
if viewup is None:
viewup = rcParams['camera']['viewup']
center = list(self.center)
bnds = list(self.bounds)
if z_shift is None:
z_shift = (bnds[5] - bnds[4]) * factor
center[2] = center[2] + z_shift
radius = (bnds[1] - bnds[0]) * factor
y = (bnds[3] - bnds[2]) * factor
if y > radius:
radius = y
return vtki.Polygon(center=center, radius=radius, normal=viewup, n_sides=n_points)
def fly_to(self, point):
"""Given a position point, move the current camera's focal point to that
point. The movement is animated over the number of frames specified in
NumberOfFlyFrames. The LOD desired frame rate is used.
"""
return self.iren.FlyTo(self.renderer, *point)
def orbit_on_path(self, path=None, focus=None, step=0.5, viewup=None, bkg=True):
"""Orbit on the given path focusing on the focus point
Parameters
----------
path : vtki.PolyData
Path of orbital points. The order in the points is the order of
travel
focus : list(float) of length 3, optional
The point to focus the camera on.
step : float, optional
The timestep between flying to each camera position
viewup : list(float)
the normal to the orbital plane
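Examples
--------
A minimal sketch generating a path and orbiting along it once (the
``n_points`` and ``step`` values are illustrative):
>>> import vtki
>>> plotter = vtki.Plotter()
>>> _ = plotter.add_mesh(vtki.Sphere())
>>> plotter.show(auto_close=False) # doctest:+SKIP
>>> path = plotter.generate_orbital_path(n_points=36)
>>> plotter.orbit_on_path(path, step=0.1, bkg=False) # doctest:+SKIP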
"""
if focus is None:
focus = self.center
if viewup is None:
viewup = rcParams['camera']['viewup']
if path is None:
path = self.generate_orbital_path(viewup=viewup)
if not is_vtki_obj(path):
path = vtki.PolyData(path)
points = path.points
def orbit():
"""Internal thread for running the orbit"""
for point in points:
self.set_position(point)
self.set_focus(focus)
self.set_viewup(viewup)
time.sleep(step)
if bkg:
thread = Thread(target=orbit)
thread.start()
else:
orbit()
return
def export_vtkjs(self, filename, compress_arrays=False):
"""
Export the current rendering scene as a VTKjs scene for
rendering in a web browser
"""
if not hasattr(self, 'ren_win'):
raise RuntimeError('Export must be called before showing/closing the scene.')
if isinstance(vtki.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(vtki.FIGURE_PATH, filename)
return export_plotter_vtkjs(self, filename, compress_arrays=compress_arrays)
class Plotter(BasePlotter):
""" Plotting object to display vtk meshes or numpy arrays.
Example
-------
>>> import vtki
>>> from vtki import examples
>>> mesh = examples.load_hexbeam()
>>> another_mesh = examples.load_uniform()
>>> plotter = vtki.Plotter()
>>> _ = plotter.add_mesh(mesh, color='red')
>>> _ = plotter.add_mesh(another_mesh, color='blue')
>>> plotter.show() # doctest:+SKIP
Parameters
----------
off_screen : bool, optional
Renders off screen when True. Useful for automated screenshots.
notebook : bool, optional
When True, the resulting plot is placed inline in a Jupyter notebook.
Assumes a Jupyter console is active. Automatically enables off_screen.
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one render
window.
border : bool, optional
Draw a border around each render window. Default False.
border_color : string or 3 item list, optional, defaults to white
Either a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'
window_size : list, optional
Window size in pixels. Defaults to [1024, 768]
"""
last_update_time = 0.0
q_pressed = False
right_timer_id = -1
def __init__(self, off_screen=None, notebook=None, shape=(1, 1),
border=None, border_color='k', border_width=1.0,
window_size=None):
"""
Initialize a vtk plotting object
"""
super(Plotter, self).__init__(shape=shape, border=border,
border_color=border_color,
border_width=border_width)
log.debug('Initializing')
def on_timer(iren, event_id):
""" Exit application if interactive renderer stops """
if event_id == 'TimerEvent':
self.iren.TerminateApp()
if off_screen is None:
off_screen = vtki.OFF_SCREEN
if notebook is None:
if run_from_ipython():
try:
notebook = type(get_ipython()).__module__.startswith('ipykernel.')
except NameError:
pass
self.notebook = notebook
if self.notebook:
off_screen = True
self.off_screen = off_screen
if window_size is None:
window_size = vtki.rcParams['window_size']
# initialize render window
self.ren_win = vtk.vtkRenderWindow()
self.ren_win.SetBorders(True)
for renderer in self.renderers:
self.ren_win.AddRenderer(renderer)
if self.off_screen:
self.ren_win.SetOffScreenRendering(1)
else: # Allow user to interact
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.LightFollowCameraOff()
self.iren.SetDesiredUpdateRate(30.0)
self.iren.SetRenderWindow(self.ren_win)
self.enable_trackball_style()
self.iren.AddObserver("KeyPressEvent", self.key_press_event)
self.update_style()
# for renderer in self.renderers:
# self.iren.SetRenderWindow(renderer)
# Set background
self.set_background(rcParams['background'])
# Set window size
self.window_size = window_size
# add timer event if interactive render exists
if hasattr(self, 'iren'):
self.iren.AddObserver(vtk.vtkCommand.TimerEvent, on_timer)
def show(self, title=None, window_size=None, interactive=True,
auto_close=True, interactive_update=False, full_screen=False,
screenshot=False, return_img=False, use_panel=None):
"""
Creates plotting window
Parameters
----------
title : string, optional
Title of plotting window.
window_size : list, optional
Window size in pixels. Defaults to [1024, 768]
interactive : bool, optional
Enabled by default. Allows user to pan and move figure.
auto_close : bool, optional
Enabled by default. Exits plotting session when user
closes the window when interactive is True.
interactive_update: bool, optional
Disabled by default. Allows user to non-blocking draw,
user should call Update() in each iteration.
full_screen : bool, optional
Opens window in full screen. When enabled, ignores
window_size. Default False.
use_panel : bool, optional
If False, the interactive rendering from panel will not be used in
notebooks
Returns
-------
cpos : list
List of camera position, focal point, and view up
"""
if use_panel is None:
use_panel = rcParams['use_panel']
# reset the camera on the first render unless the camera has already been set
if self._first_time: # and not self.camera_set:
for renderer in self.renderers:
if not renderer.camera_set:
renderer.camera_position = renderer.get_default_cam_pos()
renderer.ResetCamera()
self._first_time = False
if title:
self.ren_win.SetWindowName(title)
if full_screen:
self.ren_win.SetFullScreen(True)
self.ren_win.BordersOn() # super buggy when disabled
else:
if window_size is None:
window_size = self.window_size
self.ren_win.SetSize(window_size[0], window_size[1])
# Render
log.debug('Rendering')
self.ren_win.Render()
if interactive and (not self.off_screen):
try: # interrupts will be caught here
log.debug('Starting iren')
self.update_style()
self.iren.Initialize()
if not interactive_update:
self.iren.Start()
except KeyboardInterrupt:
log.debug('KeyboardInterrupt')
self.close()
raise KeyboardInterrupt
# Keep track of image for sphinx-gallery
self.last_image = self.screenshot(screenshot, return_img=True)
# Get camera position before closing
cpos = self.camera_position
if self.notebook:
# sanity check
try:
import IPython
except ImportError:
raise Exception('Install IPython to display image in a notebook')
disp = None
if use_panel:
try:
from panel.pane import VTK as panel_display
disp = panel_display(self.ren_win, sizing_mode='stretch_width',
height=400)
except:
pass
if disp is None or self.shape != (1,1):
import PIL.Image
disp = IPython.display.display(PIL.Image.fromarray(self.last_image))
if auto_close:
self.close()
if self.notebook:
return disp
if return_img or screenshot == True:
return cpos, self.last_image
return cpos
def plot(self, *args, **kwargs):
""" Present for backwards compatibility. Use `show()` instead """
return self.show(*args, **kwargs)
def render(self):
""" renders main window """
self.ren_win.Render()
def single_triangle():
""" A single PolyData triangle """
points = np.zeros((3, 3))
points[1] = [1, 0, 0]
points[2] = [0.5, 0.707, 0]
cells = np.array([[3, 0, 1, 2]], ctypes.c_long)
return vtki.PolyData(points, cells)
def parse_color(color):
""" Parses color into a vtk friendly rgb list """
if color is None:
color = rcParams['color']
if isinstance(color, str):
return vtki.string_to_rgb(color)
elif len(color) == 3:
return color
else:
raise Exception("""
Invalid color input
Must be a string, rgb list, or hex color string. For example:
color='white'
color='w'
color=[1, 1, 1]
color='#FFFFFF'""")
def parse_font_family(font_family):
""" checks font name """
# check font name
font_family = font_family.lower()
if font_family not in ['courier', 'times', 'arial']:
raise Exception('Font must be either "courier", "times" ' +
'or "arial"')
return FONT_KEYS[font_family]
def plot_compare_four(data_a, data_b, data_c, data_d, disply_kwargs=None,
plotter_kwargs=None, show_kwargs=None, screenshot=None,
camera_position=None, outline=None, outline_color='k',
labels=('A', 'B', 'C', 'D')):
"""Plot a 2 by 2 comparison of data objects. Plotting parameters and camera
positions will all be the same.
"""
datasets = [[data_a, data_b], [data_c, data_d]]
labels = [labels[0:2], labels[2:4]]
if plotter_kwargs is None:
plotter_kwargs = {}
if disply_kwargs is None:
disply_kwargs = {}
if show_kwargs is None:
show_kwargs = {}
p = vtki.Plotter(shape=(2,2), **plotter_kwargs)
for i in range(2):
for j in range(2):
p.subplot(i, j)
p.add_mesh(datasets[i][j], **disply_kwargs)
p.add_text(labels[i][j])
if is_vtki_obj(outline):
p.add_mesh(outline, color=outline_color)
if camera_position is not None:
p.camera_position = camera_position
return p.show(screenshot=screenshot, **show_kwargs)
# ===== development_server.py =====
import logging
import signal
from contextlib import contextmanager
from threading import Thread
from time import sleep
from typing import Union, Callable, Generator, List
from wsgiref.simple_server import make_server, WSGIServer
LOGGER = logging.getLogger(__name__)
class DevelopmentServer:
def __init__(self, wsgi_app: Callable, host: str = None, port: int = 0, stop_signals: List[int] = None):
self.wsgi_app = wsgi_app
self.host = host or "127.0.0.1"
self.port = port
self.stop_signals = stop_signals or [signal.SIGTERM, signal.SIGINT]
self.server: Union[WSGIServer, None] = None
self._thread: Union[Thread, None] = None
self._is_running = False
def start(self, blocking: bool = True) -> int:
LOGGER.debug("Creating WSGI server for host %s and port %d", self.host, self.port)
self._register_stop_signals()
self.server = make_server(self.host, self.port, self.wsgi_app)
self._thread = Thread(target=self.server.serve_forever)
self._thread.start()
self._is_running = True
if blocking:
LOGGER.info(
"Starting development server in blocking mode at http://%s:%d/", self.host, self.server.server_port
)
self._wait_until_stopped()
else:
LOGGER.info("Development server is now running at http://%s:%d/", self.host, self.port)
return self.server.server_port
def _wait_until_stopped(self):
while self._is_running:
sleep(0.5)
def _register_stop_signals(self):
for stop_signal in self.stop_signals:
LOGGER.debug("Registering signal %d as stop signal", stop_signal)
signal.signal(stop_signal, self._stop_from_signal)
def _stop_from_signal(self, signum: int, __):
LOGGER.info("Received signal %d", signum)
self.stop()
def stop(self):
LOGGER.info("Stopping development server")
self._is_running = False
self.server.shutdown()
@contextmanager
def start_in_context(self) -> Generator[int, None, None]:
port = self.start(blocking=False)
try:
yield port
finally:
self.stop()
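# A minimal usage sketch (the demo WSGI app below is a stand-in, not part of
# the server itself): serve it on an ephemeral port for a moment, then stop.
if __name__ == "__main__":
    def _demo_app(environ, start_response):
        # Trivial WSGI application returning a plain-text greeting.
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"hello from DevelopmentServer"]

    with DevelopmentServer(_demo_app, port=0).start_in_context() as port:
        LOGGER.info("Demo server listening on port %d", port)
        sleep(1.0)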
# ===== miner.py =====
import time
import hashlib
import json
import requests
import base64
from flask import Flask, request
from multiprocessing import Process, Pipe
import ecdsa
from miner_config import MINER_ADDRESS, MINER_NODE_URL, PEER_NODES
node = Flask(__name__)
class Block:
def __init__(self, index, timestamp, data, previous_hash):
"""Returns a new Block object. Each block is "chained" to its previous
by calling its unique hash.
Args:
index (int): Block number.
timestamp (int): Block creation timestamp.
data (str): Data to be sent.
previous_hash(str): String representing previous block unique hash.
Attrib:
index (int): Block number.
timestamp (int): Block creation timestamp.
data (str): Data to be sent.
previous_hash(str): String representing previous block unique hash.
hash(str): Current block unique hash.
"""
self.index = index
self.timestamp = timestamp
self.data = data
self.previous_hash = previous_hash
self.hash = self.hash_block()
def hash_block(self):
"""Creates the unique hash for the block. It uses sha256."""
sha = hashlib.sha256()
sha.update((str(self.index) + str(self.timestamp) + str(self.data) + str(self.previous_hash)).encode('utf-8'))
return sha.hexdigest()
def create_genesis_block():
"""To create each block, it needs the hash of the previous one. First
block has no previous, so it must be created manually (with index zero
and arbitrary previous hash)"""
return Block(0, time.time(), {
"proof-of-work": 9,
"transactions": None},
"0")
# Node's blockchain copy
BLOCKCHAIN = [create_genesis_block()]
""" Stores the transactions that this node has in a list.
If the node you sent the transaction adds a block
it will get accepted, but there is a chance it gets
discarded and your transaction goes back as if it was never
processed"""
NODE_PENDING_TRANSACTIONS = []
def proof_of_work(last_proof, blockchain):
# Creates a variable that we will use to find our next proof of work
incrementer = last_proof + 1
# Keep incrementing the incrementer until it is divisible by 7919 and by
# the proof of work of the previous block in the chain
start_time = time.time()
while not (incrementer % 7919 == 0 and incrementer % last_proof == 0):
incrementer += 1
# Check if any node found the solution every 60 seconds
if int((time.time()-start_time) % 60) == 0:
# If any other node got the proof, stop searching
new_blockchain = consensus(blockchain)
if new_blockchain:
# (False: another node got proof first, new blockchain)
return False, new_blockchain
# Once that number is found, we can return it as a proof of our work
return incrementer, blockchain
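# Worked example of the rule above: if the previous proof was 9, the next
# proof is the smallest number divisible by both 7919 and 9, i.e.
# lcm(7919, 9) = 71271 (7919 is prime, so the lcm is simply 7919 * 9).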
def mine(a, blockchain, node_pending_transactions):
BLOCKCHAIN = blockchain
NODE_PENDING_TRANSACTIONS = node_pending_transactions
while True:
"""Mining is the only way that new coins can be created.
In order to prevent too many coins from being created, the process
is slowed down by a proof of work algorithm.
"""
# Get the last proof of work
last_block = BLOCKCHAIN[-1]
last_proof = last_block.data['proof-of-work']
# Find the proof of work for the current block being mined
# Note: The program will hang here until a new proof of work is found
proof = proof_of_work(last_proof, BLOCKCHAIN)
# If we didn't guess the proof, start mining again
if not proof[0]:
# Update blockchain and save it to file
BLOCKCHAIN = proof[1]
a.send(BLOCKCHAIN)
continue
else:
# Once we find a valid proof of work, we know we can mine a block so
# ...we reward the miner by adding a transaction
# First we load all pending transactions sent to the node server
NODE_PENDING_TRANSACTIONS = requests.get(url = MINER_NODE_URL + '/txion', params = {'update':MINER_ADDRESS}).content
NODE_PENDING_TRANSACTIONS = json.loads(NODE_PENDING_TRANSACTIONS)
# Then we add the mining reward
NODE_PENDING_TRANSACTIONS.append({
"from": "network",
"to": MINER_ADDRESS,
"amount": 1})
# Now we can gather the data needed to create the new block
new_block_data = {
"proof-of-work": proof[0],
"transactions": list(NODE_PENDING_TRANSACTIONS)
}
new_block_index = last_block.index + 1
new_block_timestamp = time.time()
last_block_hash = last_block.hash
# Empty transaction list
NODE_PENDING_TRANSACTIONS = []
# Now create the new block
mined_block = Block(new_block_index, new_block_timestamp, new_block_data, last_block_hash)
BLOCKCHAIN.append(mined_block)
# Let the client know this node mined a block
print(json.dumps({
"index": new_block_index,
"timestamp": str(new_block_timestamp),
"data": new_block_data,
"hash": last_block_hash
}) + "\n")
a.send(BLOCKCHAIN)
requests.get(url = MINER_NODE_URL + '/blocks', params = {'update':MINER_ADDRESS})
def find_new_chains():
# Get the blockchains of every other node
other_chains = []
for node_url in PEER_NODES:
# Get their chains using a GET request
block = requests.get(url = node_url + "/blocks").content
# Convert the JSON object to a Python dictionary
block = json.loads(block)
# Verify other node block is correct
validated = validate_blockchain(block)
if validated:
# Add it to our list
other_chains.append(block)
return other_chains
def consensus(blockchain):
# Get the blocks from other nodes
other_chains = find_new_chains()
# If our chain isn't longest, then we store the longest chain
BLOCKCHAIN = blockchain
longest_chain = BLOCKCHAIN
for chain in other_chains:
if len(longest_chain) < len(chain):
longest_chain = chain
# If the longest chain wasn't ours, then we set our chain to the longest
if longest_chain == BLOCKCHAIN:
# Keep searching for proof
return False
else:
# Give up searching proof, update chain and start over again
BLOCKCHAIN = longest_chain
return BLOCKCHAIN
def validate_blockchain(block):
"""Validate the submitted chain. If hashes are not correct, return False.
block(str): JSON
"""
# NOTE: validation is not implemented yet; every submitted chain is accepted.
return True
@node.route('/blocks', methods=['GET'])
def get_blocks():
# Load current blockchain. Only you should update your blockchain
if request.args.get("update") == MINER_ADDRESS:
global BLOCKCHAIN
BLOCKCHAIN = b.recv()
chain_to_send = BLOCKCHAIN
# Converts our blocks into dictionaries so we can send them as json objects later
chain_to_send_json = []
for block in chain_to_send:
block = {
"index": str(block.index),
"timestamp": str(block.timestamp),
"data": str(block.data),
"hash": block.hash
}
chain_to_send_json.append(block)
# Send our chain to whomever requested it
chain_to_send = json.dumps(chain_to_send_json)
return chain_to_send
@node.route('/txion', methods=['GET', 'POST'])
def transaction():
"""Each transaction sent to this node gets validated and submitted.
Then it waits to be added to the blockchain. Transactions only move
coins, they don't create them.
"""
if request.method == 'POST':
# On each new POST request, we extract the transaction data
new_txion = request.get_json()
# Then we add the transaction to our list
if validate_signature(new_txion['from'], new_txion['signature'], new_txion['message']):
NODE_PENDING_TRANSACTIONS.append(new_txion)
# Because the transaction was successfully
# submitted, we log it to our console
print("New transaction")
print("FROM: {0}".format(new_txion['from']))
print("TO: {0}".format(new_txion['to']))
print("AMOUNT: {0}\n".format(new_txion['amount']))
# Then we let the client know it worked out
return "Transaction submission successful\n"
else:
return "Transaction submission failed. Wrong signature\n"
# Send pending transactions to the mining process
elif request.method == 'GET' and request.args.get("update") == MINER_ADDRESS:
pending = json.dumps(NODE_PENDING_TRANSACTIONS)
# Empty transaction list
NODE_PENDING_TRANSACTIONS[:] = []
return pending
def validate_signature(public_key, signature, message):
"""Verifies if the signature is correct. This is used to prove
it's you (and not someone else) trying to do a transaction with your
address. Called when a user tries to submit a new transaction.
"""
public_key = (base64.b64decode(public_key)).hex()
signature = base64.b64decode(signature)
vk = ecdsa.VerifyingKey.from_string(bytes.fromhex(public_key), curve=ecdsa.SECP256k1)
    # vk.verify() raises ecdsa.BadSignatureError when the signature does not
    # match, so catch that specific error instead of using a bare except.
    try:
        return vk.verify(signature, message.encode())
    except ecdsa.BadSignatureError:
        return False
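# Illustrative only (a sketch, never called by this node): how a wallet could
# build a transaction payload that validate_signature() accepts. The helper
# name and the "to"/"amount" values are hypothetical; ecdsa and base64 are
# already imported for validate_signature() above.
def example_signed_transaction(to_address="receiver", amount=1):
    signing_key = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1)
    verifying_key = signing_key.get_verifying_key()
    public_key = base64.b64encode(verifying_key.to_string()).decode()
    message = "{} sends {} to {}".format(public_key, amount, to_address)
    signature = base64.b64encode(signing_key.sign(message.encode())).decode()
    transaction = {
        "from": public_key,
        "to": to_address,
        "amount": amount,
        "signature": signature,
        "message": message,
    }
    # The payload round-trips through the same check used by the /txion route
    assert validate_signature(transaction["from"], transaction["signature"], transaction["message"])
    return transaction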
def welcome_msg():
print(""" =========================================\n
KW DOLLAR v1.0.0 - BLOCKCHAIN SYSTEM\n
=========================================\n\n
You can find more help at: https://github.com/kwcash/kwdollar\n
    Make sure you are using the latest version or you may end up
    on a parallel chain.\n\n\n""")
if __name__ == '__main__':
welcome_msg()
# Start mining
a, b = Pipe()
p1 = Process(target=mine, args=(a, BLOCKCHAIN, NODE_PENDING_TRANSACTIONS))
p1.start()
# Start server to receive transactions
    # Pass the callable itself (not the result of calling it) to Process
    p2 = Process(target=node.run, args=())
p2.start()
|
test_basic_3.py
|
# coding: utf-8
import gc
import logging
import os
import sys
import time
import subprocess
import numpy as np
import pytest
import ray.cluster_utils
from ray._private.test_utils import (
dicts_equal,
wait_for_pid_to_exit,
wait_for_condition,
)
from ray.autoscaler._private.constants import RAY_PROCESSES
from pathlib import Path
import ray
import psutil
logger = logging.getLogger(__name__)
def test_auto_global_gc(shutdown_only):
# 100MB
ray.init(num_cpus=1, object_store_memory=100 * 1024 * 1024)
@ray.remote
class Test:
def __init__(self):
self.collected = False
import gc
gc.disable()
def gc_called(phase, info):
self.collected = True
gc.callbacks.append(gc_called)
def circular_ref(self):
# 20MB
buf1 = b"0" * (10 * 1024 * 1024)
buf2 = b"1" * (10 * 1024 * 1024)
ref1 = ray.put(buf1)
ref2 = ray.put(buf2)
b = []
a = []
b.append(a)
a.append(b)
b.append(ref1)
a.append(ref2)
return a
def collected(self):
return self.collected
test = Test.remote()
# 60MB
for i in range(3):
ray.get(test.circular_ref.remote())
time.sleep(2)
assert not ray.get(test.collected.remote())
# 80MB
for _ in range(1):
ray.get(test.circular_ref.remote())
time.sleep(2)
assert ray.get(test.collected.remote())
@pytest.mark.skipif(sys.platform == "win32", reason="Hangs on windows")
def test_many_fractional_resources(shutdown_only):
ray.init(num_cpus=2, num_gpus=2, resources={"Custom": 2})
@ray.remote
def g():
return 1
@ray.remote
def f(block, accepted_resources):
true_resources = {
resource: value[0][1]
for resource, value in ray.worker.get_resource_ids().items()
}
if block:
ray.get(g.remote())
return dicts_equal(true_resources, accepted_resources)
    # Check that the resources are assigned correctly.
result_ids = []
for rand1, rand2, rand3 in np.random.uniform(size=(100, 3)):
resource_set = {"CPU": int(rand1 * 10000) / 10000}
result_ids.append(f._remote([False, resource_set], num_cpus=rand1))
resource_set = {"CPU": 1, "GPU": int(rand1 * 10000) / 10000}
result_ids.append(f._remote([False, resource_set], num_gpus=rand1))
resource_set = {"CPU": 1, "Custom": int(rand1 * 10000) / 10000}
result_ids.append(
f._remote([False, resource_set], resources={"Custom": rand1}))
resource_set = {
"CPU": int(rand1 * 10000) / 10000,
"GPU": int(rand2 * 10000) / 10000,
"Custom": int(rand3 * 10000) / 10000
}
result_ids.append(
f._remote(
[False, resource_set],
num_cpus=rand1,
num_gpus=rand2,
resources={"Custom": rand3}))
result_ids.append(
f._remote(
[True, resource_set],
num_cpus=rand1,
num_gpus=rand2,
resources={"Custom": rand3}))
assert all(ray.get(result_ids))
# Check that the available resources at the end are the same as the
# beginning.
stop_time = time.time() + 10
correct_available_resources = False
while time.time() < stop_time:
available_resources = ray.available_resources()
if ("CPU" in available_resources
and ray.available_resources()["CPU"] == 2.0
and "GPU" in available_resources
and ray.available_resources()["GPU"] == 2.0
and "Custom" in available_resources
and ray.available_resources()["Custom"] == 2.0):
correct_available_resources = True
break
if not correct_available_resources:
assert False, "Did not get correct available resources."
@pytest.mark.skipif(sys.platform == "win32", reason="Fails on windows")
def test_background_tasks_with_max_calls(shutdown_only):
ray.init(
# TODO (Alex): We need to fix
# https://github.com/ray-project/ray/issues/20203 to remove this flag.
num_cpus=2,
_system_config={"worker_cap_initial_backoff_delay_ms": 0})
@ray.remote
def g():
time.sleep(.1)
return 0
@ray.remote(max_calls=1, max_retries=0)
def f():
return [g.remote()]
nested = ray.get([f.remote() for _ in range(10)])
# Should still be able to retrieve these objects, since f's workers will
# wait for g to finish before exiting.
ray.get([x[0] for x in nested])
@ray.remote(max_calls=1, max_retries=0)
def f():
return os.getpid(), g.remote()
nested = ray.get([f.remote() for _ in range(10)])
while nested:
pid, g_id = nested.pop(0)
assert ray.get(g_id) == 0
del g_id
# Necessary to dereference the object via GC, so the worker can exit.
gc.collect()
wait_for_pid_to_exit(pid)
@pytest.mark.skipif(sys.platform == "win32", reason="Hangs on windows")
def test_fair_queueing(shutdown_only):
ray.init(
num_cpus=1,
_system_config={
            # Having parallel leases is slow in this case:
            # because tasks are scheduled FIFO, the more parallelism
            # we have, the more workers we need to start to execute
            # f and g tasks before we can execute the first h task.
"max_pending_lease_requests_per_scheduling_category": 1,
"worker_cap_enabled": True,
})
@ray.remote
def h():
return 0
@ray.remote
def g():
return ray.get(h.remote())
@ray.remote
def f():
return ray.get(g.remote())
# This will never finish without fair queueing of {f, g, h}:
# https://github.com/ray-project/ray/issues/3644
timeout = 510.0 if sys.platform == "win32" else 60.0
ready, _ = ray.wait(
[f.remote() for _ in range(1000)], timeout=timeout, num_returns=1000)
assert len(ready) == 1000, len(ready)
@pytest.mark.skipif(sys.platform == "win32", reason="Hangs on windows")
def test_actor_killing(shutdown_only):
    # This tests creating and killing an actor immediately
import ray
ray.init(num_cpus=1)
@ray.remote(num_cpus=1)
class Actor:
def foo(self):
return None
worker_1 = Actor.remote()
ray.kill(worker_1)
worker_2 = Actor.remote()
assert ray.get(worker_2.foo.remote()) is None
ray.kill(worker_2)
worker_1 = Actor.options(max_restarts=1).remote()
ray.kill(worker_1, no_restart=False)
assert ray.get(worker_1.foo.remote()) is None
ray.kill(worker_1, no_restart=False)
worker_2 = Actor.remote()
assert ray.get(worker_2.foo.remote()) is None
def test_actor_scheduling(shutdown_only):
ray.init()
@ray.remote
class A:
def run_fail(self):
ray.actor.exit_actor()
def get(self):
return 1
a = A.remote()
a.run_fail.remote()
with pytest.raises(Exception):
ray.get([a.get.remote()])
@pytest.mark.skipif(sys.platform == "win32", reason="Fails on windows")
def test_worker_startup_count(ray_start_cluster):
"""Test that no extra workers started while no available cpu resources
in cluster."""
cluster = ray_start_cluster
# Cluster total cpu resources is 4.
cluster.add_node(
num_cpus=4, _system_config={
"debug_dump_period_milliseconds": 100,
})
ray.init(address=cluster.address)
# A slow function never returns. It will hold cpu resources all the way.
@ray.remote
def slow_function():
while True:
time.sleep(1000)
    # Flood the raylet with a large number of lease worker requests.
for i in range(10000):
# Use random cpu resources to make sure that all tasks are sent
# to the raylet. Because core worker will cache tasks with the
# same resource shape.
num_cpus = 0.24 + np.random.uniform(0, 0.01)
slow_function.options(num_cpus=num_cpus).remote()
# Check "debug_state.txt" to ensure no extra workers were started.
session_dir = ray.worker.global_worker.node.address_info["session_dir"]
session_path = Path(session_dir)
debug_state_path = session_path / "debug_state.txt"
def get_num_workers():
with open(debug_state_path) as f:
for line in f.readlines():
num_workers_prefix = "- num PYTHON workers: "
if num_workers_prefix in line:
num_workers = int(line[len(num_workers_prefix):])
return num_workers
return None
# Wait for "debug_state.txt" to be updated to reflect the started worker.
timeout_limit = 40 if sys.platform == "win32" else 10
start = time.time()
wait_for_condition(lambda: get_num_workers() == 16, timeout=timeout_limit)
time_waited = time.time() - start
print(f"Waited {time_waited} for debug_state.txt to be updated")
# Check that no more workers started for a while.
for i in range(100):
# Sometimes the debug state file can be empty. Retry if needed.
for _ in range(3):
num = get_num_workers()
if num is None:
print("Retrying parse debug_state.txt")
time.sleep(0.05)
else:
break
assert num == 16
time.sleep(0.1)
@pytest.mark.skipif(sys.platform == "win32", reason="Hangs on windows")
def test_function_unique_export(ray_start_regular):
@ray.remote
def f():
pass
@ray.remote
def g():
ray.get(f.remote())
ray.get(g.remote())
num_exports = ray.worker.global_worker.redis_client.llen("Exports")
ray.get([g.remote() for _ in range(5)])
assert ray.worker.global_worker.redis_client.llen("Exports") == num_exports
@pytest.mark.skipif(
sys.platform not in ["win32", "darwin"],
reason="Only listen on localhost by default on mac and windows.")
@pytest.mark.parametrize("start_ray", ["ray_start_regular", "call_ray_start"])
def test_listen_on_localhost(start_ray, request):
"""All ray processes should listen on localhost by default
on mac and windows to prevent security popups.
"""
request.getfixturevalue(start_ray)
process_infos = []
for proc in psutil.process_iter(["name", "cmdline"]):
try:
process_infos.append((proc, proc.name(), proc.cmdline()))
except psutil.Error:
pass
for keyword, filter_by_cmd in RAY_PROCESSES:
for candidate in process_infos:
proc, proc_cmd, proc_cmdline = candidate
corpus = (proc_cmd if filter_by_cmd else
subprocess.list2cmdline(proc_cmdline))
if keyword not in corpus:
continue
for connection in proc.connections():
if connection.status != psutil.CONN_LISTEN:
continue
# ip can be 127.0.0.1 or ::127.0.0.1
assert "127.0.0.1" in connection.laddr.ip
def test_job_id_consistency(ray_start_regular):
@ray.remote
def foo():
return "bar"
@ray.remote
class Foo:
def ping(self):
return "pong"
@ray.remote
def verify_job_id(job_id, new_thread):
def verify():
current_task_id = ray.runtime_context.get_runtime_context().task_id
assert job_id == current_task_id.job_id()
obj1 = foo.remote()
assert job_id == obj1.job_id()
obj2 = ray.put(1)
assert job_id == obj2.job_id()
a = Foo.remote()
assert job_id == a._actor_id.job_id
obj3 = a.ping.remote()
assert job_id == obj3.job_id()
if not new_thread:
verify()
else:
exc = []
def run():
try:
verify()
except BaseException as e:
exc.append(e)
import threading
t = threading.Thread(target=run)
t.start()
t.join()
if len(exc) > 0:
raise exc[0]
job_id = ray.runtime_context.get_runtime_context().job_id
ray.get(verify_job_id.remote(job_id, False))
ray.get(verify_job_id.remote(job_id, True))
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
configuration.py
|
# Copyright 2021 Amazon.com.
# SPDX-License-Identifier: MIT
import typing
from . import _asyncio_wrapper
import asyncio
from threading import Thread
from typing import Callable
import awsiot.greengrasscoreipc.client as client
from awsiot.greengrasscoreipc import connect
from awsiot.greengrasscoreipc.model import (
GetConfigurationRequest,
SendConfigurationValidityReportRequest,
UpdateConfigurationRequest,
SubscribeToConfigurationUpdateRequest,
SubscribeToValidateConfigurationUpdatesRequest,
ConfigurationUpdateEvents,
ConfigurationUpdateEvent,
ValidateConfigurationUpdateEvent,
ValidateConfigurationUpdateEvents,
ConfigurationValidityReport,
ConfigurationValidityStatus
)
import concurrent.futures
from . import BaseClient
class _ConfigurationUpdateStreamHandler(client.SubscribeToConfigurationUpdateStreamHandler):
def __init__(self, handler: Callable[[ConfigurationUpdateEvent], None], error_handler: Callable[[Exception], None] ):
self._handler = handler
self._error_handler = error_handler
def on_stream_event(self, event: ConfigurationUpdateEvents) -> None:
update_event = event.configuration_update_event
t = Thread(target=self._handler, args=[update_event.component_name, "/"+"/".join(update_event.key_path)])
t.start()
def on_stream_error(self, error: Exception)-> bool:
t = Thread(target=self._error_handler, args=[error])
t.start()
return True
def on_stream_closed(self) -> None:
pass
class _ValidateConfigurationStreamHandler(client.SubscribeToValidateConfigurationUpdatesStreamHandler):
def __init__(self, ipc_client, handler: Callable[[typing.Dict[str, typing.Any]], bool], error_handler: Callable[[Exception], None], timeout: int ):
self._handler = handler
self._error_handler = error_handler
self._ipc_client = ipc_client
self._timeout = timeout
def on_stream_event(self, event: ValidateConfigurationUpdateEvents) -> None:
'''For each event we call a wrapper method that invokes the validation handler and
publishes the report with the status to the IPC
'''
validate_event = event.validate_configuration_update_event
t = Thread(target=self.response_wrapper, args=[validate_event.configuration])
t.start()
def on_stream_error(self, error: Exception)-> bool:
t = Thread(target=self._error_handler, args=[error])
t.start()
return True
def on_stream_closed(self) -> None:
pass
def response_wrapper(self, event: ValidateConfigurationUpdateEvent) -> None:
(accepted, message) = self._handler(event.configuration)
request = SendConfigurationValidityReportRequest()
report = ConfigurationValidityReport()
report.deployment_id = event.deployment_id
report.message = message
if accepted:
report.status = ConfigurationValidityStatus.ACCEPTED
else:
report.status = ConfigurationValidityStatus.REJECTED
request.configuration_validity_report = report
operation = self._ipc_client.new_send_configuration_validity_report_operation()
future = operation.activate(request)
try:
future.get_result(self._timeout)
        except Exception as ex:
raise ex
class Client(BaseClient):
"""Create an client to interact with the Component Configuration APIs.
Optionally one can pass a GreengrassCoreIPCClient and a timeout for the async operations
"""
def get_configuration_async(self, component:str, key_path:str) -> asyncio.Future:
"""Gets the component configuration at keypath
keypath is a JSON path /key1/key2/0
        Returns a Future that resolves to a GetConfigurationResponse object
"""
request = GetConfigurationRequest()
request.component_name = component
request.key_path = key_path
operation = self._ipc_client.new_get_configuration()
operation.activate(request)
future = operation.get_response()
return future
def get_configuration(self, component:str, key_path:str) -> typing.Any:
"""Publishes a message synchronously to AWS IoT Core via Greengrass connection
Throws an exception if the publish fails
"""
try:
future = self.get_configuration_async(component=component, key_path=key_path)
result = future.result(self._timeout)
return result.value
except Exception as ex:
raise ex
def update_configuration_async(self, component:str, key_path:str, value: typing.Dict[str, typing.Any]) -> concurrent.futures.Future:
"""Updates the component configuration at keypath by merging the value
keypath is a JSON path
        Returns a Future that resolves to an UpdateConfigurationResponse object
"""
request = UpdateConfigurationRequest()
request.component_name = component
request.key_path = key_path.split("/")
request.value_to_merge = value
operation = self._ipc_client.new_update_configuration()
operation.activate(request)
future = operation.get_response()
return future
def update_configuration(self, component:str, key_path:str, value: typing.Dict[str, typing.Any]) -> None:
"""Updates the component configuration at keypath by merging the value
keypath is a JSON path
Throws an exception if the publish fails
"""
try:
future = self.update_configuration_async(component=component, key_path=key_path, value=value)
future.result(self._timeout)
except Exception as ex:
raise ex
def subscribe_to_validate_requests_async(self, handler: Callable[[typing.Dict[str, typing.Any]], bool], error_handler: Callable[[Exception], None]) -> concurrent.futures.Future:
"""Subscribes to configuration validation requests.
        The handler should accept a dict with the proposed configuration and return a (accepted: bool, message: str) tuple.
Unhandled exceptions in the handler code will be sent to the error_handler
"""
request = SubscribeToValidateConfigurationUpdatesRequest()
        _handler = _ValidateConfigurationStreamHandler(self._ipc_client, handler, error_handler, timeout=self._timeout)
operation = self._ipc_client.new_subscribe_to_validate_configuration_updates(_handler)
operation.activate(request)
future = operation.get_response()
return future
def subscribe_to_validate_requests(self, handler: Callable[[typing.Dict[str, typing.Any]], bool]):
"""Subscribes to configuration validation requests.
        The handler should accept a dict with the proposed configuration and return a (accepted: bool, message: str) tuple.
        Throws an exception on errors
"""
try:
future = self.subscribe_to_validate_requests_async(handler, self._sync_error_handler)
future.result(self._timeout)
except Exception as ex:
raise ex
def subscribe_to_configuration_updates_async(self, topic: str, handler: Callable[[str, str], None], error_handler: Callable[[Exception], None]) -> concurrent.futures.Future:
"""Subscribes to configuration updates
Unhandled exceptions in the handler code will be sent to the error_handler
        The handler function receives two parameters: the component_name and the key_path of the configuration that changed. key_path is in JSON Path format.
        The handler is responsible for calling 'get_configuration'
"""
request = SubscribeToConfigurationUpdateRequest()
_handler = _ConfigurationUpdateStreamHandler(handler, error_handler)
operation = self._ipc_client.new_subscribe_to_configuration_updates(_handler)
operation.activate(request)
future = operation.get_response()
return future
def subscribe_to_configuration_updates(self, topic: str, handler: Callable[[str, str], None]):
"""Subscribes to configuration updates
        The handler function receives two parameters: the component_name and the key_path of the configuration that changed. key_path is in JSON Path format.
        The handler is responsible for calling 'get_configuration'
"""
try:
future = self.subscribe_to_configuration_updates_async(topic, handler, self._sync_error_handler)
future.result(self._timeout)
except Exception as ex:
raise ex
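# Usage sketch (illustrative only, not called by this module). The component
# name, key path and value below are hypothetical, and the client is assumed
# to run inside a Greengrass component where the IPC connection is available.
def _example_usage():  # pragma: no cover
    client = Client()
    interval = client.get_configuration(component="com.example.HelloWorld", key_path="/interval")
    client.update_configuration(component="com.example.HelloWorld", key_path="/interval",
                                value={"interval": 30})
    def on_update(component_name, key_path):
        print("configuration changed:", component_name, key_path)
    client.subscribe_to_configuration_updates(topic="", handler=on_update)
    return interval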
|
_debugger_case_check_tracer.py
|
import threading, atexit, sys
from collections import namedtuple
import os.path
if sys.version_info[0] >= 3:
from _thread import start_new_thread
else:
from thread import start_new_thread
FrameInfo = namedtuple('FrameInfo', 'filename, name, f_trace')
def _atexit():
sys.stderr.flush()
sys.stdout.flush()
# Register the TEST SUCEEDED msg to the exit of the process.
atexit.register(_atexit)
def _iter_frame_info(frame):
while frame is not None:
yield FrameInfo(
os.path.basename(frame.f_code.co_filename),
frame.f_code.co_name,
frame.f_trace.__name__ if frame.f_trace is not None else "None"
)
frame = frame.f_back
def check_frame_info(expected):
found = list(_iter_frame_info(sys._getframe().f_back))
def fail():
raise AssertionError('Expected:\n%s\n\nFound:\n%s\n' % (
'\n'.join(str(x) for x in expected),
'\n'.join(str(x) for x in found)))
for found_info, expected_info in zip(found, expected):
if found_info.filename != expected_info.filename or found_info.name != expected_info.name:
fail()
for f_trace in expected_info.f_trace.split('|'):
if f_trace == found_info.f_trace:
break
else:
fail()
def thread_func():
if sys.version_info[0] >= 3:
check_frame_info([
FrameInfo(filename='_debugger_case_check_tracer.py', name='thread_func', f_trace='trace_exception'),
FrameInfo(filename='threading.py', name='run', f_trace='None'),
FrameInfo(filename='threading.py', name='_bootstrap_inner', f_trace='trace_unhandled_exceptions'),
FrameInfo(filename='threading.py', name='_bootstrap', f_trace='None'),
FrameInfo(filename='pydev_monkey.py', name='__call__', f_trace='None')
])
else:
check_frame_info([
FrameInfo(filename='_debugger_case_check_tracer.py', name='thread_func', f_trace='trace_exception'),
FrameInfo(filename='threading.py', name='run', f_trace='None'),
FrameInfo(filename='threading.py', name='__bootstrap_inner', f_trace='trace_unhandled_exceptions'),
FrameInfo(filename='threading.py', name='__bootstrap', f_trace='None'),
FrameInfo(filename='pydev_monkey.py', name='__call__', f_trace='None'),
])
th = threading.Thread(target=thread_func)
th.setDaemon(True)
th.start()
event = threading.Event()
def thread_func2():
try:
check_frame_info([
FrameInfo(filename='_debugger_case_check_tracer.py', name='thread_func2', f_trace='trace_exception'),
FrameInfo(filename='pydev_monkey.py', name='__call__', f_trace='trace_unhandled_exceptions')
])
finally:
event.set()
start_new_thread(thread_func2, ())
event.wait()
th.join()
# This is a bit tricky: although we waited on the event, there's a slight chance
# that we didn't get the notification because the thread could've stopped executing,
# so, sleep a bit so that the test does not become flaky.
import time
time.sleep(.3)
if sys.version_info[0] >= 3:
check_frame_info([
FrameInfo(filename='_debugger_case_check_tracer.py', name='<module>', f_trace='trace_exception'),
FrameInfo(filename='_pydev_execfile.py', name='execfile', f_trace='None'),
FrameInfo(filename='pydevd.py', name='_exec', f_trace='trace_unhandled_exceptions'),
FrameInfo(filename='pydevd.py', name='run', f_trace='trace_dispatch|None'),
FrameInfo(filename='pydevd.py', name='main', f_trace='trace_dispatch'),
FrameInfo(filename='pydevd.py', name='<module>', f_trace='trace_dispatch')
])
else:
check_frame_info([
FrameInfo(filename='_debugger_case_check_tracer.py', name='<module>', f_trace='trace_exception'),
FrameInfo(filename='pydevd.py', name='_exec', f_trace='trace_unhandled_exceptions'),
FrameInfo(filename='pydevd.py', name='run', f_trace='trace_dispatch|None'),
FrameInfo(filename='pydevd.py', name='main', f_trace='trace_dispatch'),
FrameInfo(filename='pydevd.py', name='<module>', f_trace='trace_dispatch'),
])
print('TEST SUCEEDED')
|
gps.py
|
import argparse
from functools import reduce
import logging
import operator
import os
import platform
import threading
import time
import pynmea2
import serial
import utm
logger = logging.getLogger(__name__)
def is_mac():
return "Darwin" == platform.system()
class Gps:
def __init__(self, serial:str, baudrate:int = 9600, timeout:float = 0.5, debug = False):
self.serial = serial
self.baudrate = baudrate
self.timeout = timeout
self.debug = debug
self.positions = [] # tuple of (timestamp, longitude, latitude)
self.gps = None
self.lock = threading.Lock()
self.running = True
self._open()
self.clear()
def _open(self):
with self.lock:
self.gps = serial.Serial(self.serial, baudrate=self.baudrate, timeout=self.timeout)
def clear(self):
"""
Clear the positions buffer
"""
with self.lock:
try:
if self.gps is not None and self.gps.is_open:
self.positions = []
self.gps.reset_input_buffer()
except serial.serialutil.SerialException:
pass
def _readline(self) -> str:
"""
Read a line from the gps in a threadsafe manner
returns line if read and None if no line was read
"""
if self.lock.acquire(blocking=False):
try:
# TODO: Serial.in_waiting _always_ returns 0 in Macintosh
if self.gps is not None and self.gps.is_open and (is_mac() or self.gps.in_waiting):
return self.gps.readline().decode()
except serial.serialutil.SerialException:
pass
except UnicodeDecodeError:
                # the first sentence often includes mis-framed garbage
pass # ignore and keep going
finally:
self.lock.release()
return None
def poll(self, timestamp=None):
#
# read a line and convert to a position
# in a threadsafe manner
#
# if there are characters waiting
# then read the line and parse it
#
if self.running:
if timestamp is None:
timestamp = time.time()
line = self._readline()
if line:
if self.debug:
logger.info(line)
position = getGpsPosition(line, debug=self.debug)
if position:
                    # (timestamp, x, y) where x, y come from getGpsPosition()
return (timestamp, position[0], position[1])
return None
def run(self):
if self.running:
#
# in non-threaded mode, just read a single reading and return it
#
if self.gps is not None:
position = self.poll(time.time())
if position:
# [(timestamp, longitude, latitude)]
return [position]
return []
def run_threaded(self):
if not self.running:
return []
#
# return the accumulated readings
#
with self.lock:
positions = self.positions
self.positions = []
return positions
def update(self):
#
# open serial port and run an infinite loop.
# NOTE: this is NOT compatible with non-threaded run()
#
buffered_positions = [] # local read buffer
while self.running:
position = self.poll(time.time())
if position:
buffered_positions.append(position)
if buffered_positions:
#
# make sure we access self.positions in
# a threadsafe manner.
# This will NOT block:
# - If it can't write then it will leave
# readings in buffered_positions.
# - If it can write then it will moved the
# buffered_positions into self.positions
# and clear the buffer.
#
if self.lock.acquire(blocking=False):
try:
self.positions += buffered_positions
buffered_positions = []
finally:
self.lock.release()
time.sleep(0) # give other threads time
def shutdown(self):
self.running = False
with self.lock:
try:
if self.gps is not None and self.gps.is_open:
self.gps.close()
except serial.serialutil.SerialException:
pass
self.gps = None
def getGpsPosition(line, debug=False):
"""
Given a line emitted by a GPS module,
    parse out the position and return it as a
    tuple of floats in meters (a UTM projection of the reported latitude/longitude).
If it cannot be parsed or is not a position message,
then return None.
"""
if not line:
return None
line = line.strip()
if not line:
return None
#
# must start with $ and end with checksum
#
if '$' != line[0]:
logger.info("NMEA Missing line start")
return None
if '*' != line[-3]:
logger.info("NMEA Missing checksum")
return None
nmea_checksum = parse_nmea_checksum(line) # ## checksum hex digits as int
nmea_msg = line[1:-3] # msg without $ and *## checksum
nmea_parts = nmea_msg.split(",")
message = nmea_parts[0]
if (message == "GPRMC") or (message == "GNRMC"):
#
# like '$GPRMC,003918.00,A,3806.92281,N,12235.64362,W,0.090,,060322,,,D*67'
# GPRMC = Recommended minimum specific GPS/Transit data
#
# make sure the checksum checks out
#
calculated_checksum = calculate_nmea_checksum(line)
if nmea_checksum != calculated_checksum:
logger.info(f"NMEA checksum does not match: {nmea_checksum} != {calculated_checksum}")
return None
#
# parse against a known parser to check our parser
# TODO: if we hit a lot of corner cases that cause our
        # parser to fail, then switch over to the library.
# Conversely, if our parser works then use it as
# it is very lightweight.
#
if debug:
try:
msg = pynmea2.parse(line)
logger.info(f"nmea.longitude({msg.longitude}, nmea.latitude({msg.latitude})")
except pynmea2.ParseError as e:
logger.info('Ignoring NMEA parse error: {}'.format(e))
# Reading the GPS fix data is an alternative approach that also works
if nmea_parts[2] == 'V':
# V = Warning, most likely, there are no satellites in view...
logger.info("GPS receiver warning; position not valid")
else:
#
# Convert the textual nmea position into degrees
#
longitude = nmea_to_degrees(nmea_parts[5], nmea_parts[6])
latitude = nmea_to_degrees(nmea_parts[3], nmea_parts[4])
# print(f"Your position: lon = ({longitude}), lat = ({latitude})")
#
# convert position in degrees to local meters
#
utm_position = utm.from_latlon(latitude, longitude)
# print(f"Your utm position: lon - ({utm_position[1]:.6f}), lat = ({utm_position[0]:.6f})")
            # return the UTM position in meters: [1] is northing, [0] is easting
            return (utm_position[1], utm_position[0])
else:
# Non-position message OR invalid string
# print(f"Ignoring line {line}")
pass
return None
def parse_nmea_checksum(nmea_line):
"""
    Given the complete nmea line (including starting '$' and ending checksum '*##'),
    extract the trailing checksum hex digits and return them as an int.
NOTE: this does not check for structural correctness, so you
should check that '$' and '*##' checksum are present before
calling this function.
"""
return int(nmea_line[-2:], 16) # checksum hex digits as int
def calculate_nmea_checksum(nmea_line):
"""
Given the complete nmea line (including starting '$' and ending checksum '*##')
calculate the checksum from the body of the line.
NOTE: this does not check for structural correctness, so you
    should check that '$' and '*##' checksum are present
    before calling this function.
"""
#
# xor all characters in the message to get a one byte checksum.
# don't include starting '$' or trailing checksum '*##'
#
return reduce(operator.xor, map(ord, nmea_line[1:-3]), 0)
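# Illustrative helper (a sketch, not used by the Gps class): a well-formed NMEA
# sentence is one whose trailing *## checksum equals the XOR of its body, which
# mirrors the comparison performed inside getGpsPosition().
def nmea_checksum_matches(nmea_line):
    return parse_nmea_checksum(nmea_line) == calculate_nmea_checksum(nmea_line)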
def nmea_to_degrees(gps_str, direction):
"""
Convert a gps coordinate string formatted as:
DDDMM.MMMMM, where DDD denotes the degrees (which may have zero to 3 digits)
and MM.MMMMM denotes the minutes
to a float in degrees.
"""
if not gps_str or gps_str == "0":
return 0
#
# pull out the degrees and minutes
# and then combine the minutes
#
parts = gps_str.split(".")
degrees_str = parts[0][:-2] # results in zero to 3 digits
minutes_str = parts[0][-2:] # always results in 2 digits
if 2 == len(parts):
minutes_str += "." + parts[1] # combine whole and fractional minutes
#
# convert degrees to a float
#
degrees = 0.0
if len(degrees_str) > 0:
degrees = float(degrees_str)
#
# convert minutes a float in degrees
#
minutes = 0.0
if len(minutes_str) > 0:
minutes = float(minutes_str) / 60
#
# sum up the degrees and apply the direction as a sign
#
return (degrees + minutes) * (-1 if direction in ['W', 'S'] else 1)
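# Worked examples (illustrative only, never called by the module); the sample
# values come from the GPRMC sentence shown in getGpsPosition() above.
def _nmea_to_degrees_examples():
    # 3806.92281 N  -> 38 degrees + 6.92281 minutes           ->  +38.115380...
    assert abs(nmea_to_degrees("3806.92281", "N") - (38 + 6.92281 / 60)) < 1e-9
    # 12235.64362 W -> 122 degrees + 35.64362 minutes, negated for West -> -122.594060...
    assert abs(nmea_to_degrees("12235.64362", "W") + (122 + 35.64362 / 60)) < 1e-9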
#
# The __main__ self test can log position or optionally record a set of waypoints
#
if __name__ == "__main__":
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import sys
import readchar
def stats(data):
"""
Calculate (min, max, mean, std_deviation) of a list of floats
"""
if not data:
return None
count = len(data)
min = None
max = None
sum = 0
for x in data:
if min is None or x < min:
min = x
if max is None or x > max:
max = x
sum += x
mean = sum / count
sum_errors_squared = 0
for x in data:
error = x - mean
sum_errors_squared += (error * error)
std_deviation = math.sqrt(sum_errors_squared / count)
return Stats(count, sum, min, max, mean, std_deviation)
class Stats:
"""
Statistics for a set of data
"""
def __init__(self, count, sum, min, max, mean, std_deviation):
self.count = count
self.sum = sum
self.min = min
self.max = max
self.mean = mean
self.std_deviation = std_deviation
class Waypoint:
"""
A waypoint created from multiple samples,
modelled as a non-axis-aligned (rotated) ellipsoid.
This models a waypoint based on a jittery source,
like GPS, where x and y values may not be completely
independent values.
"""
def __init__(self, samples, nstd = 1.0):
"""
Fit an ellipsoid to the given samples at the
given multiple of the standard deviation of the samples.
"""
# separate out the points by axis
self.x = [w[1] for w in samples]
self.y = [w[2] for w in samples]
# calculate the stats for each axis
self.x_stats = stats(self.x)
self.y_stats = stats(self.y)
#
# calculate a rotated ellipse that best fits the samples.
# We use a rotated ellipse because the x and y values
# of each point are not independent.
#
def eigsorted(cov):
"""
Calculate eigenvalues and eigenvectors
and return them sorted by eigenvalue.
"""
eigenvalues, eigenvectors = np.linalg.eigh(cov)
order = eigenvalues.argsort()[::-1]
return eigenvalues[order], eigenvectors[:, order]
# calculate covariance matrix between x and y values
self.cov = np.cov(self.x, self.y)
# get eigenvalues and vectors from covariance matrix
self.eigenvalues, self.eigenvectors = eigsorted(self.cov)
# calculate the ellipsoid at the given multiple of the standard deviation.
self.theta = np.degrees(np.arctan2(*self.eigenvectors[:, 0][::-1]))
self.width, self.height = 2 * nstd * np.sqrt(self.eigenvalues)
def is_inside(self, x, y):
"""
Determine if the given (x,y) point is within the waypoint's
fitted ellipsoid
"""
# if (x >= self.x_stats.min) and (x <= self.x_stats.max):
# if (y >= self.y_stats.min) and (y <= self.y_stats.max):
# return True
# return False
# if (x >= (self.x_stats.mean - self.x_stats.std_deviation)) and (x <= (self.x_stats.mean + self.x_stats.std_deviation)):
# if (y >= (self.y_stats.mean - self.y_stats.std_deviation)) and (y <= (self.y_stats.mean + self.y_stats.std_deviation)):
# return True
# return False
            # self.theta is stored in degrees (matplotlib's Ellipse convention),
            # so convert to radians before taking sin/cos.
            cos_theta = math.cos(math.radians(self.theta))
            sin_theta = math.sin(math.radians(self.theta))
            x_translated = x - self.x_stats.mean
            y_translated = y - self.y_stats.mean
            #
            # basically translate the test point into the
            # coordinate system of the ellipse (its center)
            # and then rotate the point and do a normal ellipse test.
            # self.width and self.height are full axis lengths,
            # so divide by the semi-axes (width/2, height/2).
            #
            part1 = ((cos_theta * x_translated + sin_theta * y_translated) / (self.width / 2))**2
            part2 = ((sin_theta * x_translated - cos_theta * y_translated) / (self.height / 2))**2
            return (part1 + part2) <= 1
def is_in_range(self, x, y):
"""
Determine if the given (x,y) point is within the
range of the collected waypoint samples
"""
return (x >= self.x_stats.min) and \
(x <= self.x_stats.max) and \
(y >= self.y_stats.min) and \
(y <= self.y_stats.max)
def is_in_std(self, x, y, std_multiple=1.0):
"""
Determine if the given (x, y) point is within a given
multiple of the standard deviation of the samples
on each axis.
"""
x_std = self.x_stats.std_deviation * std_multiple
y_std = self.y_stats.std_deviation * std_multiple
return (x >= (self.x_stats.mean - x_std)) and \
(x <= (self.x_stats.mean + x_std)) and \
(y >= (self.y_stats.mean - y_std)) and \
(y <= (self.y_stats.mean + y_std))
def show(self):
"""
Draw the waypoint ellipsoid
"""
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
ax = plt.subplot(111, aspect='equal')
self.plot()
plt.show()
def plot(self):
"""
Draw the waypoint ellipsoid
"""
from matplotlib.patches import Ellipse, Rectangle
import matplotlib.pyplot as plt
#define Matplotlib figure and axis
ax = plt.subplot(111, aspect='equal')
# plot the collected readings
plt.scatter(self.x, self.y)
# plot the centroid
plt.plot(self.x_stats.mean, self.y_stats.mean, marker="+", markeredgecolor="green", markerfacecolor="green")
# plot the range
bounds = Rectangle(
(self.x_stats.min, self.y_stats.min),
self.x_stats.max - self.x_stats.min,
self.y_stats.max - self.y_stats.min,
alpha=0.5,
edgecolor='red',
fill=False,
visible=True)
ax.add_artist(bounds)
# plot the ellipsoid
ellipse = Ellipse(xy=(self.x_stats.mean, self.y_stats.mean),
width=self.width, height=self.height,
angle=self.theta)
ellipse.set_alpha(0.25)
ellipse.set_facecolor('green')
ax.add_artist(ellipse)
def is_in_waypoint_range(waypoints, x, y):
i = 0
for waypoint in waypoints:
if waypoint.is_in_range(x, y):
return True, i
i += 1
return False, -1
def is_in_waypoint_std(waypoints, x, y, std):
i = 0
for waypoint in waypoints:
if waypoint.is_in_std(x, y, std):
return True, i
i += 1
return False, -1
def is_in_waypoint(waypoints, x, y):
i = 0
for waypoint in waypoints:
if waypoint.is_inside(x, y):
return True, i
i += 1
return False, -1
def plot(waypoints):
"""
Draw the waypoint ellipsoid
"""
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
ax = plt.subplot(111, aspect='equal')
for waypoint in waypoints:
waypoint.plot()
plt.show()
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--serial", type=str, required=True, help="Serial port address, like '/dev/tty.usbmodem1411'")
parser.add_argument("-b", "--baudrate", type=int, default=9600, help="Serial port baud rate.")
parser.add_argument("-t", "--timeout", type=float, default=0.5, help="Serial port timeout in seconds.")
parser.add_argument("-sp", '--samples', type=int, default = 5, help = "Number of samples per waypoint.")
parser.add_argument("-wp", "--waypoints", type=int, default = 0, help = "Number of waypoints to collect; > 0 to collect waypoints, 0 to just log position")
parser.add_argument("-nstd", "--nstd", type=float, default=1.0, help="multiple of standard deviation for ellipse.")
parser.add_argument("-th", "--threaded", action='store_true', help = "run in threaded mode.")
parser.add_argument("-db", "--debug", action='store_true', help = "Enable extra logging")
args = parser.parse_args()
if args.waypoints < 0:
print("Use waypoints > 0 to collect waypoints, use 0 waypoints to just log position")
parser.print_help()
sys.exit(0)
if args.samples <= 0:
print("Samples per waypoint must be greater than zero")
parser.print_help()
sys.exit(0)
if args.nstd <= 0:
print("Waypoint multiplier must be greater than zero")
parser.print_help()
sys.exit(0)
if args.timeout <= 0:
print("Timeout must be greater than zero")
parser.print_help()
sys.exit(0)
update_thread = None
gps_reader = None
    waypoint_count = args.waypoints # number of waypoints in the path
samples_per_waypoint = args.samples # number of readings per waypoint
waypoints = []
waypoint_samples = []
try:
gps_reader = Gps(args.serial, baudrate=args.baudrate, timeout=args.timeout, debug=args.debug)
#
# start the threaded part
# and a threaded window to show plot
#
if args.threaded:
update_thread = threading.Thread(target=gps_reader.update, args=())
update_thread.start()
def read_gps():
return gps_reader.run_threaded() if args.threaded else gps_reader.run()
ts = time.time()
state = "prompt" if waypoint_count > 0 else ""
while gps_reader.running:
readings = read_gps()
if readings:
print("")
if state == "prompt":
print(f"Move to waypoint #{len(waypoints)+1} and press the space bar and enter to start sampling or any other key to just start logging.")
state = "move"
elif state == "move":
key_press = readchar.readchar() # sys.stdin.read(1)
if key_press == ' ':
waypoint_samples = []
gps_reader.clear() # throw away buffered readings
state = "sampling"
else:
state = "" # just start logging
elif state == "sampling":
waypoint_samples += readings
count = len(waypoint_samples)
print(f"Collected {count} so far...")
if count > samples_per_waypoint:
print(f"...done. Collected {count} samples for waypoint #{len(waypoints)+1}")
                    #
                    # model a waypoint as a rotated ellipsoid fitted at
                    # args.nstd standard deviations around the points
                    # measured at the waypoint.
                    #
waypoint = Waypoint(waypoint_samples, nstd=args.nstd)
waypoints.append(waypoint)
if len(waypoints) < waypoint_count:
state = "prompt"
else:
state = "test_prompt"
if args.debug:
plot(waypoints)
elif state == "test_prompt":
print("Waypoints are recorded. Now walk around and see when you are in a waypoint.")
state = "test"
elif state == "test":
for ts, x, y in readings:
print(f"Your position is ({x}, {y})")
hit, index = is_in_waypoint_range(waypoints, x, y)
if hit:
print(f"You are within the sample range of waypoint #{index + 1}")
std_deviation = 1.0
hit, index = is_in_waypoint_std(waypoints, x, y, std_deviation)
if hit:
print(f"You are within {std_deviation} standard deviations of the center of waypoint #{index + 1}")
hit, index = is_in_waypoint(waypoints, x, y)
if hit:
print(f"You are at waypoint's ellipse #{index + 1}")
else:
# just log the readings
for position in readings:
ts, x, y = position
print(f"You are at ({x}, {y})")
else:
if time.time() > (ts + 0.5):
print(".", end="")
ts = time.time()
finally:
if gps_reader:
gps_reader.shutdown()
if update_thread is not None:
update_thread.join() # wait for thread to end
|
Layer4.py
|
from fastapi import APIRouter, Request
from denialofservice import Layer4
from globals import NUMBER_OF_THREADS
from threading import Thread
from log import log
router = APIRouter()
@router.post("/synflood")
async def synflood(time: int, target: str, port: int, request: Request):
try:
for i in range(NUMBER_OF_THREADS):
t = Thread(target=Layer4.SYN_Flood, args=(target, port, time,))
t.start()
log.info(f"{target}:{port} SYN-Flooded from {request.client.host} for {time} seconds")
    except Exception:
log.warning(f"{target}:{port} SYN-Flood from {request.client.host} for {time} seconds could not be triggered")
@router.post("/udpflood")
async def udpflood(time: int, target: str, port: int, request: Request):
try:
for i in range(NUMBER_OF_THREADS):
t = Thread(target=Layer4.UDP_Flood, args=(target, port, time,))
t.start()
log.info(f"{target}:{port} UDP-Flooded from {request.client.host} for {time} seconds")
    except Exception:
log.warning(f"{target}:{port} UDP-Flood from {request.client.host} for {time} seconds could not be triggered")
@router.post("/emailspam")
async def emailspam(time: int, receivermail: str, message: str, request: Request):
try:
t = Thread(target=Layer4.EMAIL_Spam, args=(receivermail, time, message,))
t.start()
log.info(f"{receivermail} EMAIL-Spammed from {request.client.host} for {time} seconds")
except Exception as e:
print(e)
log.warning(f"{receivermail} EMAIL-Spam from {request.client.host} for {time} seconds could not be triggered")
|
__init__.py
|
"""Helper operations and classes for general model building.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import warnings
import collections
import pickle
import os
import time
import numpy as np
import pandas as pd
import tensorflow as tf
import tempfile
import threading
from deepchem.models import Model
from deepchem.metrics import from_one_hot
from deepchem.nn import model_ops
from deepchem.models.tensorflow_models import utils as tf_utils
from deepchem.trans import undo_transforms
from deepchem.utils.save import log
from deepchem.utils.evaluate import Evaluator
from deepchem.data import pad_features
from tensorflow.contrib.layers.python.layers import batch_norm
def softmax(x):
"""Simple numpy softmax implementation
"""
# (n_samples, n_classes)
if len(x.shape) == 2:
row_max = np.max(x, axis=1)
x -= row_max.reshape((x.shape[0], 1))
x = np.exp(x)
row_sum = np.sum(x, axis=1)
x /= row_sum.reshape((x.shape[0], 1))
# (n_samples, n_tasks, n_classes)
elif len(x.shape) == 3:
row_max = np.max(x, axis=2)
x -= row_max.reshape(x.shape[:2] + (1,))
x = np.exp(x)
row_sum = np.sum(x, axis=2)
x /= row_sum.reshape(x.shape[:2] + (1,))
return x
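# Illustrative check (a sketch, not used elsewhere in this module): for a 2-D or
# 3-D input, every row of the softmax output sums to one. softmax() above
# modifies its argument in place, so the helper passes a fresh array.
def _softmax_rows_sum_to_one(x):
  return np.allclose(softmax(np.array(x, dtype=float)).sum(axis=-1), 1.0)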
class TensorflowGraph(object):
"""Simple class that holds information needed to run Tensorflow graph."""
def __init__(self, graph, session, name_scopes, output, labels, weights,
loss):
warnings.warn(
"TensorflowGraph is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.graph = graph
self.session = session
self.name_scopes = name_scopes
self.output = output
self.labels = labels
self.weights = weights
self.loss = loss
@staticmethod
def get_placeholder_scope(graph, name_scopes):
"""Gets placeholder scope."""
placeholder_root = "placeholders"
return TensorflowGraph.shared_name_scope(placeholder_root, graph,
name_scopes)
@staticmethod
def shared_name_scope(name, graph, name_scopes):
"""Returns a singleton TensorFlow scope with the given name.
Used to prevent '_1'-appended scopes when sharing scopes with child classes.
Args:
name: String. Name scope for group of operations.
Returns:
tf.name_scope with the provided name.
"""
with graph.as_default():
if name not in name_scopes:
with tf.name_scope(name) as scope:
name_scopes[name] = scope
return tf.name_scope(name_scopes[name])
@staticmethod
def get_feed_dict(named_values):
feed_dict = {}
placeholder_root = "placeholders"
for name, value in named_values.items():
feed_dict['{}/{}:0'.format(placeholder_root, name)] = value
return feed_dict
class TensorflowGraphModel(Model):
"""Parent class for deepchem Tensorflow models.
Classifier:
n_classes
Has the following attributes:
placeholder_root: String placeholder prefix, used to create
placeholder_scope.
Generic base class for defining, training, and evaluating TensorflowGraphs.
Subclasses must implement the following methods:
build
add_output_ops
add_training_cost
Args:
train: If True, model is in training mode.
logdir: Directory for output files.
"""
def __init__(self,
n_tasks,
n_features,
logdir=None,
layer_sizes=[1000],
weight_init_stddevs=[.02],
bias_init_consts=[1.],
penalty=0.0,
penalty_type="l2",
dropouts=[0.5],
learning_rate=.001,
momentum=.9,
optimizer="adam",
batch_size=50,
n_classes=2,
pad_batches=False,
verbose=True,
seed=None,
**kwargs):
"""Constructs the computational graph.
    This function constructs the computational graph for the model. It relies on
    subclassed methods (build/cost) to construct specific graphs.
Parameters
----------
n_tasks: int
Number of tasks
n_features: int
Number of features.
logdir: str
Location to save data
layer_sizes: list
List of layer sizes.
weight_init_stddevs: list
List of standard deviations for weights (sampled from zero-mean
gaussians). One for each layer.
bias_init_consts: list
List of bias initializations. One for each layer.
penalty: float
Amount of penalty (l2 or l1 applied)
penalty_type: str
Either "l2" or "l1"
dropouts: list
List of dropout amounts. One for each layer.
learning_rate: float
Learning rate for model.
momentum: float
Momentum. Only applied if optimizer=="momentum"
optimizer: str
Type of optimizer applied.
batch_size: int
Size of minibatches for training.
n_classes: int
Number of classes if this is for classification.
TODO(rbharath): Move this argument to TensorflowClassifier
    verbose: bool
      If True, perform logging.
seed: int
If not none, is used as random seed for tensorflow.
"""
warnings.warn(
"TensorflowGraphModel is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
# Save hyperparameters
self.n_tasks = n_tasks
self.n_features = n_features
self.layer_sizes = layer_sizes
self.weight_init_stddevs = weight_init_stddevs
self.bias_init_consts = bias_init_consts
self.penalty = penalty
self.penalty_type = penalty_type
self.dropouts = dropouts
self.learning_rate = learning_rate
self.momentum = momentum
self.optimizer = optimizer
self.batch_size = batch_size
self.n_classes = n_classes
self.pad_batches = pad_batches
self.seed = seed
super(TensorflowGraphModel, self).__init__(
self, model_dir=logdir, verbose=verbose)
# Guard variable to make sure we don't Restore() this model
# from a disk checkpoint more than once.
self._restored_model = False
# Path to save checkpoint files, which matches the
# replicated supervisor's default path.
self._save_path = os.path.join(self.model_dir, 'model.ckpt')
self.train_graph = self.construct_graph(training=True, seed=self.seed)
self.eval_graph = self.construct_graph(training=False, seed=self.seed)
def save(self):
"""
No-op since tf models save themselves during fit()
"""
pass
def reload(self):
"""
Loads model from disk. Thin wrapper around restore() for consistency.
"""
self.restore()
def get_num_tasks(self):
return self.n_tasks
def construct_graph(self, training, seed):
"""Returns a TensorflowGraph object."""
graph = tf.Graph()
# Lazily created by _get_shared_session().
shared_session = None
# Cache of TensorFlow scopes, to prevent '_1' appended scope names
# when subclass-overridden methods use the same scopes.
name_scopes = {}
# Setup graph
with graph.as_default():
if seed is not None:
tf.set_random_seed(seed)
(output, labels, weights) = self.build(graph, name_scopes, training)
if training:
loss = self.add_training_cost(graph, name_scopes, output, labels, weights)
else:
loss = None
output = self.add_output_ops(graph, output) # add softmax heads
return TensorflowGraph(
graph=graph,
session=shared_session,
name_scopes=name_scopes,
output=output,
labels=labels,
weights=weights,
loss=loss)
def add_training_cost(self, graph, name_scopes, output, labels, weights):
with graph.as_default():
epsilon = 1e-3 # small float to avoid dividing by zero
weighted_costs = [] # weighted costs for each example
gradient_costs = [] # costs used for gradient calculation
with TensorflowGraph.shared_name_scope('costs', graph, name_scopes):
for task in range(self.n_tasks):
task_str = str(task).zfill(len(str(self.n_tasks)))
with TensorflowGraph.shared_name_scope('cost_{}'.format(task_str),
graph, name_scopes):
with tf.name_scope('weighted'):
weighted_cost = self.cost(output[task], labels[task],
weights[task])
weighted_costs.append(weighted_cost)
with tf.name_scope('gradient'):
# Note that we divide by the batch size and not the number of
# non-zero weight examples in the batch. Also, instead of using
# tf.reduce_mean (which can put ops on the CPU) we explicitly
# calculate with div/sum so it stays on the GPU.
gradient_cost = tf.math.divide(
tf.reduce_sum(weighted_cost), self.batch_size)
gradient_costs.append(gradient_cost)
# aggregated costs
with TensorflowGraph.shared_name_scope('aggregated', graph,
name_scopes):
with tf.name_scope('gradient'):
loss = tf.add_n(gradient_costs)
# weight decay
if self.penalty != 0.0:
penalty = model_ops.weight_decay(self.penalty_type, self.penalty)
loss += penalty
return loss
def fit(self,
dataset,
nb_epoch=10,
max_checkpoints_to_keep=5,
log_every_N_batches=50,
checkpoint_interval=10,
**kwargs):
"""Fit the model.
Parameters
----------
dataset: dc.data.Dataset
Dataset object holding training data
nb_epoch: 10
Number of training epochs.
max_checkpoints_to_keep: int
Maximum number of checkpoints to keep; older checkpoints will be deleted.
log_every_N_batches: int
Report every N batches. Useful for training on very large datasets,
      where epochs can take a long time to finish.
checkpoint_interval: int
Frequency at which to write checkpoints, measured in epochs
Raises
------
AssertionError
If model is not in training mode.
"""
############################################################## TIMING
time1 = time.time()
############################################################## TIMING
log("Training for %d epochs" % nb_epoch, self.verbose)
with self.train_graph.graph.as_default():
train_op = self.get_training_op(self.train_graph.graph,
self.train_graph.loss)
with self._get_shared_session(train=True) as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=max_checkpoints_to_keep)
# Save an initial checkpoint.
saver.save(sess, self._save_path, global_step=0)
# Define the code that runs on a separate thread to feed data into the queue.
def enqueue(sess, dataset, nb_epoch, epoch_end_indices):
index = 0
for epoch in range(nb_epoch):
for X_b, y_b, w_b, ids_b in dataset.iterbatches(
self.batch_size, pad_batches=self.pad_batches):
feed_dict = self.construct_feed_dict(X_b, y_b, w_b, ids_b)
sess.run(self.train_graph.graph.enqueue, feed_dict=feed_dict)
index += 1
epoch_end_indices.append(index)
sess.run(self.train_graph.graph.queue.close())
epoch_end_indices = []
enqueue_thread = threading.Thread(
target=enqueue, args=[sess, dataset, nb_epoch, epoch_end_indices])
enqueue_thread.daemon = True
enqueue_thread.start()
# Main training loop.
try:
epoch = 0
index = 0
index_in_epoch = 0
avg_loss = 0.0
while True:
if index_in_epoch % log_every_N_batches == 0:
log("On batch %d" % index_in_epoch, self.verbose)
# Run training op.
fetches = self.train_graph.output + [
train_op, self.train_graph.loss
]
fetched_values = sess.run(fetches)
loss = fetched_values[-1]
avg_loss += loss
index += 1
index_in_epoch += 1
if len(epoch_end_indices) > 0 and index >= epoch_end_indices[0]:
# We have reached the end of an epoch.
if epoch % checkpoint_interval == checkpoint_interval - 1:
saver.save(sess, self._save_path, global_step=epoch)
avg_loss = float(avg_loss) / index_in_epoch
log('Ending epoch %d: Average loss %g' % (epoch, avg_loss),
self.verbose)
epoch += 1
index_in_epoch = 0
avg_loss = 0.0
del epoch_end_indices[0]
except tf.errors.OutOfRangeError:
# We have reached the end of the data.
pass
# Always save a final checkpoint when complete.
saver.save(sess, self._save_path, global_step=epoch + 1)
############################################################## TIMING
time2 = time.time()
print("TIMING: model fitting took %0.3f s" % (time2 - time1), self.verbose)
############################################################## TIMING
def add_output_ops(self, graph, output):
"""Replace logits with softmax outputs."""
with graph.as_default():
softmax = []
with tf.name_scope('inference'):
for i, logits in enumerate(output):
softmax.append(tf.nn.softmax(logits, name='softmax_%d' % i))
output = softmax
return output
def build(self, graph, name_scopes, training):
"""Define the core graph.
NOTE(user): Operations defined here should be in their own name scope to
avoid any ambiguity when restoring checkpoints.
Raises:
NotImplementedError: if not overridden by concrete subclass.
"""
raise NotImplementedError('Must be overridden by concrete subclass')
def construct_feed_dict(self, X_b, y_b=None, w_b=None, ids_b=None):
"""Transform a minibatch of data into a feed_dict.
Raises:
NotImplementedError: if not overridden by concrete subclass.
"""
raise NotImplementedError('Must be overridden by concrete subclass')
def add_label_placeholders(self, graph, name_scopes):
"""Add Placeholders for labels for each task.
This method creates the following Placeholders for each task:
labels_%d: Float label tensor. For classification tasks, this tensor will
have shape batch_size x n_classes. For regression tasks, this tensor
will have shape batch_size.
Raises:
NotImplementedError: if not overridden by concrete subclass.
"""
raise NotImplementedError('Must be overridden by concrete subclass')
def add_example_weight_placeholders(self, graph, name_scopes):
"""Add Placeholders for example weights for each task.
This method creates the following Placeholders for each task:
weights_%d: Label tensor with shape batch_size.
Placeholders are wrapped in identity ops to avoid the error caused by
feeding and fetching the same tensor.
"""
weights = []
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
with placeholder_scope:
for task in range(self.n_tasks):
weights.append(
tf.identity(
tf.placeholder(
tf.float32, shape=[None], name='weights_%d' % task)))
return weights
def cost(self, output, labels, weights):
"""Calculate single-task training cost for a batch of examples.
Args:
output: Tensor with model outputs.
labels: Tensor with true labels.
weights: Tensor with shape batch_size containing example weights.
Returns:
A tensor with shape batch_size containing the weighted cost for each
example. For use in subclasses that want to calculate additional costs.
"""
raise NotImplementedError('Must be overridden by concrete subclass')
def get_training_op(self, graph, loss):
"""Get training op for applying gradients to variables.
Subclasses that need to do anything fancy with gradients should override
this method.
Returns:
A training op.
"""
with graph.as_default():
opt = model_ops.optimizer(self.optimizer, self.learning_rate,
self.momentum)
return opt.minimize(loss, name='train')
def _get_shared_session(self, train):
# allow_soft_placement=True allows ops without a GPU implementation
# to run on the CPU instead.
if train:
if not self.train_graph.session:
config = tf.ConfigProto(allow_soft_placement=True)
#gpu memory growth option
config.gpu_options.allow_growth = True
self.train_graph.session = tf.Session(config=config)
return self.train_graph.session
else:
if not self.eval_graph.session:
config = tf.ConfigProto(allow_soft_placement=True)
#gpu memory growth option
config.gpu_options.allow_growth = True
self.eval_graph.session = tf.Session(config=config)
return self.eval_graph.session
def restore(self):
"""Restores the model from the provided training checkpoint.
Args:
checkpoint: string. Path to checkpoint file.
"""
if self._restored_model:
return
with self.eval_graph.graph.as_default():
last_checkpoint = self._find_last_checkpoint()
# TODO(rbharath): Is setting train=False right here?
saver = tf.train.Saver()
saver.restore(self._get_shared_session(train=False), last_checkpoint)
self._restored_model = True
def predict(self, dataset, transformers=[]):
"""
Uses self to make predictions on provided Dataset object.
Returns:
y_pred: numpy ndarray of shape (n_samples,)
"""
y_preds = []
n_tasks = self.get_num_tasks()
ind = 0
for (X_batch, _, _, ids_batch) in dataset.iterbatches(
self.batch_size, deterministic=True):
n_samples = len(X_batch)
y_pred_batch = self.predict_on_batch(X_batch)
# Discard any padded predictions
y_pred_batch = y_pred_batch[:n_samples]
y_pred_batch = np.reshape(y_pred_batch, (n_samples, n_tasks))
y_pred_batch = undo_transforms(y_pred_batch, transformers)
y_preds.append(y_pred_batch)
y_pred = np.vstack(y_preds)
    # iterbatches pads the last batch with zero-weight examples.
# Remove padded examples.
n_samples = len(dataset)
y_pred = np.reshape(y_pred, (n_samples, n_tasks))
# Special case to handle singletasks.
if n_tasks == 1:
y_pred = np.reshape(y_pred, (n_samples,))
return y_pred
def predict_proba(self, dataset, transformers=[], n_classes=2):
"""
TODO: Do transformers even make sense here?
Returns:
      y_pred: numpy ndarray of shape (n_samples, n_tasks, n_classes)
"""
y_preds = []
n_tasks = self.get_num_tasks()
for (X_batch, y_batch, w_batch, ids_batch) in dataset.iterbatches(
self.batch_size, deterministic=True):
n_samples = len(X_batch)
y_pred_batch = self.predict_proba_on_batch(X_batch)
y_pred_batch = y_pred_batch[:n_samples]
y_pred_batch = np.reshape(y_pred_batch, (n_samples, n_tasks, n_classes))
y_pred_batch = undo_transforms(y_pred_batch, transformers)
y_preds.append(y_pred_batch)
y_pred = np.vstack(y_preds)
    # iterbatches() pads the last batch with zero-weight examples.
# Remove padded examples.
n_samples = len(dataset)
y_pred = y_pred[:n_samples]
y_pred = np.reshape(y_pred, (n_samples, n_tasks, n_classes))
return y_pred
# TODO(rbharath): Verify this can be safely removed.
#def evaluate(self, dataset, metrics, transformers=[]):
# """
# Evaluates the performance of this model on specified dataset.
#
# Parameters
# ----------
# dataset: dc.data.Dataset
# Dataset object.
# metric: deepchem.metrics.Metric
# Evaluation metric
# transformers: list
# List of deepchem.transformers.NLP
# Returns
# -------
# dict
# Maps tasks to scores under metric.
# """
# evaluator = Evaluator(self, dataset, transformers)
# scores = evaluator.compute_model_performance(metrics)
# return scores
def _find_last_checkpoint(self):
"""Finds last saved checkpoint."""
highest_num, last_checkpoint = -np.inf, None
for filename in os.listdir(self.model_dir):
# checkpoints look like model_dir/model.ckpt-N
# self._save_path is "model_dir/model.ckpt"
if os.path.basename(self._save_path) in filename:
try:
N = int(filename.split("-")[1].split(".")[0])
if N > highest_num:
highest_num = N
last_checkpoint = "model.ckpt-" + str(N)
except ValueError:
pass
return os.path.join(self.model_dir, last_checkpoint)
class TensorflowClassifier(TensorflowGraphModel):
"""Classification model.
Subclasses must set the following attributes:
output: logits op(s) used for computing classification loss and predicted
class probabilities for each task.
"""
def get_task_type(self):
return "classification"
def cost(self, logits, labels, weights):
"""Calculate single-task training cost for a batch of examples.
Args:
logits: Tensor with shape batch_size x n_classes containing logits.
labels: Tensor with shape batch_size x n_classes containing true labels
in a one-hot encoding.
weights: Tensor with shape batch_size containing example weights.
Returns:
A tensor with shape batch_size containing the weighted cost for each
example.
"""
return tf.multiply(
tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels),
weights)
def add_label_placeholders(self, graph, name_scopes):
"""Add Placeholders for labels for each task.
This method creates the following Placeholders for each task:
labels_%d: Label tensor with shape batch_size x n_classes.
Placeholders are wrapped in identity ops to avoid the error caused by
feeding and fetching the same tensor.
"""
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
with graph.as_default():
batch_size = self.batch_size
n_classes = self.n_classes
labels = []
with placeholder_scope:
for task in range(self.n_tasks):
labels.append(
tf.identity(
tf.placeholder(
tf.float32,
shape=[None, n_classes],
name='labels_%d' % task)))
return labels
def predict_on_batch(self, X):
"""Return model output for the provided input.
Restore(checkpoint) must have previously been called on this object.
    Args:
      X: numpy array of featurized input samples.
    Returns:
      A numpy array of predicted class labels with shape (n_samples, n_tasks).
    Raises:
      AssertionError: If model is not in evaluation mode.
      ValueError: If the model output has an unrecognized rank.
"""
len_unpadded = len(X)
if self.pad_batches:
X = pad_features(self.batch_size, X)
if not self._restored_model:
self.restore()
with self.eval_graph.graph.as_default():
# run eval data through the model
n_tasks = self.n_tasks
output = []
with self._get_shared_session(train=False).as_default():
feed_dict = self.construct_feed_dict(X)
data = self._get_shared_session(train=False).run(
self.eval_graph.output, feed_dict=feed_dict)
batch_output = np.asarray(data[:n_tasks], dtype=float)
# reshape to batch_size x n_tasks x ...
if batch_output.ndim == 3:
batch_output = batch_output.transpose((1, 0, 2))
elif batch_output.ndim == 2:
batch_output = batch_output.transpose((1, 0))
else:
raise ValueError('Unrecognized rank combination for output: %s' %
(batch_output.shape,))
output.append(batch_output)
outputs = np.array(
from_one_hot(np.squeeze(np.concatenate(output)), axis=-1))
outputs = np.copy(outputs)
outputs = np.reshape(outputs, (len(X), n_tasks))
outputs = outputs[:len_unpadded]
return outputs
def predict_proba_on_batch(self, X):
"""Return model output for the provided input.
Restore(checkpoint) must have previously been called on this object.
    Args:
      X: numpy array of featurized input samples.
    Returns:
      A numpy array of class probabilities with shape
      (n_samples, n_tasks, n_classes).
    Raises:
      AssertionError: If model is not in evaluation mode.
      ValueError: If the model output has an unrecognized rank.
"""
if self.pad_batches:
X = pad_features(self.batch_size, X)
if not self._restored_model:
self.restore()
with self.eval_graph.graph.as_default():
# run eval data through the model
n_tasks = self.n_tasks
with self._get_shared_session(train=False).as_default():
feed_dict = self.construct_feed_dict(X)
data = self._get_shared_session(train=False).run(
self.eval_graph.output, feed_dict=feed_dict)
batch_outputs = np.asarray(data[:n_tasks], dtype=float)
# reshape to batch_size x n_tasks x ...
if batch_outputs.ndim == 3:
batch_outputs = batch_outputs.transpose((1, 0, 2))
elif batch_outputs.ndim == 2:
batch_outputs = batch_outputs.transpose((1, 0))
else:
raise ValueError('Unrecognized rank combination for output: %s ' %
(batch_outputs.shape,))
        # Note that softmax is already applied in construct_graph
outputs = batch_outputs
return np.copy(outputs)
class TensorflowRegressor(TensorflowGraphModel):
"""Regression model.
Subclasses must set the following attributes:
output: Op(s) used for computing regression loss and predicted regression
outputs for each task.
"""
def get_task_type(self):
return "regressor"
def add_output_ops(self, graph, output):
"""No-op for regression models since no softmax."""
return output
def cost(self, output, labels, weights):
"""Calculate single-task training cost for a batch of examples.
Args:
output: Tensor with shape batch_size containing predicted values.
labels: Tensor with shape batch_size containing true values.
weights: Tensor with shape batch_size containing example weights.
Returns:
A tensor with shape batch_size containing the weighted cost for each
example.
"""
return tf.multiply(0.5 * tf.square(output - labels), weights)
def add_label_placeholders(self, graph, name_scopes):
"""Add Placeholders for labels for each task.
This method creates the following Placeholders for each task:
labels_%d: Label tensor with shape batch_size.
Placeholders are wrapped in identity ops to avoid the error caused by
feeding and fetching the same tensor.
"""
placeholder_scope = TensorflowGraph.get_placeholder_scope(
graph, name_scopes)
with graph.as_default():
batch_size = self.batch_size
labels = []
with placeholder_scope:
for task in range(self.n_tasks):
labels.append(
tf.identity(
tf.placeholder(
tf.float32, shape=[None], name='labels_%d' % task)))
return labels
def predict_on_batch(self, X):
"""Return model output for the provided input.
Restore(checkpoint) must have previously been called on this object.
    Args:
      X: numpy array of featurized input samples.
    Returns:
      A numpy array of predicted values for the unpadded samples, with
      singleton dimensions removed.
    Raises:
      AssertionError: If model is not in evaluation mode.
      ValueError: If the model output has an unrecognized rank.
"""
len_unpadded = len(X)
if self.pad_batches:
X = pad_features(self.batch_size, X)
if not self._restored_model:
self.restore()
with self.eval_graph.graph.as_default():
# run eval data through the model
n_tasks = self.n_tasks
outputs = []
with self._get_shared_session(train=False).as_default():
n_samples = len(X)
feed_dict = self.construct_feed_dict(X)
data = self._get_shared_session(train=False).run(
self.eval_graph.output, feed_dict=feed_dict)
batch_outputs = np.asarray(data[:n_tasks], dtype=float)
# reshape to batch_size x n_tasks x ...
if batch_outputs.ndim == 3:
batch_outputs = batch_outputs.transpose((1, 0, 2))
elif batch_outputs.ndim == 2:
batch_outputs = batch_outputs.transpose((1, 0))
# Handle edge case when batch-size is 1.
elif batch_outputs.ndim == 1:
n_samples = len(X)
batch_outputs = batch_outputs.reshape((n_samples, n_tasks))
else:
raise ValueError('Unrecognized rank combination for output: %s' %
                           (batch_outputs.shape,))
# Prune away any padding that was added
batch_outputs = batch_outputs[:n_samples]
outputs.append(batch_outputs)
outputs = np.squeeze(np.concatenate(outputs))
outputs = np.copy(outputs)
# Handle case of 0-dimensional scalar output
if len(outputs.shape) > 0:
return outputs[:len_unpadded]
else:
outputs = np.reshape(outputs, (1,))
return outputs
|
lock.py
|
#!/usr/bin/env python3
#
# Copyright (C) 2019 Jayson
#
from threading import Thread, Lock
amount = 0
lock = Lock()
def worker(count):
global amount
for i in range(count):
lock.acquire(True)
amount = amount + 1
lock.release()
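# A minimal alternative sketch (not part of the original script): the same
# critical section written with the lock as a context manager, which also
# releases the lock if the body raises an exception.
def worker_ctx(count):
    global amount
    for _ in range(count):
        with lock:
            amount += 1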
if __name__ == '__main__':
    t1 = Thread(target=worker, args=(10000000,))
    t2 = Thread(target=worker, args=(20000000,))
    t3 = Thread(target=worker, args=(30000000,))
    t1.start()
    t2.start()
    t3.start()
    # Wait for every worker (not just the last one started) so the final
    # total is printed only after all increments have completed.
    t1.join()
    t2.join()
    t3.join()
    print(amount)
|
spl.py
|
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2015,2017
"""
SPL Python primitive operators.
********
Overview
********
SPL primitive operators that call a Python function or
class methods are created by decorators provided by this module.
The name of the function or callable class becomes the name of the
operator.
Once created the operators become part of a toolkit and may be used
like any other SPL operator.
A decorated function is a stateless operator while a decorated class
is an optionally stateful operator.
These are the supported decorators that create an SPL operator:
* :py:class:`@spl.source <source>` - Creates a source operator that produces tuples.
* :py:class:`@spl.filter <filter>` - Creates an operator that filters tuples.
* :py:class:`@spl.map <map>` - Creates an operator that maps input tuples to output tuples.
* :py:class:`@spl.for_each <for_each>` - Creates an operator that terminates a stream, processing each tuple.
* :py:class:`@spl.primitive_operator <primitive_operator>` - Creates an SPL primitive operator that has an arbitrary number of input and output ports.
*******************************
Python classes as SPL operators
*******************************
Overview
========
Decorating a Python class creates a stateful SPL operator
where the instance fields of the class are the operator's state. An instance
of the class is created when the SPL operator invocation is initialized
at SPL runtime. The instance of the Python class is private to the SPL
operator and is maintained for the lifetime of the operator.
If the class has instance fields then they are the state of the
operator and are private to each invocation of the operator.
If the `__init__` method has parameters beyond the first
`self` parameter then they are mapped to operator parameters.
Any parameter that has a default value becomes an optional parameter
to the SPL operator. Parameters of the form `\*args` and `\*\*kwargs`
are not supported.
.. warning::
    Parameter names must be valid SPL identifiers,
SPL identifiers start with an ASCII letter or underscore,
followed by ASCII letters, digits, or underscores.
The name also must not be an SPL keyword.
Parameter names ``suppress`` and ``include`` are reserved.
The values of the operator parameters at SPL operator invocation are passed
to the `__init__` method. This is equivalent to creating an instance
of the class passing the operator parameters into the constructor.
For example, with this decorated class producing an SPL source
operator::
@spl.source()
class Range(object):
def __init__(self, stop, start=0):
self.start = start
self.stop = stop
def __iter__(self):
return zip(range(self.start, self.stop))
The SPL operator `Range` has two parameters, `stop` is mandatory and `start` is optional, defaulting to zero. Thus the SPL operator may be invoked as::
// Produces the sequence of values from 0 to 99
//
// Creates an instance of the Python class
// Range using Range(100)
//
stream<int32 seq> R = Range() {
param
stop: 100;
}
or both operator parameters can be set::
// Produces the sequence of values from 50 to 74
//
// Creates an instance of the Python class
// Range using Range(75, 50)
//
stream<int32 seq> R = Range() {
param
start: 50;
stop: 75;
}
Operator state
==============
Use of a class allows the operator to be stateful by maintaining state in instance
attributes across invocations (tuple processing).
.. note::
For future compatibility instances of a class should ensure that the object's
state can be pickled. See https://docs.python.org/3.5/library/pickle.html#handling-stateful-objects
Operator initialization & shutdown
==================================
Execution of an instance for an operator effectively runs in a context manager so that an instance's ``__enter__``
method is called when the processing element containing the operator is initialized
and its ``__exit__`` method called when the processing element is stopped. To take advantage of this
the class must define both ``__enter__`` and ``__exit__`` methods.
.. note::
For future compatibility operator initialization such as opening files should be in ``__enter__``
in order to support stateful operator restart & checkpointing in the future.
Example of using ``__enter__`` and ``__exit__`` to open and close a file::
import streamsx.ec as ec
@spl.map()
class Sentiment(object):
def __init__(self, name):
self.name = name
self.file = None
def __enter__(self):
self.file = open(self.name, 'r')
def __exit__(self, exc_type, exc_value, traceback):
if self.file is not None:
self.file.close()
def __call__(self):
pass
When an instance defines a valid ``__exit__`` method then it will be called with an exception when:
* the instance raises an exception during processing of a tuple
* a data conversion exception is raised converting a Python value to an SPL tuple or attribute
If ``__exit__`` returns a true value then the exception is suppressed and processing continues, otherwise the enclosing processing element will be terminated.
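A minimal illustrative sketch of suppressing such an error (assuming an input
schema with an ``rstring raw`` attribute)::
    @spl.map()
    class SafeParse(object):
        def __enter__(self):
            pass
        def __exit__(self, exc_type, exc_value, traceback):
            # Returning a true value suppresses the exception; the tuple
            # that caused it is dropped and processing continues.
            return exc_type is ValueError
        def __call__(self, **tuple_):
            return (int(tuple_['raw']),)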
Application log and trace
=========================
IBM Streams provides application trace and log services which are
accessible through standard Python loggers from the `logging` module.
See :ref:`streams_app_log_trc`.
*********************************
Python functions as SPL operators
*********************************
Decorating a Python function creates a stateless SPL operator.
In SPL terms this is similar to an SPL Custom operator, where
the code in the Python function is the custom code. For
operators with input ports the function is called for each
input tuple, passing a Python representation of the SPL input tuple.
For an SPL source operator the function is called to obtain an iterable
whose contents will be submitted to the output stream as SPL tuples.
Operator parameters are not supported.
An example SPL sink operator that prints each input SPL tuple after
its conversion to a Python tuple::
@spl.for_each()
def PrintTuple(*tuple_):
"Print each tuple to standard out."
print(tuple_, flush=True)
.. _spl-tuple-to-python:
*******************************
Processing SPL tuples in Python
*******************************
SPL tuples are converted to Python objects and passed to a decorated callable.
Overview
========
For each SPL tuple arriving at an input port a Python function is called with
the SPL tuple converted to Python values suitable for the function call.
How the tuple is passed is defined by the tuple passing style.
Tuple Passing Styles
====================
An input tuple can be passed to Python function using a number of different styles:
* *dictionary*
* *tuple*
* *attributes by name* **not yet implemented**
* *attributes by position*
Dictionary
----------
Passing the SPL tuple as a Python dictionary is flexible
and makes the operator independent of any schema.
A disadvantage is the reduction in code readability
for Python function by not having formal parameters,
though getters such as ``tuple['id']`` mitigate that to some extent.
If the function is general purpose and can derive meaning
from the keys that are the attribute names then ``**kwargs`` can be useful.
When the only function parameter is ``**kwargs``
(e.g. ``def myfunc(**tuple_):``) then the passing style is *dictionary*.
All of the attributes are passed in the dictionary
using the SPL schema attribute name as the key.
Tuple
-----
Passing the SPL tuple as a Python tuple is flexible
and makes the operator independent of any schema
but is brittle to changes in the SPL schema.
Another disadvantage is the reduction in code readability
for Python function by not having formal parameters.
However if the function is general purpose and independent
of the tuple contents ``*args`` can be useful.
When the only function parameter is ``*args``
(e.g. ``def myfunc(*tuple_):``) then the passing style is *tuple*.
All of the attributes are passed as a Python tuple
with the order of values matching the order of the SPL schema.
Attributes by name
------------------
(**not yet implemented**)
Passing attributes by name can be robust against changes
in the SPL scheme, e.g. additional attributes being added in
the middle of the schema, but does require that the SPL schema
has matching attribute names.
When *attributes by name* is used then SPL tuple attributes
are passed to the function by name for formal parameters.
Order of the attributes and parameters need not match.
This is supported for function parameters of
kind ``POSITIONAL_OR_KEYWORD`` and ``KEYWORD_ONLY``.
If the function signature also contains a parameter of the form
``**kwargs`` (``VAR_KEYWORD``) then any attributes not bound to
formal parameters are passed in its dictionary using the
SPL schema attribute name as the key.
If the function signature also contains an arbitrary argument
list ``*args`` then any attributes not bound to formal parameters
or to ``**kwargs`` are passed in order of the SPL schema.
If there are only formal parameters any non-bound attributes
are not passed into the function.
Attributes by position
----------------------
Passing attributes by position allows the SPL operator to
be independent of the SPL schema but is brittle to
changes in the SPL schema. For example a function expecting
an identifier and a sensor reading as the first two attributes
would break if an attribute representing region was added as
the first SPL attribute.
When *attributes by position* is used then SPL tuple attributes are
passed to the function by position for formal parameters.
The first SPL attribute in the tuple is passed as the first parameter.
This is supported for function parameters of kind `POSITIONAL_OR_KEYWORD`.
If the function signature also contains an arbitrary argument
list `\*args` (`VAR_POSITIONAL`) then any attributes not bound
to formal parameters are passed in order of the SPL schema.
The function signature must not contain a parameter of the form
``**kwargs`` (`VAR_KEYWORD`).
If there are only formal parameters any non-bound attributes
are not passed into the function.
The SPL schema must have at least the number of positional arguments
the function requires.
Selecting the style
===================
For signatures only containing a parameter of the form
``*args`` or ``**kwargs`` the style is implicitly defined:
* ``def f(**tuple_)`` - *dictionary* - ``tuple_`` will contain a dictionary of all of the SPL tuple attribute's values with the keys being the attribute names.
* ``def f(*tuple_)`` - *tuple* - ``tuple_`` will contain all of the SPL tuple attribute's values in order of the SPL schema definition.
Otherwise the style is set by the ``style`` parameter to the decorator,
defaulting to *attributes by name*. The style value can be set to:
* ``'name'`` - *attributes by name* (the default)
* ``'position'`` - *attributes by position*
**Note**: For backwards compatibility ``@spl.pipe`` and ``@spl.sink``
**always** use *attributes by position* and do not support ``**kwargs``.
They do not support the ``style`` parameter.
Examples
========
These examples show how an SPL tuple with the schema and value::
tuple<rstring id, float64 temp, boolean increase>
{id='battery', temp=23.7, increase=true}
is passed into a variety of functions by showing the effective Python
call and the resulting values of the function's parameters.
*Dictionary* consuming all attributes by ``**kwargs``::
@spl.map()
    def f(**tuple_):
pass
# f({'id':'battery', 'temp':23.7, 'increase': True})
# tuple_={'id':'battery', 'temp':23.7, 'increase':True}
*Tuple* consuming all attributes by ``*args``::
@spl.map()
    def f(*tuple_):
pass
# f('battery', 23.7, True)
# tuple_=('battery',23.7, True)
*Attributes by name* consuming all attributes::
@spl.map()
    def f(id, temp, increase):
pass
# f(id='battery', temp=23.7, increase=True)
# id='battery'
# temp=23.7
# increase=True
*Attributes by name* consuming a subset of attributes::
@spl.map()
    def f(id, temp):
pass
# f(id='battery', temp=23.7)
# id='battery'
# temp=23.7
*Attributes by name* consuming a subset of attributes in a different order::
@spl.map()
    def f(increase, temp):
pass
# f(temp=23.7, increase=True)
# increase=True
# temp=23.7
*Attributes by name* consuming `id` by name and remaining attributes by ``**kwargs``::
@spl.map()
    def f(id, **tuple_):
pass
# f(id='battery', {'temp':23.7, 'increase':True})
# id='battery'
# tuple_={'temp':23.7, 'increase':True}
*Attributes by name* consuming `id` by name and remaining attributes by ``*args``::
@spl.map()
    def f(id, *tuple_):
pass
# f(id='battery', 23.7, True)
# id='battery'
# tuple_=(23.7, True)
*Attributes by position* consuming all attributes::
@spl.map(style='position')
    def f(key, value, up):
pass
# f('battery', 23.7, True)
# key='battery'
# value=23.7
# up=True
*Attributes by position* consuming a subset of attributes::
@spl.map(style='position')
    def f(a, b):
pass
# f('battery', 23.7)
# a='battery'
# b=23.7
*Attributes by position* consuming `id` by position and remaining attributes by ``*args``::
@spl.map(style='position')
    def f(key, *tuple_):
pass
# f('battery', 23.7, True)
# key='battery'
# tuple_=(23.7, True)
In all cases the SPL tuple must be able to provide all parameters
required by the function. If the SPL schema is insufficient then
an error will result, typically an SPL compile time error.
The SPL schema can provide a subset of the formal parameters if the
remaining attributes are optional (having a default).
*Attributes by name* consuming a subset of attributes with an optional parameter not matched by the schema::
@spl.map()
    def f(id, temp, pressure=None):
pass
# f(id='battery', temp=23.7)
# id='battery'
# temp=23.7
# pressure=None
.. _submit-from-python:
************************************
Submission of SPL tuples from Python
************************************
The return from a decorated callable results in submission of SPL tuples
on the associated output port.
A Python function must return:
* ``None``
* a Python tuple
* a Python dictionary
* a list containing any of the above.
None
====
When ``None`` is returned then no tuple will be submitted to
the operator output port.
Python tuple
============
When a Python tuple is returned it is converted to an SPL tuple and
submitted to the output port.
The values of a Python tuple are assigned to an output SPL tuple by position,
so the first value in the Python tuple is assigned to the first attribute
in the SPL tuple::
# SPL input schema: tuple<int32 x, float64 y>
# SPL output schema: tuple<int32 x, float64 y, float32 z>
@spl.map(style='position')
def myfunc(a,b):
return (a,b,a+b)
# The SPL output will be:
    # All values explicitly set by returned Python tuple
# based on the x,y values from the input tuple
# x is set to: x
# y is set to: y
# z is set to: x+y
The returned tuple may be *sparse*: any attribute value in the tuple
that is ``None`` will be set to its SPL default or copied from
a matching attribute in the input tuple
(same name and type,
or same name and same type as the underlying type of an output attribute
with an optional type),
depending on the operator kind::
# SPL input schema: tuple<int32 x, float64 y>
# SPL output schema: tuple<int32 x, float64 y, float32 z>
@spl.map(style='position')
def myfunc(a,b):
return (a,None,a+b)
# The SPL output will be:
    # x is set to: x (explicitly set by returned Python tuple)
# y is set to: y (set by matching input SPL attribute)
# z is set to: x+y
When a returned tuple has fewer values than attributes in the SPL output
schema the attributes not set by the Python function will be set
to their SPL default or copied from
a matching attribute in the input tuple
(same name and type,
or same name and same type as the underlying type of an output attribute
with an optional type),
depending on the operator kind::
# SPL input schema: tuple<int32 x, float64 y>
# SPL output schema: tuple<int32 x, float64 y, float32 z>
@spl.map(style='position')
def myfunc(a,b):
return a,
# The SPL output will be:
    # x is set to: x (explicitly set by returned Python tuple)
# y is set to: y (set by matching input SPL attribute)
# z is set to: 0 (default int32 value)
When a returned tuple has more values than attributes in the SPL output schema then the additional values are ignored::
# SPL input schema: tuple<int32 x, float64 y>
# SPL output schema: tuple<int32 x, float64 y, float32 z>
@spl.map(style='position')
def myfunc(a,b):
return (a,b,a+b,a/b)
# The SPL output will be:
    # All values explicitly set by returned Python tuple
# based on the x,y values from the input tuple
# x is set to: x
# y is set to: y
# z is set to: x+y
#
# The fourth value in the tuple a/b = x/y is ignored.
Python dictionary
=================
A Python dictionary is converted to an SPL tuple for submission to
the associated output port. An SPL attribute is set from the
dictionary if the dictionary contains a key equal to the attribute
name. The value is used to set the attribute, unless the value is
``None``.
If the value in the dictionary is ``None``, or no matching key exists,
then the attribute value is set to its SPL default or copied from
a matching attribute in the input tuple
(same name and type,
or same name and same type as the underlying type of an output attribute
with an optional type),
depending on the operator kind.
Any keys in the dictionary that do not map to SPL attribute names are ignored.
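An illustrative example, assuming the same input and output schemas as the
earlier examples::
    # SPL input schema: tuple<int32 x, float64 y>
    # SPL output schema: tuple<int32 x, float64 y, float32 z>
    @spl.map()
    def myfunc(**tuple_):
        return {'z': tuple_['x'] + tuple_['y'], 'unknown': 1}
    # The SPL output will be:
    # x is set to: x (set by matching input SPL attribute)
    # y is set to: y (set by matching input SPL attribute)
    # z is set to: x+y (set from the returned dictionary)
    # The key 'unknown' matches no attribute name and is ignored.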
Python list
===========
When a list is returned, each value is converted to an SPL tuple and
submitted to the output port, in order of the list starting with the
first element (position 0). If the list contains `None` at an index
then no SPL tuple is submitted for that index.
The list must only contain Python tuples, dictionaries or `None`. The list
can contain a mix of valid values.
The list may be empty resulting in no tuples being submitted.
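An illustrative example, assuming the same schemas as the earlier examples::
    # SPL input schema: tuple<int32 x, float64 y>
    # SPL output schema: tuple<int32 x, float64 y, float32 z>
    @spl.map(style='position')
    def myfunc(a, b):
        return [(a, b, a+b), None, {'z': a*b}]
    # Two SPL tuples are submitted for each input tuple: the first from the
    # Python tuple (a, b, a+b), the second from the dictionary {'z': a*b}
    # with x and y set by the matching input SPL attributes.
    # The None entry submits nothing.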
"""
from future.builtins import *
from enum import Enum
import functools
import inspect
import re
import sys
import streamsx.ec as ec
import importlib
############################################
# setup for function inspection
if sys.version_info.major == 3:
_inspect = inspect
elif sys.version_info.major == 2:
import funcsigs
_inspect = funcsigs
else:
raise ValueError("Python version not supported.")
############################################
# Used to recreate instances of decorated operators
# from their module & class name during pickling (dill)
# See __reduce__ implementation below
def _recreate_op(op_module, op_name):
module_ = importlib.import_module(op_module)
class_ = getattr(module_, op_name)
return class_.__new__(class_)
_OperatorType = Enum('_OperatorType', 'Ignore Source Sink Pipe Filter Primitive')
_OperatorType.Source.spl_template = 'PythonFunctionSource'
_OperatorType.Pipe.spl_template = 'PythonFunctionPipe'
_OperatorType.Sink.spl_template = 'PythonFunctionSink'
_OperatorType.Filter.spl_template = 'PythonFunctionFilter'
_OperatorType.Primitive.spl_template = 'PythonPrimitive'
_SPL_KEYWORDS = {'graph', 'stream', 'public', 'composite', 'input', 'output', 'type', 'config', 'logic',
'window', 'param', 'onTuple', 'onPunct', 'onProcess', 'state', 'stateful', 'mutable',
'if', 'for', 'while', 'break', 'continue', 'return', 'attribute', 'function', 'operator'}
def _valid_identifier(id):
if re.match('^[a-zA-Z_][a-zA-Z_0-9]*$', id) is None or id in _SPL_KEYWORDS:
raise ValueError("{0} is not a valid SPL identifier".format(id))
def _valid_op_parameter(name):
_valid_identifier(name)
if name in ['suppress', 'include']:
raise ValueError("Parameter name {0} is reserved".format(name))
def pipe(wrapped):
"""
Decorator to create an SPL operator from a function.
A pipe SPL operator with a single input port and a single
output port. For each tuple on the input port the
function is called passing the contents of the tuple.
SPL attributes from the tuple are passed by position.
The value returned from the function results in
zero or more tuples being submitted to the operator output
port, see :ref:`submit-from-python`.
.. deprecated:: 1.8
Recommended to use :py:class:`@spl.map <map>` instead.
"""
if not inspect.isfunction(wrapped):
raise TypeError('A function is required')
return _wrapforsplop(_OperatorType.Pipe, wrapped, 'position', False)
#
# Wrap object for an SPL operator, either
# a callable class or function.
#
def _wrapforsplop(optype, wrapped, style, docpy):
if inspect.isclass(wrapped):
if not callable(wrapped):
raise TypeError('Class must be callable')
_valid_identifier(wrapped.__name__)
class _op_class(wrapped):
__doc__ = wrapped.__doc__
_splpy_wrapped = wrapped
_splpy_optype = optype
_splpy_callable = 'class'
@functools.wraps(wrapped.__init__)
def __init__(self,*args,**kwargs):
super(_op_class, self).__init__(*args,**kwargs)
if ec._is_supported():
ec._save_opc(self)
ec._callable_enter(self)
# Use reduce to save the state of the class and its
# module and operator name.
def __reduce__(self):
if hasattr(self, '__getstate__'):
state = self.__getstate__()
else:
state = self.__dict__
return _recreate_op, (wrapped.__module__, wrapped.__name__), state
def _splpy_shutdown(self, exc_type=None, exc_value=None, traceback=None):
return ec._callable_exit(self, exc_type, exc_value, traceback)
if optype in (_OperatorType.Sink, _OperatorType.Pipe, _OperatorType.Filter):
_op_class._splpy_style = _define_style(wrapped, wrapped.__call__, style)
_op_class._splpy_fixed_count = _define_fixed(_op_class, _op_class.__call__)
else:
_op_class._splpy_style = ''
_op_class._splpy_fixed_count = -1
_op_class._splpy_file = inspect.getsourcefile(wrapped)
_op_class._splpy_docpy = docpy
return _op_class
if not inspect.isfunction(wrapped):
raise TypeError('A function or callable class is required')
_valid_identifier(wrapped.__name__)
#fnstyle =
#if fnstyle == 'tuple':
# @functools.wraps(wrapped)
# def _op_fn(*args):
# return wrapped(args)
#else:
# @functools.wraps(wrapped)
# def _op_fn(*args, **kwargs):
# return wrapped(*args, **kwargs)
_op_fn = wrapped
_op_fn._splpy_optype = optype
_op_fn._splpy_callable = 'function'
_op_fn._splpy_style = _define_style(_op_fn, _op_fn, style)
_op_fn._splpy_fixed_count = _define_fixed(_op_fn, _op_fn)
_op_fn._splpy_file = inspect.getsourcefile(wrapped)
_op_fn._splpy_docpy = docpy
return _op_fn
# define the SPL tuple passing style based
# upon the function signature and the decorator
# style parameter
def _define_style(wrapped, fn, style):
has_args = False
has_kwargs = False
has_positional = False
req_named = False
pmds = _inspect.signature(fn).parameters
itpmds = iter(pmds)
# Skip self
if inspect.isclass(wrapped):
next(itpmds)
pc = 0
for pn in itpmds:
pmd = pmds[pn]
if pmd.kind == _inspect.Parameter.POSITIONAL_ONLY:
raise TypeError('Positional only parameters are not supported:' + pn)
elif pmd.kind == _inspect.Parameter.VAR_POSITIONAL:
has_args = True
elif pmd.kind == _inspect.Parameter.VAR_KEYWORD:
has_kwargs = True
elif pmd.kind == _inspect.Parameter.POSITIONAL_OR_KEYWORD:
has_positional = True
elif pmd.kind == _inspect.Parameter.KEYWORD_ONLY:
if pmd.default is _inspect.Parameter.empty:
req_named = True
pc +=1
# See if the requested style matches the signature.
if style == 'position':
if req_named:
raise TypeError("style='position' not supported with a required named parameter.")
elif pc == 1 and has_kwargs:
raise TypeError("style='position' not supported with single **kwargs parameter.")
elif pc == 1 and has_args:
pass
elif not has_positional:
raise TypeError("style='position' not supported as no positional parameters exist.")
# From an implementation point of view the values
# are passed as a tuple and Python does the correct mapping
style = 'tuple'
elif style == 'name':
if pc == 1 and has_args:
raise TypeError("style='name' not supported with single *args parameter.")
elif pc == 1 and has_kwargs:
raise TypeError("style='name' not supported with single **kwargs parameter.")
elif style is not None:
raise TypeError("style=" + style + " unknown.")
if style is None:
if pc == 1 and has_kwargs:
style = 'dictionary'
elif pc == 1 and has_args:
style = 'tuple'
elif pc == 0:
style = 'tuple'
else:
# Default to by name
style = 'name'
if style == 'tuple' and has_kwargs:
raise TypeError("style='position' not implemented with **kwargs parameter.")
if style == 'name':
raise TypeError("Not yet implemented!")
return style
def _define_fixed(wrapped, callable_):
"""For the callable see how many positional parameters are required"""
is_class = inspect.isclass(wrapped)
style = callable_._splpy_style if hasattr(callable_, '_splpy_style') else wrapped._splpy_style
fixed_count = 0
if style == 'tuple':
sig = _inspect.signature(callable_)
pmds = sig.parameters
itpmds = iter(pmds)
# Skip 'self' for classes
if is_class:
next(itpmds)
for pn in itpmds:
param = pmds[pn]
if param.kind == _inspect.Parameter.POSITIONAL_OR_KEYWORD:
fixed_count += 1
if param.kind == _inspect.Parameter.VAR_POSITIONAL: # *args
fixed_count = -1
break
if param.kind == _inspect.Parameter.VAR_KEYWORD:
break
return fixed_count
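# Rough illustration of the counting above, assuming the internal 'tuple'
# style (i.e. style='position' at the decorator) unless noted:
#   def f(a, b):      -> fixed_count == 2 (two required attributes)
#   def f(a, *args):  -> fixed_count == -1 (any number of attributes)
#   def f(**kwargs):  -> fixed_count == 0 ('dictionary' style, not counted)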
class source(object):
"""
Create a source SPL operator from an iterable.
The resulting SPL operator has a single output port.
When decorating a class the class must be iterable
having an ``__iter__`` function. When the SPL operator
is invoked an instance of the class is created
and an iteration is created using ``iter(instance)``.
    When decorating a function the function must have no
parameters and must return an iterable or iteration.
When the SPL operator is invoked the function is called
and an iteration is created using ``iter(value)``
where ``value`` is the return of the function.
    For each value in the iteration zero or more SPL tuples
    are submitted to the output port, derived from the value,
see :ref:`submit-from-python`.
If the iteration completes then no more tuples
are submitted and a final punctuation mark
is submitted to the output port.
Example definition::
@spl.source()
class Range(object):
def __init__(self, stop, start=0):
self.start = start
self.stop = stop
def __iter__(self):
return zip(range(self.start, self.stop))
Example SPL invocation::
stream<int32 seq> R = Range() {
param
stop: 100;
}
Args:
docpy: Copy Python docstrings into SPL operator model for SPLDOC.
Exceptions raised by ``__iter__`` and ``__next__`` can be suppressed
when this decorator wraps a class with context manager
``__enter__`` and ``__exit__`` methods.
If ``__exit__`` returns a true value when called with an exception
then the exception is suppressed.
Suppressing an exception raised by ``__iter__`` results in the
source producing an empty iteration. No tuples will be submitted.
Suppressing an exception raised by ``__next__`` results in the
source not producing any tuples for that invocation. Processing
continues with a call to ``__next__``.
Data conversion errors of the value returned by ``__next__`` can
also be suppressed by ``__exit__``.
If ``__exit__`` returns a true value when called with the exception
then the exception is suppressed and the value that caused the
exception is not submitted as an SPL tuple.
"""
def __init__(self, docpy=True):
self.style = None
self.docpy = docpy
def __call__(self, wrapped):
return _wrapforsplop(_OperatorType.Source, wrapped, self.style, self.docpy)
class map(object):
"""
Decorator to create a map SPL operator from a callable class or function.
Creates an SPL operator with a single input port and a single
output port. For each tuple on the input port the
callable is called passing the contents of the tuple.
The value returned from the callable results in
zero or more tuples being submitted to the operator output
port, see :ref:`submit-from-python`.
Example definition::
@spl.map()
class AddSeq(object):
\"\"\"Add a sequence number as the last attribute.\"\"\"
def __init__(self):
self.seq = 0
def __call__(self, *tuple_):
id = self.seq
self.seq += 1
return tuple_ + (id,)
Example SPL invocation::
stream<In, tuple<uint64 seq>> InWithSeq = AddSeq(In) { }
Args:
style: How the SPL tuple is passed into Python callable or function, see :ref:`spl-tuple-to-python`.
docpy: Copy Python docstrings into SPL operator model for SPLDOC.
Exceptions raised by ``__call__`` can be suppressed when this decorator
wraps a class with context manager ``__enter__`` and ``__exit__`` methods.
If ``__exit__`` returns a true value when called with the exception
then the exception is suppressed and the tuple that caused the
exception is dropped.
Data conversion errors of the value returned by ``__call__`` can
also be suppressed by ``__exit__``.
If ``__exit__`` returns a true value when called with the exception
then the exception is suppressed and the value that caused the
exception is not submitted as an SPL tuple.
"""
def __init__(self, style=None, docpy=True):
self.style = style
self.docpy = docpy
def __call__(self, wrapped):
return _wrapforsplop(_OperatorType.Pipe, wrapped, self.style, self.docpy)
class filter(object):
"""
Decorator that creates a filter SPL operator from a callable class or function.
A filter SPL operator has a single input port and one mandatory
and one optional output port. The schema of each output port
must match the input port. For each tuple on the input port the
    callable is called passing the contents of the tuple. If the
    callable returns a value that evaluates to ``True`` then the tuple is
    submitted to the mandatory output port 0. Otherwise it is submitted to
    the second, optional output port (1) or discarded if that port is
    not specified in the SPL invocation.
Args:
style: How the SPL tuple is passed into Python callable or function, see :ref:`spl-tuple-to-python`.
docpy: Copy Python docstrings into SPL operator model for SPLDOC.
Example definition::
@spl.filter()
class AttribThreshold(object):
\"\"\"
Filter based upon a single attribute being
above a threshold.
\"\"\"
def __init__(self, attr, threshold):
self.attr = attr
self.threshold = threshold
def __call__(self, **tuple_):
                return tuple_[self.attr] > self.threshold
Example SPL invocation::
stream<rstring id, float64 voltage> Sensors = ...
stream<Sensors> InterestingSensors = AttribThreshold(Sensors) {
param
attr: "voltage";
threshold: 225.0;
}
Exceptions raised by ``__call__`` can be suppressed when this decorator
wraps a class with context manager ``__enter__`` and ``__exit__`` methods.
If ``__exit__`` returns a true value when called with the exception
    then the exception is suppressed and the tuple that caused the
exception is dropped.
"""
def __init__(self, style=None, docpy=True):
self.style = style
self.docpy = docpy
def __call__(self, wrapped):
return _wrapforsplop(_OperatorType.Filter, wrapped, self.style, self.docpy)
def ignore(wrapped):
"""
Decorator to ignore a Python function.
If a Python callable is decorated with ``@spl.ignore``
    then the function is ignored by ``spl-python-extract.py``.
Args:
wrapped: Function that will be ignored.
"""
@functools.wraps(wrapped)
def _ignore(*args, **kwargs):
return wrapped(*args, **kwargs)
_ignore._splpy_optype = _OperatorType.Ignore
_ignore._splpy_file = inspect.getsourcefile(wrapped)
return _ignore
# Defines a function as a sink operator
def sink(wrapped):
"""Creates an SPL operator with a single input port.
    An SPL operator with a single input port and no output ports.
For each tuple on the input port the decorated function
is called passing the contents of the tuple.
.. deprecated:: 1.8
Recommended to use :py:class:`@spl.for_each <for_each>` instead.
"""
if not inspect.isfunction(wrapped):
raise TypeError('A function is required')
return _wrapforsplop(_OperatorType.Sink, wrapped, 'position', False)
# Defines a function as a sink operator
class for_each(object):
"""
Creates an SPL operator with a single input port.
    An SPL operator with a single input port and no output ports.
For each tuple on the input port the decorated callable
is called passing the contents of the tuple.
Example definition::
@spl.for_each()
def PrintTuple(*tuple_):
\"\"\"Print each tuple to standard out.\"\"\"
print(tuple_, flush=True)
Example SPL invocation::
() as PT = PrintTuple(SensorReadings) { }
Args:
style: How the SPL tuple is passed into Python callable, see :ref:`spl-tuple-to-python`.
docpy: Copy Python docstrings into SPL operator model for SPLDOC.
Exceptions raised by ``__call__`` can be suppressed when this decorator
wraps a class with context manager ``__enter__`` and ``__exit__`` methods.
If ``__exit__`` returns a true value when called with the exception
    then the exception is suppressed and the tuple that caused the
exception is ignored.
"""
def __init__(self, style=None, docpy=True):
self.style = style
self.docpy = docpy
def __call__(self, wrapped):
return _wrapforsplop(_OperatorType.Sink, wrapped, self.style, self.docpy)
class PrimitiveOperator(object):
"""Primitive operator super class.
Classes decorated with `@spl.primitive_operator` must extend
this class if they have one or more output ports. This class
    provides the `submit` method to submit tuples to a specified
    output port.
.. versionadded:: 1.8
"""
def submit(self, port_id, tuple_):
"""Submit a tuple to the output port.
        The value to be submitted (``tuple_``) can be ``None`` (nothing will be submitted),
        a ``tuple``, ``dict`` or ``list`` of those types. For details
on how the ``tuple_`` is mapped to an SPL tuple see :ref:`submit-from-python`.
Args:
port_id: Identifier of the port specified in the
``output_ports`` parameter of the ``@spl.primitive_operator``
decorator.
tuple_: Tuple (or tuples) to be submitted to the output port.
"""
port_index = self._splpy_output_ports[port_id]
ec._submit(self, port_index, tuple_)
def all_ports_ready(self):
"""Notifcation that the operator can submit tuples.
Called when the primitive operator can submit tuples
using :py:meth:`submit`. An operator must not submit
tuples until this method is called or until a port
processing method is called.
Any implementation must not block. A typical use
is to start threads that submit tuples.
An implementation must return a value that allows
the SPL runtime to determine when an operator completes.
An operator completes, and finalizes its output ports
when:
* All input ports (if any) have been finalized.
* All background processing is complete.
The return from ``all_ports_ready`` defines when
background processing, such as threads started by
``all_ports_ready``, is complete. The value is one of:
* A value that evaluates to `False` - No background processing exists.
* A value that evaluates to `True` - Background processing exists and never completes. E.g. a source operator that processes real time events.
        * A callable - Background processing is complete when the callable returns. The SPL runtime invokes the callable once (passing no arguments); when it returns, background processing is assumed to be complete.
For example if an implementation starts a single thread then `Thread.join` is returned to complete the operator when the thread completes::
def all_ports_ready(self):
submitter = threading.Thread(target=self._find_and_submit_data)
submitter.start()
return submitter.join
def _find_and_submit_data(self):
...
Returns:
Value indicating active background processing.
This method implementation does nothing and returns ``None``.
"""
return None
class input_port(object):
"""Declare an input port and its processor method.
Instance methods within a class decorated by
:py:class:`spl.primitive_operator <primitive_operator>` declare
input ports by decorating methods with this decorator.
Each tuple arriving on the input port will result in a call
to the processor method passing the stream tuple converted to
a Python representation depending on the style. The style is
determined by the method signature or the `style` parameter,
see :ref:`spl-tuple-to-python`.
The order of the methods within the class define
the order of the ports, so the first port is
the first method decorated with `input_port`.
Args:
style: How the SPL tuple is passed into the method, see :ref:`spl-tuple-to-python`.
.. versionadded:: 1.8
"""
_count = 0
def __init__(self, style=None):
self._style = style
def __call__(self, wrapped):
wrapped._splpy_input_port_seq = input_port._count
wrapped._splpy_input_port_config = self
wrapped._splpy_style = self._style
input_port._count += 1
return wrapped
class primitive_operator(object):
"""Creates an SPL primitive operator with an arbitrary number of input ports and
output ports.
Input ports are declared by decorating an instance method
with :py:meth:`input_port`. The method is the process method
for the input port and is called for each tuple that arrives
at the port. The order of the decorated process methods defines
the order of the ports in the SPL operator, with the first
process method being the first port at index zero.
Output ports are declared by the ``output_ports`` parameter which
is set to a ``list`` of port identifiers. The port identifiers are
    arbitrary but must be hashable. Port identifiers allow tuples to be
    submitted "logically" rather than through a port index. Typically
a port identifier will be a `str` or an `enum`. The size of the list
defines the number of output ports with the first identifier in the list
    corresponding to the first output port of the operator at index zero.
If the list is empty or not set then the operator has no output ports.
Tuples are submitted to an output port using :py:meth:`~PrimitiveOperator.submit`.
When an operator has output ports it must be a sub-class of
:py:class:`PrimitiveOperator` which provides the
:py:meth:`~PrimitiveOperator.submit` method and the ports
ready notification mechanism :py:meth:`~PrimitiveOperator.all_ports_ready`.
Example definition of an operator with a single input port and two output ports::
@spl.primitive_operator(output_ports=['MATCH', 'NEAR_MATCH'])
class SelectCustomers(spl.PrimitiveOperator):
\"\"\" Score customers using a model.
Customers that are a good match are submitted to port 0 ('MATCH')
while customers that are a near match are submitted to port 1 ('NEAR_MATCH').
Customers that are not a good or near match are not submitted to any port.
\"\"\"
def __init__(self, match, near_match):
self.match = match
self.near_match = near_match
@spl.input_port()
def customers(self, **tuple_):
                customer_score = self.score(**tuple_)
if customer_score >= self.match:
self.submit('MATCH', tuple_)
elif customer_score >= self.near_match:
self.submit('NEAR_MATCH', tuple_)
def score(self, **customer):
# Actual model scoring omitted
score = ...
return score
Example SPL invocation::
        (stream<Customers> MakeOffer; stream<Customers> ImproveOffer) = SelectCustomers(Customers) {
param
match: 0.9;
near_match: 0.8;
}
Args:
output_ports(list): List of identifiers for output ports.
docpy: Copy Python docstrings into SPL operator model for SPLDOC.
.. versionadded:: 1.8
"""
def __init__(self, output_ports=None,docpy=True):
self._docpy = docpy
self._output_ports = output_ports
def __call__(self, wrapped):
if not inspect.isclass(wrapped):
raise TypeError('A class is required:' + str(wrapped))
_valid_identifier(wrapped.__name__)
cls = _wrapforsplop(_OperatorType.Primitive, wrapped, None, self._docpy)
inputs = dict()
for fname, fn in inspect.getmembers(wrapped):
if hasattr(fn, '_splpy_input_port_seq'):
if sys.version_info.major == 2:
fn = fn.__func__
inputs[fn._splpy_input_port_seq] = fn
cls._splpy_input_ports = []
cls._splpy_style = []
cls._splpy_fixed_count = []
for seq in sorted(inputs.keys()):
fn = inputs[seq]
fn._splpy_input_port_id = len(cls._splpy_input_ports)
fn._splpy_style = _define_style(wrapped, fn, fn._splpy_style)
fn._splpy_fixed_count = _define_fixed(cls, fn)
cls._splpy_input_ports.append(fn)
cls._splpy_style.append(fn._splpy_style)
cls._splpy_fixed_count.append(fn._splpy_fixed_count)
cls._splpy_output_ports = dict()
if self._output_ports:
for i in range(len(self._output_ports)):
cls._splpy_output_ports[self._output_ports[i]] = i
return cls
|
SerialMonitor.py
|
#
# Serial Monitor Utility
#
# Rob Dobson 2020-21
#
import threading
import time
import sys
import os
import logging
import serial
from serial.serialutil import SerialException
import argparse
import KeyboardUtils
class LogHelper:
def __init__(self, logToFile, logsFolder):
self._logger = logging.getLogger(__name__)
self._logger.setLevel(logging.DEBUG)
self._curLine = ""
# Log to file if required
if logToFile:
# Check the log folder exists
if not os.path.exists(logsFolder):
os.mkdir(logsFolder)
# Form log file name
logFileName = time.strftime("%Y%m%d-%H%M%S") + ".log"
logFileName = os.path.join(logsFolder, logFileName)
logfh = logging.FileHandler(logFileName)
logfh.setLevel(logging.DEBUG)
self._logger.addHandler(logfh)
def getLogger(self):
return self._logger
def info(self, logMsg):
self._logger.info(logMsg)
class SerialIO:
def __init__(self, dataCallback, logHelper):
self._running = False
self._serialThread = None
self._dataCallback = dataCallback
self._isTestMode = False
self._logHelper = logHelper
self._serial = None
def open(self, serialPort, serialBaud, testMode):
self._serialPort = serialPort
self._serialBaud = serialBaud
self._isTestMode = testMode
if self._isTestMode:
self._serialThreadStart(self._testLoop)
else:
self._openSerial()
def close(self):
try:
self._serialThreadStop()
except Exception as excp:
print("Error stopping and closing port", excp)
def send(self, toSend):
if self._isTestMode:
print("Test sending", toSend)
else:
asciiOut = (toSend + '\n').encode("ascii")
# print(asciiOut)
self._serial.write(asciiOut)
def _serialThreadStart(self, threadFn):
if self._running:
raise RuntimeError("reader already running")
self._serialThread = threading.Thread(target=threadFn)
self._serialThread.setDaemon(True)
self._running = True
self._serialThread.start()
def _serialThreadStop(self):
self._running = False
try:
self._serialThread.join()
except:
pass
self._serialThread = None
def _serialRxThreadFn(self):
while self._running:
try:
numWaiting = self._serial.in_waiting
if numWaiting < 1:
time.sleep(0.001)
continue
rxBytes = self._serial.read(numWaiting)
self._dataCallback(rxBytes)
except SerialException as excp:
print("Error in serial " + str(excp))
try:
# Try to reopen port
time.sleep(2)
self.close()
self._openSerial()
except Exception as excp2:
print("Error reopening serial " + str(excp2))
except Exception as excp3:
print("Non-serial error " + str(excp3))
def _openSerial(self):
# Open the serial connection
try:
self._serial = serial.Serial(
port=self._serialPort,
baudrate=self._serialBaud,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS
)
except Exception as excp:
print("Serial Port " + str(self._serialPort) + " " + str(excp))
return False
try:
self._serial.set_buffer_size(20000, None)
except Exception:
print("Failed to set serial buffer size")
print(f"SerialMonitor port {self._serialPort} baudrate {self._serialBaud}")
if not self._running:
# Start serial reader
self._serialThreadStart(self._serialRxThreadFn)
def _testLoop(self):
while self._running:
self._dataCallback(b"this is a test\r\n")
time.sleep(1)
class InputLine:
RET_OK = 0
RET_EXIT = 1
RET_ENTER = 2
def __init__(self):
self._inLineBuf = ""
self._inLinePos = 0
self._history = []
self._historyCurPos = 0
def isActive(self):
return len(self._inLineBuf) != 0
def show(self):
sys.stdout.write(Terminal.LINE_CLEAR_ALL + Terminal.CURSOR_LINE_START + self._inLineBuf)
if len(self._inLineBuf) != self._inLinePos:
Terminal.cursorLeft(len(self._inLineBuf) - self._inLinePos)
sys.stdout.flush()
def getLine(self):
return self._inLineBuf
def clear(self):
self._inLineBuf = ""
self._inLinePos = 0
self.show()
def handleKey(self, keyName):
if len(keyName) == 1:
# Printable character
# prevCmdLen = len(self._inLineBuf)
if len(self._inLineBuf) != self._inLinePos:
self._inLineBuf = self._inLineBuf[:self._inLinePos] + keyName + self._inLineBuf[self._inLinePos:]
else:
self._inLineBuf += keyName
self._inLinePos += 1
self.show()
# print(self._inLineBuf)
elif keyName == 'CTRL-C' or keyName == 'ESC':
return self.RET_EXIT
elif keyName == 'BACKSPACE':
# prevCmdLen = len(self._inLineBuf)
if self._inLinePos != 0 and len(self._inLineBuf) != 0:
self._inLineBuf = self._inLineBuf[:self._inLinePos-1] + self._inLineBuf[self._inLinePos:]
self._inLinePos -= 1
self.show()
elif keyName == 'DEL':
if self._inLinePos != len(self._inLineBuf) and len(self._inLineBuf) != 0:
self._inLineBuf = self._inLineBuf[:self._inLinePos] + self._inLineBuf[self._inLinePos+1:]
self.show()
elif keyName == 'LEFT':
if self._inLinePos > 0:
self._inLinePos -= 1
self.show()
elif keyName == 'RIGHT':
if self._inLinePos < len(self._inLineBuf):
self._inLinePos += 1
self.show()
elif keyName == 'HOME':
self._inLinePos = 0
self.show()
elif keyName == 'END':
self._inLinePos = len(self._inLineBuf)
self.show()
elif keyName == 'UP':
if len(self._history) > 0:
self._historyCurPos = (len(self._history) + self._historyCurPos - 1) % len(self._history)
self._inLineBuf = self._history[self._historyCurPos]
self._inLinePos = len(self._inLineBuf)
self.show()
elif keyName == 'DOWN':
if len(self._history) > 0:
if (self._historyCurPos == len(self._history)):
self._historyCurPos = 0
else:
self._historyCurPos = (len(self._history) + self._historyCurPos + 1) % len(self._history)
self._inLineBuf = self._history[self._historyCurPos]
self._inLinePos = len(self._inLineBuf)
self.show()
elif keyName == 'ENTER':
if len(self._inLineBuf) != 0:
if len(self._history) == 0 or self._history[-1] != self._inLineBuf:
self._history.append(self._inLineBuf)
self._historyCurPos = len(self._history)
return self.RET_ENTER
# else:
# sys.stdout.write(Terminal.CURSOR_DOWN)
# print(f"Special {keyName}")
return self.RET_OK
class Terminal:
CURSOR_UP = "\u001b[1A"
CURSOR_DOWN = "\u001b[1B"
CURSOR_RIGHT = "\u001b[1C"
CURSOR_LEFT = "\u001b[1D"
CURSOR_LEFT_N = "\u001b[{}D"
CURSOR_LINE_START = "\u001b[1000D"
CURSOR_BOTTOM_LEFT = "\u001b[1000;1H"
CURSOR_SAVE = "\u001b[s"
CURSOR_RESTORE = "\u001b[u"
LINE_CLEAR_TO_END = "\u001b[0K"
LINE_CLEAR_TO_START = "\u001b[1K"
LINE_CLEAR_ALL = "\u001b[2K"
def __init__(self, logHelper, stripTermColours):
self._logHelper = logHelper
self._stripTermColours = stripTermColours
self._inputLine = InputLine()
self._outLineBuf = ""
self._serialInEsc = False
self._serialLastChCode = 0
self._running = True
self._keyboardUtils = KeyboardUtils.KeyboardUtils(self._handleKey)
def close(self):
if (len(self._outLineBuf)) > 0:
self._sendToLog(self._outLineBuf)
self._running = False
self._keyboardUtils.close()
def setOutput(self, outputConn):
self._outputConn = outputConn
def isOpen(self):
return self._running
@classmethod
def cursorLeft(cls, moves):
oStr = cls.CURSOR_LEFT_N.format(moves)
sys.stdout.write(oStr)
def handleSerialData(self, serialData: bytes):
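        # Splits the raw serial bytes into terminal output and complete log
        # lines: escape sequences (assumed to carry only colour codes ending
        # in 'm') are optionally stripped from the terminal output, CR/LF
        # boundaries flush the current line to the log, and other
        # non-printable bytes are rendered as /xNN escapes.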
termOutStr = ""
for chCode in serialData:
termAmended = False
termStr = ""
if self._serialInEsc:
if chCode == ord('m'): # assume escaped output is only colour info (which ends with 'm')
self._serialInEsc = False
if self._stripTermColours:
termAmended = True
termStr = ""
elif chCode == 27:
self._serialInEsc = True
if self._stripTermColours:
termAmended = True
termStr = ""
elif chCode == 10:
if self._serialLastChCode != 13:
self._sendToLog(self._outLineBuf)
self._outLineBuf = ""
elif chCode == 13:
if self._serialLastChCode != 10:
self._sendToLog(self._outLineBuf)
self._outLineBuf = ""
elif chCode == 9 or (chCode >= 32 and chCode < 127):
self._outLineBuf += chr(chCode)
else:
termStr = f"/x{chCode:02x}"
self._outLineBuf += termStr
termAmended = True
self._serialLastChCode = chCode
termOutStr += chr(chCode) if not termAmended else termStr
self._sendToOutWindow(termOutStr)
def _sendToLog(self, outStr):
self._logHelper.info(outStr)
def _handleKey(self, keyName):
# print("Handle key ", keyName)
rslt = self._inputLine.handleKey(keyName)
if rslt == InputLine.RET_EXIT:
self._running = False
elif rslt == InputLine.RET_ENTER:
# Send to serial
outLine = self._inputLine.getLine()
# print("Sending " + outLine)
self._outputConn.send(outLine)
self._inputLine.clear()
def _sendToOutWindow(self, serialData):
if self._inputLine.isActive():
sys.stdout.write(self.CURSOR_UP + self.LINE_CLEAR_ALL + self.CURSOR_LINE_START)
sys.stdout.write(serialData)
sys.stdout.write(self.CURSOR_DOWN)
self._inputLine.show()
else:
sys.stdout.write(serialData)
sys.stdout.flush()
# Handle arguments
argparser = argparse.ArgumentParser(description='Serial Monitor')
argparser.add_argument('serialPort', action='store')
argparser.add_argument('-b', action='store', type=int, default=2000000, dest='serialbaud')
argparser.add_argument('-g', action='store_true', dest='logToFile',
help='log to a file in ./logs folder with date-time based name')
argparser.add_argument('-c', action='store_true', dest='retainColors',
help='retain colours in terminal output (log is always stripped of colours)')
args = argparser.parse_args()
# Logging
logHelper = LogHelper(args.logToFile, "logs")
# Terminal
terminal = Terminal(logHelper, not args.retainColors)
# Serial
serialIO = SerialIO(terminal.handleSerialData, logHelper)
terminal.setOutput(serialIO)
serialIO.open(args.serialPort, args.serialbaud, False)
try:
while terminal.isOpen():
pass
except KeyboardInterrupt:
pass
serialIO.close()
terminal.close()
print()
|
wr_arp.py
|
# This is a Control Plane Assistant test for Warm-Reboot.
# The test first starts the Ferret server, implemented in Python, and then initiates the Warm-Reboot procedure.
# While the host is in Warm-Reboot, the test continuously sends ARP requests to the Vlan member ports and
# expects to receive ARP replies. The test fails as soon as there are no replies for more than 25 seconds
# on any of the Vlan member ports.
# To Run the test from the command line:
# ptf --test-dir 1 1.ArpTest --platform-dir ptftests --platform remote -t "config_file='/tmp/vxlan_decap.json';ferret_ip='10.64.246.21';dut_ssh='10.3.147.243';how_long=370"
#
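# Illustrative sketch (not taken from any real topology) of the minimal structure
# that setUp() below expects in config_file; the interface names, indices and
# addresses here are hypothetical:
#   {
#     "minigraph_vlans": {"Vlan1000": {"members": ["Ethernet4", "Ethernet8"]}},
#     "minigraph_port_indices": {"Ethernet4": 1, "Ethernet8": 2},
#     "minigraph_vlan_interfaces": [{"attachto": "Vlan1000", "addr": "192.168.0.1", "prefixlen": 24}],
#     "dut_mac": "00:11:22:33:44:55"
#   }
#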
import time
import json
import subprocess
import datetime
import traceback
import sys
import os
import struct
import socket
import threading
from collections import defaultdict
from pprint import pprint
from Queue import Queue
import ptf
from ptf.base_tests import BaseTest
from ptf import config
import ptf.dataplane as dataplane
import ptf.testutils as testutils
from device_connection import DeviceConnection
class ArpTest(BaseTest):
def __init__(self):
BaseTest.__init__(self)
log_file_name = '/tmp/wr_arp_test.log'
self.log_fp = open(log_file_name, 'a')
self.log_fp.write("\nNew test:\n")
self.q_to_dut = Queue()
self.q_from_dut = Queue()
return
def __del__(self):
self.log_fp.close()
return
def log(self, message):
current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print "%s : %s" % (current_time, message)
self.log_fp.write("%s : %s\n" % (current_time, message))
self.log_fp.flush()
return
def cmd(self, cmds):
process = subprocess.Popen(cmds,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
return_code = process.returncode
return stdout, stderr, return_code
def dut_exec_cmd(self, cmd):
self.log("Executing cmd='{}'".format(cmd))
stdout, stderr, return_code = self.dut_connection.execCommand(cmd, timeout=30)
self.log("return_code={}, stdout={}, stderr={}".format(return_code, stdout, stderr))
if return_code == 0:
return True, str(stdout)
elif return_code == 255:
return True, str(stdout)
else:
return False, "return code: %d. stdout = '%s' stderr = '%s'" % (return_code, str(stdout), str(stderr))
def dut_thr(self, q_from, q_to):
while True:
cmd = q_from.get()
if cmd == 'WR':
self.log("Rebooting remote side")
res, res_text = self.dut_exec_cmd("sudo warm-reboot -c {}".format(self.ferret_ip))
if res:
q_to.put('ok: %s' % res_text)
else:
q_to.put('error: %s' % res_text)
elif cmd == 'uptime':
self.log("Check uptime remote side")
res, res_text = self.dut_exec_cmd("uptime -s")
if res:
q_to.put('ok: %s' % res_text)
else:
q_to.put('error: %s' % res_text)
elif cmd == 'quit':
q_to.put("done")
break
else:
self.log('Unsupported cmd: %s' % cmd)
q_to.put("error: unsupported cmd: %s" % cmd)
self.log("Quiting from dut_thr")
return
def test_port_thr(self):
self.log("test_port_thr started")
while time.time() < self.stop_at:
for test in self.tests:
for port in test['acc_ports']:
nr_rcvd = self.testPort(port)
self.records[port][time.time()] = nr_rcvd
self.log("Quiting from test_port_thr")
return
def readMacs(self):
addrs = {}
for intf in os.listdir('/sys/class/net'):
if os.path.isdir('/sys/class/net/%s' % intf):
with open('/sys/class/net/%s/address' % intf) as fp:
addrs[intf] = fp.read().strip()
return addrs
def generate_VlanPrefixes(self, gw, prefixlen, acc_ports):
res = {}
n_hosts = 2**(32 - prefixlen) - 3
nr_of_dataplane_ports = len(self.dataplane.ports)
if nr_of_dataplane_ports > n_hosts:
raise Exception("The prefix len size is too small for the test")
gw_addr_n = struct.unpack(">I", socket.inet_aton(gw))[0]
mask = (2**32 - 1) ^ (2**(32 - prefixlen) - 1)
net_addr_n = gw_addr_n & mask
addr = 1
for port in acc_ports:
while True:
host_addr_n = net_addr_n + addr
host_ip = socket.inet_ntoa(struct.pack(">I", host_addr_n))
if host_ip != gw:
break
else:
addr += 1 # skip gw
res[port] = host_ip
addr += 1
return res
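# Worked example (hypothetical values) for generate_VlanPrefixes() above: with
# gw="192.168.0.1", prefixlen=24 and acc_ports=[5, 6], the first candidate
# 192.168.0.1 collides with the gateway and is skipped, so the method returns
# {5: "192.168.0.2", 6: "192.168.0.3"}.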
def generatePkts(self, gw, port_ip, port_mac):
pkt = testutils.simple_arp_packet(
ip_snd=port_ip,
ip_tgt=gw,
eth_src=port_mac,
hw_snd=port_mac,
)
exp_pkt = testutils.simple_arp_packet(
ip_snd=gw,
ip_tgt=port_ip,
eth_src=self.dut_mac,
eth_dst=port_mac,
hw_snd=self.dut_mac,
hw_tgt=port_mac,
arp_op=2,
)
return str(pkt), str(exp_pkt)
def generatePackets(self):
self.gen_pkts = {}
for test in self.tests:
for port in test['acc_ports']:
gw = test['vlan_gw']
port_ip = test['vlan_ip_prefixes'][port]
port_mac = self.ptf_mac_addrs['eth%d' % port]
self.gen_pkts[port] = self.generatePkts(gw, port_ip, port_mac)
return
def get_param(self, param_name, required=True, default=None):
params = testutils.test_params_get()
if param_name not in params:
if required:
raise Exception("required parameter '%s' is not presented" % param_name)
else:
return default
else:
return params[param_name]
def setUp(self):
self.dataplane = ptf.dataplane_instance
config = self.get_param('config_file')
self.ferret_ip = self.get_param('ferret_ip')
self.dut_ssh = self.get_param('dut_ssh')
self.dut_username = self.get_param('dut_username')
self.dut_password = self.get_param('dut_password')
self.dut_connection = DeviceConnection(self.dut_ssh, username=self.dut_username, password=self.dut_password)
self.how_long = int(self.get_param('how_long', required=False, default=300))
if not os.path.isfile(config):
raise Exception("the config file %s doesn't exist" % config)
with open(config) as fp:
graph = json.load(fp)
self.tests = []
vni_base = 0
for name, data in graph['minigraph_vlans'].items():
test = {}
test['acc_ports'] = [graph['minigraph_port_indices'][member] for member in data['members']]
vlan_id = int(name.replace('Vlan', ''))
test['vni'] = vni_base + vlan_id
gw = None
prefixlen = None
for d in graph['minigraph_vlan_interfaces']:
if d['attachto'] == name:
gw = d['addr']
prefixlen = int(d['prefixlen'])
break
else:
raise Exception("Vlan '%s' is not found" % name)
test['vlan_gw'] = gw
test['vlan_ip_prefixes'] = self.generate_VlanPrefixes(gw, prefixlen, test['acc_ports'])
self.tests.append(test)
self.dut_mac = graph['dut_mac']
self.ptf_mac_addrs = self.readMacs()
self.generatePackets()
self.cmd(["supervisorctl", "restart", "ferret"])
self.dataplane.flush()
return
def tearDown(self):
self.cmd(["supervisorctl", "stop", "ferret"])
return
def runTest(self):
print
thr = threading.Thread(target=self.dut_thr, kwargs={'q_from': self.q_to_dut, 'q_to': self.q_from_dut})
thr.setDaemon(True)
thr.start()
uptime_before = self.req_dut('uptime')
if uptime_before.startswith('error'):
self.log("DUT returned error for first uptime request")
self.req_dut('quit')
self.assertTrue(False, "DUT returned error for first uptime request")
self.records = defaultdict(dict)
self.stop_at = time.time() + self.how_long
test_port_thr = threading.Thread(target=self.test_port_thr)
test_port_thr.setDaemon(True)
test_port_thr.start()
self.log("Issuing WR command")
result = self.req_dut('WR')
if result.startswith('ok'):
self.log("WR OK!")
else:
self.log("Error in WR")
self.req_dut('quit')
self.assertTrue(False, "Error in WR")
self.assertTrue(time.time() < self.stop_at, "warm-reboot took too long")
test_port_thr.join(timeout=self.how_long)
if test_port_thr.isAlive():
self.log("Timed out waiting for warm reboot")
self.req_dut('quit')
self.assertTrue(False, "Timed out waiting for warm reboot")
uptime_after = self.req_dut('uptime')
if uptime_after.startswith('error'):
self.log("DUT returned error for second uptime request")
self.req_dut('quit')
self.assertTrue(False, "DUT returned error for second uptime request")
self.req_dut('quit')
if uptime_before == uptime_after:
self.log("The DUT wasn't rebooted. Uptime: %s vs %s" % (uptime_before, uptime_after))
self.assertTrue(uptime_before != uptime_after, "The DUT wasn't rebooted. Uptime: %s vs %s" % (uptime_before, uptime_after))
# check that no port had a pause in ARP replies longer than 25 seconds
pauses = defaultdict(list)
for port, data in self.records.items():
was_active = True
last_inactive = None
for t in sorted(data.keys()):
active = data[t] > 0
if was_active and not active:
last_inactive = t
elif not was_active and active:
pauses[port].append(t - last_inactive)
was_active = active
if not was_active:
pauses[port].append(sorted(data.keys())[-1] - last_inactive)
m_pauses = {port: max(pauses[port]) for port in pauses.keys() if max(pauses[port]) > 25}
for port in m_pauses.keys():
self.log("Port eth%d. Max pause in arp_response %d sec" % (port, int(m_pauses[port])))
print
sys.stdout.flush()
self.assertTrue(len(m_pauses) == 0, "Too long pauses in arp responses")
return
def testPort(self, port):
pkt, exp_pkt = self.gen_pkts[port]
testutils.send_packet(self, port, pkt)
nr_rcvd = testutils.count_matched_packets(self, exp_pkt, port, timeout=0.2)
return nr_rcvd
def req_dut(self, cmd):
self.log("cmd: %s" % cmd)
self.q_to_dut.put(cmd)
reply = self.q_from_dut.get()
self.log("reply: %s" % reply)
return reply
|
multi_process_runner_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `multi_process_runner`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import threading
import time
from absl import logging
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.eager import test
def proc_func_that_adds_task_type_in_return_data():
return multi_worker_test_base.get_task_type()
def proc_func_that_errors():
raise ValueError('This is an error.')
def proc_func_that_does_nothing():
pass
def proc_func_that_adds_simple_return_data():
return 'dummy_data'
def proc_func_that_return_args_and_kwargs(*args, **kwargs):
return list(args) + list(kwargs.items())
def proc_func_with_barrier():
return multi_process_runner.barrier()
class MultiProcessRunnerTest(test.TestCase):
def _worker_idx(self):
config_task = json.loads(os.environ['TF_CONFIG'])['task']
return config_task['index']
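# _worker_idx() above relies on the TF_CONFIG environment variable that
# multi_process_runner sets for each subprocess, roughly of the form
# (illustrative): '{"cluster": {...}, "task": {"type": "worker", "index": 0}}'.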
def test_multi_process_runner(self):
mpr_result = multi_process_runner.run(
proc_func_that_adds_task_type_in_return_data,
multi_worker_test_base.create_cluster_spec(
num_workers=2, num_ps=3, has_eval=1))
job_count_dict = {'worker': 2, 'ps': 3, 'evaluator': 1}
for data in mpr_result.return_value:
job_count_dict[data] -= 1
self.assertEqual(job_count_dict['worker'], 0)
self.assertEqual(job_count_dict['ps'], 0)
self.assertEqual(job_count_dict['evaluator'], 0)
def test_multi_process_runner_error_propagates_from_subprocesses(self):
runner = multi_process_runner.MultiProcessRunner(
proc_func_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
max_run_time=20)
runner.start()
with self.assertRaisesRegexp(ValueError, 'This is an error.'):
runner.join()
def test_multi_process_runner_queue_emptied_between_runs(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
return_value = multi_process_runner.run(
proc_func_that_adds_simple_return_data, cluster_spec).return_value
self.assertTrue(return_value)
self.assertEqual(return_value[0], 'dummy_data')
self.assertEqual(return_value[1], 'dummy_data')
return_value = multi_process_runner.run(proc_func_that_does_nothing,
cluster_spec).return_value
self.assertFalse(return_value)
def test_multi_process_runner_args_passed_correctly(self):
return_value = multi_process_runner.run(
proc_func_that_return_args_and_kwargs,
multi_worker_test_base.create_cluster_spec(num_workers=1),
args=('a', 'b'),
kwargs={
'c_k': 'c_v'
}).return_value
self.assertEqual(return_value[0][0], 'a')
self.assertEqual(return_value[0][1], 'b')
self.assertEqual(return_value[0][2], ('c_k', 'c_v'))
def test_stdout_captured(self):
def simple_print_func():
print('This is something printed.', flush=True)
return 'This is returned data.'
mpr_result = multi_process_runner.run(
simple_print_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
std_stream_results = mpr_result.stdout
return_value = mpr_result.return_value
self.assertIn('[worker-0]: This is something printed.\n',
std_stream_results)
self.assertIn('[worker-1]: This is something printed.\n',
std_stream_results)
self.assertIn('This is returned data.', return_value)
def test_process_that_exits(self):
def func_to_exit_in_25_sec():
logging.error('foo')
time.sleep(100)
logging.error('bar')
mpr = multi_process_runner.MultiProcessRunner(
func_to_exit_in_25_sec,
multi_worker_test_base.create_cluster_spec(num_workers=1),
list_stdout=True,
max_run_time=25)
mpr.start()
stdout = mpr.join().stdout
self.assertLen([msg for msg in stdout if 'foo' in msg], 1)
self.assertLen([msg for msg in stdout if 'bar' in msg], 0)
def test_termination(self):
def proc_func():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(5)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(5)
mpr.terminate('worker', 0)
std_stream_results = mpr.join().stdout
# Worker 0 is terminated in the middle, so it should not have iteration 9
# printed.
self.assertIn('[worker-0]: index 0, iteration 0\n', std_stream_results)
self.assertNotIn('[worker-0]: index 0, iteration 9\n',
std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_termination_and_start_single_process(self):
def proc_func():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(3)
mpr.terminate('worker', 0)
mpr.start_single_process('worker', 0)
std_stream_results = mpr.join().stdout
# Worker 0 is terminated in the middle, but a new worker 0 is added, so it
# should still have iteration 9 printed. Moreover, iteration 0 of worker 0
# should happen twice.
self.assertLen(
[s for s in std_stream_results if 'index 0, iteration 0' in s], 2)
self.assertIn('[worker-0]: index 0, iteration 9\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_streaming(self):
def proc_func():
for i in range(5):
logging.info('(logging) %s-%d, i: %d',
multi_worker_test_base.get_task_type(), self._worker_idx(),
i)
print(
'(print) {}-{}, i: {}'.format(
multi_worker_test_base.get_task_type(), self._worker_idx(), i),
flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=2, num_ps=2, has_eval=True),
list_stdout=True)
mpr._dependence_on_chief = False
mpr.start()
mpr.start_single_process('worker', 2)
mpr.start_single_process('ps', 2)
mpr_result = mpr.join()
list_to_assert = mpr_result.stdout
for job in ['chief', 'evaluator']:
for iteration in range(5):
self.assertTrue(
any('(logging) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
for job in ['worker', 'ps']:
for iteration in range(5):
for task in range(3):
self.assertTrue(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
task = 3
self.assertFalse(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertFalse(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
def test_start_in_process_as(self):
def proc_func():
for i in range(5):
logging.info('%s-%d, i: %d', multi_worker_test_base.get_task_type(),
self._worker_idx(), i)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
list_stdout=True)
def eval_func():
time.sleep(1)
mpr.start_single_process(task_type='evaluator', task_id=0)
eval_thread = threading.Thread(target=eval_func)
eval_thread.start()
mpr.start_in_process_as(as_task_type='chief', as_task_id=0)
eval_thread.join()
list_to_assert = mpr.join().stdout
for job in ['worker', 'evaluator']:
for iteration in range(5):
self.assertTrue(
any('{}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
def test_terminate_all_does_not_ignore_error(self):
mpr = multi_process_runner.MultiProcessRunner(
proc_func_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(60)
mpr.terminate_all()
with self.assertRaisesRegexp(ValueError, 'This is an error.'):
mpr.join()
def test_barrier(self):
multi_process_runner.run(
proc_func_with_barrier,
cluster_spec=multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
)
def test_barrier_called_in_main_process(self):
with self.assertRaises(ValueError):
multi_process_runner.barrier()
def test_stdout_available_when_timeout(self):
def proc_func():
for i in range(50):
logging.info('(logging) %s-%d, i: %d',
multi_worker_test_base.get_task_type(), self._worker_idx(),
i)
time.sleep(1)
with self.assertRaises(multi_process_runner.SubprocessTimeoutError) as cm:
multi_process_runner.run(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
list_stdout=True,
timeout=5)
list_to_assert = cm.exception.mpr_result.stdout
# We should see 5 iterations from worker and ps; however, sometimes on TAP,
# due to CPU throttling and the sluggishness of msan/asan builds, this became
# flaky. Therefore we allow a larger margin of error and only check the first
# 3 iterations.
for job in ['worker', 'ps']:
for iteration in range(0, 3):
self.assertTrue(
any('(logging) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
if __name__ == '__main__':
multi_process_runner.test_main()
|
tasks.py
|
#!/usr/local/bin/python3
# coding: utf-8
# ytdlbot - tasks.py
# 12/29/21 14:57
#
__author__ = "Benny <benny.think@gmail.com>"
import json
import logging
import os
import pathlib
import re
import subprocess
import tempfile
import threading
import time
from urllib.parse import quote_plus
import psutil
import requests
from apscheduler.schedulers.background import BackgroundScheduler
from celery import Celery
from celery.worker.control import Panel
from pyrogram import idle
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from requests_toolbelt.multipart.encoder import MultipartEncoder
from client_init import create_app
from config import (ARCHIVE_ID, AUDIO_FORMAT, BROKER, ENABLE_CELERY,
ENABLE_VIP, TG_MAX_SIZE, WORKERS)
from constant import BotText
from db import Redis
from downloader import (edit_text, sizeof_fmt, tqdm_progress, upload_hook,
ytdl_download)
from limit import VIP
from utils import (apply_log_formatter, auto_restart, customize_logger,
get_metadata, get_revision, get_user_settings)
customize_logger(["pyrogram.client", "pyrogram.session.session", "pyrogram.connection.connection"])
apply_log_formatter()
bot_text = BotText()
logging.getLogger('apscheduler.executors.default').propagate = False
# celery -A tasks worker --loglevel=info --pool=solo
# app = Celery('celery', broker=BROKER, accept_content=['pickle'], task_serializer='pickle')
app = Celery('tasks', broker=BROKER)
celery_client = create_app(":memory:")
def get_messages(chat_id, message_id):
try:
return celery_client.get_messages(chat_id, message_id)
except ConnectionError as e:
logging.critical("WTH!!! %s", e)
celery_client.start()
return celery_client.get_messages(chat_id, message_id)
@app.task()
def ytdl_download_task(chat_id, message_id, url):
logging.info("YouTube celery tasks started for %s", url)
bot_msg = get_messages(chat_id, message_id)
ytdl_normal_download(bot_msg, celery_client, url)
logging.info("YouTube celery tasks ended.")
@app.task()
def audio_task(chat_id, message_id):
logging.info("Audio celery tasks started for %s-%s", chat_id, message_id)
bot_msg = get_messages(chat_id, message_id)
normal_audio(bot_msg, celery_client)
logging.info("Audio celery tasks ended.")
def get_unique_clink(clink, settings):
try:
unique = "{}?p={}{}".format(clink, *settings[1:])
except IndexError:
unique = clink
return unique
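# Illustrative example (hypothetical values) for get_unique_clink() above: with
# clink="https://example.com/watch" and settings[1:] == ("1080", "video"), the
# result is "https://example.com/watch?p=1080video"; if settings carries fewer
# than two extra items, the IndexError branch falls back to the bare clink.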
@app.task()
def direct_download_task(chat_id, message_id, url):
logging.info("Direct download celery tasks started for %s", url)
bot_msg = get_messages(chat_id, message_id)
direct_normal_download(bot_msg, celery_client, url)
logging.info("Direct download celery tasks ended.")
def forward_video(chat_id, url, client):
red = Redis()
vip = VIP()
settings = get_user_settings(str(chat_id))
clink = vip.extract_canonical_link(url)
unique = get_unique_clink(clink, settings)
cache = red.get_send_cache(unique)
if not cache:
return False
for uid, mid in cache.items():
uid, mid = int(uid), json.loads(mid)
try:
fwd_msg = client.forward_messages(chat_id, uid, mid)
if not fwd_msg:
raise ValueError("Failed to forward message")
red.update_metrics("cache_hit")
if not isinstance(fwd_msg, list):
fwd_msg = [fwd_msg]
for fwd in fwd_msg:
if ENABLE_VIP:
file_size = getattr(fwd.document, "file_size", None) or getattr(fwd.video, "file_size", 1024)
# TODO: forward file size may exceed the limit
vip.use_quota(chat_id, file_size)
red.add_send_cache(unique, chat_id, fwd.message_id)
return True
except Exception as e:
logging.error("Failed to forward message %s", e)
red.del_send_cache(unique, uid)
red.update_metrics("cache_miss")
def ytdl_download_entrance(bot_msg, client, url):
chat_id = bot_msg.chat.id
if forward_video(chat_id, url, client):
return
mode = get_user_settings(str(chat_id))[-1]
if ENABLE_CELERY and mode in [None, "Celery"]:
ytdl_download_task.delay(chat_id, bot_msg.message_id, url)
else:
ytdl_normal_download(bot_msg, client, url)
def direct_download_entrance(bot_msg, client, url):
if ENABLE_CELERY:
# TODO disable it for now
direct_normal_download(bot_msg, client, url)
# direct_download_task.delay(bot_msg.chat.id, bot_msg.message_id, url)
else:
direct_normal_download(bot_msg, client, url)
def audio_entrance(bot_msg, client):
if ENABLE_CELERY:
audio_task.delay(bot_msg.chat.id, bot_msg.message_id)
else:
normal_audio(bot_msg, client)
def direct_normal_download(bot_msg, client, url):
chat_id = bot_msg.chat.id
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"}
vip = VIP()
length = 0
if ENABLE_VIP:
remain, _, _ = vip.check_remaining_quota(chat_id)
try:
head_req = requests.head(url, headers=headers)
length = int(head_req.headers.get("content-length"))
except (TypeError, requests.exceptions.RequestException):
length = 0
if remain < length:
bot_msg.reply_text(f"Üzgünüz, kotanıza ulaştınız.\n")
return
req = None
try:
req = requests.get(url, headers=headers, stream=True)
length = int(req.headers.get("content-length"))
filename = re.findall("filename=(.+)", req.headers.get("content-disposition"))[0]
except TypeError:
filename = getattr(req, "url", "").rsplit("/")[-1]
except Exception as e:
bot_msg.edit_text(f"Indirme Hatalı!❌\n\n```{e}```", disable_web_page_preview=True)
return
if not filename:
filename = quote_plus(url)
with tempfile.TemporaryDirectory(prefix="ytdl-") as f:
filepath = f"{f}/{filename}"
# consume the req.content
downloaded = 0
for chunk in req.iter_content(1024 * 1024):
text = tqdm_progress("⏬ Indiriliyor...", length, downloaded)
edit_text(bot_msg, text)
with open(filepath, "ab") as fp:
fp.write(chunk)
downloaded += len(chunk)
logging.info("Downloaded file %s", filename)
st_size = os.stat(filepath).st_size
if ENABLE_VIP:
vip.use_quota(chat_id, st_size)
client.send_chat_action(chat_id, "upload_document")
client.send_document(bot_msg.chat.id, filepath,
caption=f"filesize: {sizeof_fmt(st_size)}",
progress=upload_hook, progress_args=(bot_msg,),
)
bot_msg.edit_text(f"✅ İndirme Tamamlandı!")
def normal_audio(bot_msg, client):
chat_id = bot_msg.chat.id
fn = getattr(bot_msg.video, "file_name", None) or getattr(bot_msg.document, "file_name", None)
with tempfile.TemporaryDirectory(prefix="ytdl-") as tmp:
logging.info("downloading to %s", tmp)
base_path = pathlib.Path(tmp)
video_path = base_path.joinpath(fn)
audio = base_path.joinpath(fn).with_suffix(f".{AUDIO_FORMAT}")
client.send_chat_action(chat_id, 'record_video_note')
client.download_media(bot_msg, video_path)
logging.info("downloading complete %s", video_path)
# execute ffmpeg
client.send_chat_action(chat_id, 'record_audio')
try:
subprocess.check_output(["ffmpeg", "-y", "-i", video_path, "-vn", "-acodec", "copy", audio])
except subprocess.CalledProcessError:
# CPU consuming if re-encoding.
subprocess.check_output(["ffmpeg", "-y", "-i", video_path, audio])
client.send_chat_action(chat_id, 'upload_audio')
client.send_audio(chat_id, audio)
Redis().update_metrics("audio_success")
def get_dl_source():
worker_name = os.getenv("WORKER_NAME")
if worker_name:
return f"👤 Kullanıcı {worker_name}"
return ""
def upload_transfer_sh(paths: list) -> str:
d = {p.name: (p.name, p.open("rb")) for p in paths}
m = MultipartEncoder(fields=d)
headers = {'Content-Type': m.content_type}
try:
req = requests.post("https://transfer.sh", data=m, headers=headers)
return re.sub(r"https://", "\nhttps://", req.text)
except requests.exceptions.RequestException as e:
return f"Yükleme Hatalı!❌\n\n```{e}```"
def ytdl_normal_download(bot_msg, client, url):
chat_id = bot_msg.chat.id
temp_dir = tempfile.TemporaryDirectory(prefix="ytdl-")
red = Redis()
result = ytdl_download(url, temp_dir.name, bot_msg)
logging.info("Download complete.")
markup = InlineKeyboardMarkup(
[
[ # First row
InlineKeyboardButton( # Generates a callback query when pressed
f"Ses Dönüştür [ {AUDIO_FORMAT} ]",
callback_data="convert"
)
]
]
)
if result["status"]:
client.send_chat_action(chat_id, 'upload_document')
video_paths = result["filepath"]
bot_msg.edit_text('Download complete. Sending...')
for video_path in video_paths:
# normally there's only one video in that path...
filename = video_path.name
remain = bot_text.remaining_quota_caption(chat_id)
st_size = os.stat(video_path).st_size
size = sizeof_fmt(st_size)
if st_size > TG_MAX_SIZE:
t = f"Video boyutunuz {size} bu Telegram için çok büyük. Transfer.sh'a yükleyeceğim"
bot_msg.edit_text(t)
client.send_chat_action(chat_id, 'upload_document')
client.send_message(chat_id, upload_transfer_sh(video_paths))
return
meta = get_metadata(video_path)
worker = get_dl_source()
cap = f"`📕Adi: {filename}`\n\nℹBilgi: {meta['width']}x{meta['height']} {size} {meta['duration']}s" \
f"\n{remain}\n{worker}"
settings = get_user_settings(str(chat_id))
if ARCHIVE_ID:
chat_id = ARCHIVE_ID
if settings[2] == "document":
logging.info("Sending as document")
res_msg = client.send_document(chat_id, video_path,
caption=cap,
progress=upload_hook, progress_args=(bot_msg,),
reply_markup=markup,
thumb=meta["thumb"]
)
elif settings[2] == "audio":
logging.info("Sending as audio")
res_msg = client.send_audio(chat_id, video_path,
caption=cap,
progress=upload_hook, progress_args=(bot_msg,),
)
else:
logging.info("Sending as video")
res_msg = client.send_video(chat_id, video_path,
supports_streaming=True,
caption=cap,
progress=upload_hook, progress_args=(bot_msg,),
reply_markup=markup,
**meta
)
clink = VIP().extract_canonical_link(url)
unique = get_unique_clink(clink, settings)
red.add_send_cache(unique, res_msg.chat.id, res_msg.message_id)
red.update_metrics("video_success")
if ARCHIVE_ID:
client.forward_messages(bot_msg.chat.id, ARCHIVE_ID, res_msg.message_id)
bot_msg.edit_text('✅ Download complete!')
else:
client.send_chat_action(chat_id, 'typing')
tb = result["error"][0:4000]
bot_msg.edit_text(f"Yükleme Hatalı!❌\n\n```{tb}```", disable_web_page_preview=True)
temp_dir.cleanup()
@Panel.register
def ping_revision(*args):
return get_revision()
@Panel.register
def hot_patch(*args):
app_path = pathlib.Path().cwd().parent
logging.info("Hot patching on path %s...", app_path)
apk_install = "xargs apk add < apk.txt"
pip_install = "pip install -r requirements.txt"
unset = "git config --unset http.https://github.com/.extraheader"
pull_unshallow = "git pull origin --unshallow"
pull = "git pull"
subprocess.call(unset, shell=True, cwd=app_path)
if subprocess.call(pull_unshallow, shell=True, cwd=app_path) != 0:
logging.info("Already unshallow, pulling now...")
subprocess.call(pull, shell=True, cwd=app_path)
logging.info("Code is updated, applying hot patch now...")
subprocess.call(apk_install, shell=True, cwd=app_path)
subprocess.call(pip_install, shell=True, cwd=app_path)
psutil.Process().kill()
def run_celery():
argv = [
"-A", "tasks", 'worker', '--loglevel=info',
"--pool=threads", f"--concurrency={WORKERS}",
"-n", os.getenv("WORKER_NAME", "")
]
app.worker_main(argv)
if __name__ == '__main__':
celery_client.start()
print("Bootstrapping Celery worker now.....")
time.sleep(5)
threading.Thread(target=run_celery, daemon=True).start()
scheduler = BackgroundScheduler(timezone="Asia/Shanghai")
scheduler.add_job(auto_restart, 'interval', seconds=5)
scheduler.start()
idle()
celery_client.stop()
|
work_controller.py
|
"""
Copyright (c) 2015 Michael Bright and Bamboo HR LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
try:
import simplejson as json
except ImportError:
import json
import os
from os.path import basename
import time
import pickle
import logging
import threading
from flask.globals import request
from flask.wrappers import Response
from rapid.client.communicator.client_communicator import ClientCommunicator
from rapid.lib import api_key_required
from rapid.lib.store_service import StoreService
from rapid.lib.utils import UpgradeUtil
from rapid.lib.work_request import WorkRequest
from rapid.client.executor import Executor
from ...lib.base_controller import BaseController
from ...lib.version import Version
logger = logging.getLogger("rapid")
# pylint: disable=broad-except
class WorkController(BaseController):
executors = []
def __init__(self):
self.app = None
def register_url_rules(self, flask_app):
"""
This is the entry point for the controller. All URL rules are add_url_rule(d) here.
:param flask_app: The app you want to add a url to
:return: void
"""
flask_app.add_url_rule('/work/request', 'work_request', api_key_required(self.work_request))
flask_app.add_url_rule('/work/execute', 'work_execute', api_key_required(self.work_execute), methods=['POST'])
flask_app.add_url_rule('/work/cancel/<path:action_instance_id>', 'work_cancel', api_key_required(self.work_cancel), methods=['POST'])
self.app = flask_app
def work_request(self):
current_work = StoreService.get_executors()
current_work = current_work + self.__get_quarantined_items()
work = {'current_work': current_work,
'hostname': self.app.rapid_config.hostname}
if self.can_work_on() and self.check_version(request):
return Response(json.dumps(work), content_type='application/json', headers={Version.HEADER: Version.get_version()})
return Response(json.dumps(work), status=423, content_type='application/json', headers={Version.HEADER: Version.get_version()})
def __get_quarantined_items(self):
items = []
quarantine_directory = self.app.rapid_config.quarantine_directory
if quarantine_directory:
communicator = None
if not os.path.isdir(quarantine_directory):
os.makedirs(quarantine_directory)
for item in os.listdir(quarantine_directory):
if item in ['.', '..']:
continue
try:
items.append({'action_instance_id': int(item), 'pid': 'quarantined'})
if communicator is None:
communicator = ClientCommunicator(self.app.rapid_config.master_uri,
self.app.rapid_config.quarantine_directory,
verify_certs=self.app.rapid_config.verify_certs,
get_files_auth=self.app.rapid_config.get_files_basic_auth)
try:
delete_file = False
with open("{}/{}".format(quarantine_directory, item), 'r') as tmp_file:
data = pickle.loads(tmp_file.read())
try:
status = data['status']
parameters = data['parameters'] if 'parameters' in data else None
stats = data['stats'] if 'stats' in data else None
results = data['results'] if 'results' in data else None
communicator.send_done(int(item), status, parameters, stats, results, logger, headers={'X-Rapid-Quarantine': 'true'})
delete_file = True
except Exception:
import traceback
traceback.print_exc()
if delete_file:
try:
os.remove("{}/{}".format(quarantine_directory, item))
except Exception:
logger.error("Couldn't remove.")
except Exception:
import traceback
traceback.print_exc()
except Exception:
pass
return items
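# Illustrative sketch (hypothetical values) of the pickled payload that
# __get_quarantined_items() above reads from each quarantine file:
#   {'status': 'FAILED', 'parameters': {...}, 'stats': {...}, 'results': {...}}
# Only 'status' is required; the other keys default to None when absent.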
def can_work_on(self, work=None):
"""
This checks the work queue and what is being run at the moment.
:param work
:type work dict
:return: True|False
"""
executors = StoreService.get_executors()
return len(executors) < self.app.rapid_config.executor_count and not self.currently_running(work)
def currently_running(self, work):
pid_exists = False
try:
if work['action_instance_id'] is not None:
pid_exists = StoreService.check_for_pidfile(work['action_instance_id'])
if pid_exists is not None:
logger.info("Request was sent, but was already running, ignoring for [{}]".format(work['action_instance_id']))
except Exception:
pass
return pid_exists
def check_version(self, check_request):
if Version.HEADER in check_request.headers:
if check_request.headers[Version.HEADER] == self.get_version():
return True
if not StoreService.is_updating(self.app):
StoreService.set_updating(self.app)
thread = threading.Thread(target=self._perform_upgrade, args=(check_request.headers[Version.HEADER],))
thread.daemon = True
thread.start()
return False
return False
def _perform_upgrade(self, new_version):
logger.info("Performing upgrade: {}".format(new_version))
executors = self._sleep_for_executors()
self._start_upgrade(new_version, executors)
def _start_upgrade(self, new_version, executors):
if not executors:
UpgradeUtil.upgrade_version(new_version, self.app.rapid_config)
else:
logger.info("Waiting for executors...")
StoreService.set_updating(self.app, False)
@staticmethod
def _sleep_for_executors(seconds_to_sleep=10, count_limit=10):
executors = StoreService.get_executors()
count = 0
while executors:
time.sleep(seconds_to_sleep)
count += 1
if count >= count_limit:
break
executors = StoreService.get_executors()
return executors
def get_version(self):
return Version.get_version()
def work_execute(self):
try:
if self.can_work_on(request.get_json()):
self.start_work()
executors = StoreService.get_executors()
headers = {}
if len(executors) + 1 >= self.app.rapid_config.executor_count:
headers["X-Exclude-Resource"] = 'true'
return Response(json.dumps({"message": "Work started"}), status=201, content_type="application/json", headers=headers)
except Exception as exception:
logger.error(exception)
return Response(json.dumps({'message': str(exception)}), status=422, content_type='application/json')
return Response(json.dumps({"message": "Cannot execute work at this time."}), status=423, content_type='application/json')
def start_work(self):
executor = Executor(WorkRequest(request.get_json()),
self.app.rapid_config.master_uri,
workspace=self.app.rapid_config.workspace,
quarantine=self.app.rapid_config.quarantine_directory,
verify_certs=self.app.rapid_config.verify_certs,
rapid_config=self.app.rapid_config)
executor.verify_work_request()
executor.start()
def work_cancel(self, action_instance_id):
pid_file = StoreService.check_for_pidfile(action_instance_id)
if pid_file is not None:
try:
base_name = basename(pid_file)
os.kill(int(base_name.split('-')[-1]), 9)
return Response(json.dumps({"message": "Killed process."}), 200)
except Exception:
pass
return Response(json.dumps({"message": "Failed to kill process"}), 501)
|
tracker.py
|
"""
This script is a variant of dmlc-core/dmlc_tracker/tracker.py,
which is a specialized version for xgboost tasks.
"""
# pylint: disable=invalid-name, missing-docstring, too-many-arguments, too-many-locals
# pylint: disable=too-many-branches, too-many-statements, too-many-instance-attributes
import socket
import struct
import time
import logging
from threading import Thread
class ExSocket(object):
"""
Extension of socket to handle recv and send of special data
"""
def __init__(self, sock):
self.sock = sock
def recvall(self, nbytes):
res = []
nread = 0
while nread < nbytes:
chunk = self.sock.recv(min(nbytes - nread, 1024))
nread += len(chunk)
res.append(chunk)
return b''.join(res)
def recvint(self):
return struct.unpack('@i', self.recvall(4))[0]
def sendint(self, n):
self.sock.sendall(struct.pack('@i', n))
def sendstr(self, s):
self.sendint(len(s))
self.sock.sendall(s.encode())
def recvstr(self):
slen = self.recvint()
return self.recvall(slen).decode()
# magic number used to verify existence of data
kMagic = 0xff99
def get_some_ip(host):
return socket.getaddrinfo(host, None)[0][4][0]
def get_host_ip(hostIP=None):
if hostIP is None or hostIP == 'auto':
hostIP = 'ip'
if hostIP == 'dns':
hostIP = socket.getfqdn()
elif hostIP == 'ip':
from socket import gaierror
try:
hostIP = socket.gethostbyname(socket.getfqdn())
except gaierror:
logging.debug(
'gethostbyname(socket.getfqdn()) failed... trying on hostname()'
)
hostIP = socket.gethostbyname(socket.gethostname())
if hostIP.startswith("127."):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# doesn't have to be reachable
s.connect(('10.255.255.255', 1))
hostIP = s.getsockname()[0]
return hostIP
def get_family(addr):
return socket.getaddrinfo(addr, None)[0][0]
class SlaveEntry(object):
def __init__(self, sock, s_addr):
slave = ExSocket(sock)
self.sock = slave
self.host = get_some_ip(s_addr[0])
magic = slave.recvint()
assert magic == kMagic, 'invalid magic number=%d from %s' % (magic, self.host)
slave.sendint(kMagic)
self.rank = slave.recvint()
self.world_size = slave.recvint()
self.jobid = slave.recvstr()
self.cmd = slave.recvstr()
self.wait_accept = 0
self.port = None
def decide_rank(self, job_map):
if self.rank >= 0:
return self.rank
if self.jobid != 'NULL' and self.jobid in job_map:
return job_map[self.jobid]
return -1
def assign_rank(self, rank, wait_conn, tree_map, parent_map, ring_map):
self.rank = rank
nnset = set(tree_map[rank])
rprev, rnext = ring_map[rank]
self.sock.sendint(rank)
# send parent rank
self.sock.sendint(parent_map[rank])
# send world size
self.sock.sendint(len(tree_map))
self.sock.sendint(len(nnset))
# send the rprev and next link
for r in nnset:
self.sock.sendint(r)
# send prev link
if rprev not in (-1, rank):
nnset.add(rprev)
self.sock.sendint(rprev)
else:
self.sock.sendint(-1)
# send next link
if rnext not in (-1, rank):
nnset.add(rnext)
self.sock.sendint(rnext)
else:
self.sock.sendint(-1)
while True:
ngood = self.sock.recvint()
goodset = set([])
for _ in range(ngood):
goodset.add(self.sock.recvint())
assert goodset.issubset(nnset)
badset = nnset - goodset
conset = []
for r in badset:
if r in wait_conn:
conset.append(r)
self.sock.sendint(len(conset))
self.sock.sendint(len(badset) - len(conset))
for r in conset:
self.sock.sendstr(wait_conn[r].host)
self.sock.sendint(wait_conn[r].port)
self.sock.sendint(r)
nerr = self.sock.recvint()
if nerr != 0:
continue
self.port = self.sock.recvint()
rmset = []
# all connections were successfully set up
for r in conset:
wait_conn[r].wait_accept -= 1
if wait_conn[r].wait_accept == 0:
rmset.append(r)
for r in rmset:
wait_conn.pop(r, None)
self.wait_accept = len(badset) - len(conset)
return rmset
class RabitTracker(object):
"""
tracker for rabit
"""
def __init__(self, hostIP, nslave, port=9091, port_end=9999):
sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
for _port in range(port, port_end):
try:
sock.bind((hostIP, _port))
self.port = _port
break
except socket.error as e:
if e.errno in [98, 48]:
continue
raise
sock.listen(256)
self.sock = sock
self.hostIP = hostIP
self.thread = None
self.start_time = None
self.end_time = None
self.nslave = nslave
logging.info('start listen on %s:%d', hostIP, self.port)
def __del__(self):
self.sock.close()
@staticmethod
def get_neighbor(rank, nslave):
rank = rank + 1
ret = []
if rank > 1:
ret.append(rank // 2 - 1)
if rank * 2 - 1 < nslave:
ret.append(rank * 2 - 1)
if rank * 2 < nslave:
ret.append(rank * 2)
return ret
def slave_envs(self):
"""
get environment variables for slaves
can be passed in as args or envs
"""
return {'DMLC_TRACKER_URI': self.hostIP,
'DMLC_TRACKER_PORT': self.port}
def get_tree(self, nslave):
tree_map = {}
parent_map = {}
for r in range(nslave):
tree_map[r] = self.get_neighbor(r, nslave)
parent_map[r] = (r + 1) // 2 - 1
return tree_map, parent_map
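# Worked example for get_tree() above: with nslave=3 the ranks form a binary
# heap rooted at rank 0, i.e. tree_map == {0: [1, 2], 1: [0], 2: [0]} and
# parent_map == {0: -1, 1: 0, 2: 0}.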
def find_share_ring(self, tree_map, parent_map, r):
"""
get a ring structure that tends to share nodes with the tree
return a list starting from r
"""
nset = set(tree_map[r])
cset = nset - set([parent_map[r]])
if not cset:
return [r]
rlst = [r]
cnt = 0
for v in cset:
vlst = self.find_share_ring(tree_map, parent_map, v)
cnt += 1
if cnt == len(cset):
vlst.reverse()
rlst += vlst
return rlst
def get_ring(self, tree_map, parent_map):
"""
get a ring connection used to recover local data
"""
assert parent_map[0] == -1
rlst = self.find_share_ring(tree_map, parent_map, 0)
assert len(rlst) == len(tree_map)
ring_map = {}
nslave = len(tree_map)
for r in range(nslave):
rprev = (r + nslave - 1) % nslave
rnext = (r + 1) % nslave
ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
return ring_map
def get_link_map(self, nslave):
"""
get the link map; this is a bit hacky and calls for a better algorithm
to place similar nodes together
"""
tree_map, parent_map = self.get_tree(nslave)
ring_map = self.get_ring(tree_map, parent_map)
rmap = {0: 0}
k = 0
for i in range(nslave - 1):
k = ring_map[k][1]
rmap[k] = i + 1
ring_map_ = {}
tree_map_ = {}
parent_map_ = {}
for k, v in ring_map.items():
ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
for k, v in tree_map.items():
tree_map_[rmap[k]] = [rmap[x] for x in v]
for k, v in parent_map.items():
if k != 0:
parent_map_[rmap[k]] = rmap[v]
else:
parent_map_[rmap[k]] = -1
return tree_map_, parent_map_, ring_map_
def accept_slaves(self, nslave):
# set of nodes that have finished the job
shutdown = {}
# set of nodes that are waiting for connections
wait_conn = {}
# maps job id to rank
job_map = {}
# list of workers that are pending rank assignment
pending = []
# lazy initialize tree_map
tree_map = None
while len(shutdown) != nslave:
fd, s_addr = self.sock.accept()
s = SlaveEntry(fd, s_addr)
if s.cmd == 'print':
msg = s.sock.recvstr()
print(msg.strip(), flush=True)
continue
if s.cmd == 'shutdown':
assert s.rank >= 0 and s.rank not in shutdown
assert s.rank not in wait_conn
shutdown[s.rank] = s
logging.debug('Received %s signal from %d', s.cmd, s.rank)
continue
assert s.cmd == 'start' or s.cmd == 'recover'
# lazily initialize the slaves
if tree_map is None:
assert s.cmd == 'start'
if s.world_size > 0:
nslave = s.world_size
tree_map, parent_map, ring_map = self.get_link_map(nslave)
# set of nodes that are pending startup
todo_nodes = list(range(nslave))
else:
assert s.world_size == -1 or s.world_size == nslave
if s.cmd == 'recover':
assert s.rank >= 0
rank = s.decide_rank(job_map)
# batch assignment of ranks
if rank == -1:
assert todo_nodes
pending.append(s)
if len(pending) == len(todo_nodes):
pending.sort(key=lambda x: x.host)
for s in pending:
rank = todo_nodes.pop(0)
if s.jobid != 'NULL':
job_map[s.jobid] = rank
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
if s.wait_accept > 0:
wait_conn[rank] = s
logging.debug('Received %s signal from %s; assign rank %d',
s.cmd, s.host, s.rank)
if not todo_nodes:
logging.info('@tracker All of %d nodes getting started', nslave)
self.start_time = time.time()
else:
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
logging.debug('Received %s signal from %d', s.cmd, s.rank)
if s.wait_accept > 0:
wait_conn[rank] = s
logging.info('@tracker All nodes finished the job')
self.end_time = time.time()
logging.info('@tracker %s secs between node start and job finish',
str(self.end_time - self.start_time))
def start(self, nslave):
def run():
self.accept_slaves(nslave)
self.thread = Thread(target=run, args=(), daemon=True)
self.thread.start()
def join(self):
while self.thread.is_alive():
self.thread.join(100)
def alive(self):
return self.thread.is_alive()
|
vm_util.py
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of utility functions for working with virtual machines."""
import contextlib
import logging
import os
import platform
import random
import re
import string
import subprocess
import tempfile
import threading
import time
import jinja2
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import temp_dir
FLAGS = flags.FLAGS
PRIVATE_KEYFILE = 'perfkitbenchmarker_keyfile'
PUBLIC_KEYFILE = 'perfkitbenchmarker_keyfile.pub'
CERT_FILE = 'perfkitbenchmarker.pem'
# The temporary directory on VMs. We cannot reuse GetTempDir()
# because run_uri will not be available at time of module load and we need
# to use this directory as a base for other module level constants.
VM_TMP_DIR = '/tmp/pkb'
# Default timeout for issuing a command.
DEFAULT_TIMEOUT = 300
# Defaults for retrying commands.
POLL_INTERVAL = 30
TIMEOUT = 1200
FUZZ = .5
MAX_RETRIES = -1
WINDOWS = 'nt'
DARWIN = 'Darwin'
PASSWORD_LENGTH = 15
OUTPUT_STDOUT = 0
OUTPUT_STDERR = 1
OUTPUT_EXIT_CODE = 2
_SIMULATE_MAINTENANCE_SEMAPHORE = threading.Semaphore(0)
flags.DEFINE_integer('default_timeout', TIMEOUT, 'The default timeout for '
'retryable commands in seconds.')
flags.DEFINE_integer('burn_cpu_seconds', 0,
'Amount of time in seconds to burn cpu on vm before '
'starting benchmark')
flags.DEFINE_integer('burn_cpu_threads', 1, 'Number of threads to use to '
'burn cpu before starting benchmark.')
flags.DEFINE_integer('background_cpu_threads', None,
'Number of threads of background cpu usage while '
'running a benchmark')
flags.DEFINE_integer('background_network_mbits_per_sec', None,
'Number of megabits per second of background '
'network traffic to generate during the run phase '
'of the benchmark')
flags.DEFINE_boolean('simulate_maintenance', False,
'Whether to simulate VM maintenance during the benchmark. '
'This requires both benchmark and provider support.')
flags.DEFINE_integer('simulate_maintenance_delay', 0,
'The number of seconds to wait to start simulating '
'maintenance.')
class IpAddressSubset(object):
"""Enum of options for --ip_addresses."""
REACHABLE = 'REACHABLE'
BOTH = 'BOTH'
INTERNAL = 'INTERNAL'
EXTERNAL = 'EXTERNAL'
ALL = (REACHABLE, BOTH, INTERNAL, EXTERNAL)
flags.DEFINE_enum('ip_addresses', IpAddressSubset.REACHABLE,
IpAddressSubset.ALL,
'For networking tests: use both internal and external '
'IP addresses (BOTH), external and internal only if '
'the receiving VM is reachable by internal IP (REACHABLE), '
'external IP only (EXTERNAL) or internal IP only (INTERNAL)')
flags.DEFINE_enum('background_network_ip_type', IpAddressSubset.EXTERNAL,
(IpAddressSubset.INTERNAL, IpAddressSubset.EXTERNAL),
'IP address type to use when generating background network '
'traffic')
def GetTempDir():
"""Returns the tmp dir of the current run."""
return temp_dir.GetRunDirPath()
def PrependTempDir(file_name):
"""Returns the file name prepended with the tmp dir of the current run."""
return os.path.join(GetTempDir(), file_name)
def GenTempDir():
"""Creates the tmp dir for the current run if it does not already exist."""
temp_dir.CreateTemporaryDirectories()
def SSHKeyGen():
"""Create PerfKitBenchmarker SSH keys in the tmp dir of the current run."""
if not os.path.isdir(GetTempDir()):
GenTempDir()
if not os.path.isfile(GetPrivateKeyPath()):
create_cmd = ['ssh-keygen',
'-t',
'rsa',
'-N',
'',
'-q',
'-f',
PrependTempDir(PRIVATE_KEYFILE)]
shell_value = RunningOnWindows()
create_process = subprocess.Popen(create_cmd,
shell=shell_value,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
create_process.communicate()
if not os.path.isfile(GetCertPath()):
create_cmd = ['openssl',
'req',
'-x509',
'-new',
'-out',
PrependTempDir(CERT_FILE),
'-key',
PrependTempDir(PRIVATE_KEYFILE)]
shell_value = RunningOnWindows()
create_process = subprocess.Popen(create_cmd,
shell=shell_value,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
create_process.communicate(input='\n' * 7)
def GetPrivateKeyPath():
return PrependTempDir(PRIVATE_KEYFILE)
def GetPublicKeyPath():
return PrependTempDir(PUBLIC_KEYFILE)
def GetCertPath():
return PrependTempDir(CERT_FILE)
def GetSshOptions(ssh_key_filename, connect_timeout=5):
"""Return common set of SSH and SCP options."""
options = [
'-2',
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
'-o', 'IdentitiesOnly=yes',
'-o', 'PreferredAuthentications=publickey',
'-o', 'PasswordAuthentication=no',
'-o', 'ConnectTimeout=%d' % connect_timeout,
'-o', 'GSSAPIAuthentication=no',
'-o', 'ServerAliveInterval=30',
'-o', 'ServerAliveCountMax=10',
'-i', ssh_key_filename
]
options.extend(FLAGS.ssh_options)
return options
# TODO(skschneider): Remove at least RunParallelProcesses and RunParallelThreads
# from this file (update references to call directly into background_tasks).
RunParallelProcesses = background_tasks.RunParallelProcesses
RunParallelThreads = background_tasks.RunParallelThreads
RunThreaded = background_tasks.RunThreaded
def Retry(poll_interval=POLL_INTERVAL, max_retries=MAX_RETRIES,
timeout=None, fuzz=FUZZ, log_errors=True,
retryable_exceptions=None):
"""A function decorator that will retry when exceptions are thrown.
Args:
poll_interval: The time between tries in seconds. This is the maximum poll
interval when fuzz is specified.
max_retries: The maximum number of retries before giving up. If -1, this
means continue until the timeout is reached. The function will stop
retrying when either max_retries is met or timeout is reached.
timeout: The timeout for all tries in seconds. If -1, this means continue
until max_retries is met. The function will stop retrying when either
max_retries is met or timeout is reached.
fuzz: The amount of randomness in the sleep time. This is used to
keep threads from all retrying at the same time. At 0, this
means sleep exactly poll_interval seconds. At 1, this means
sleep anywhere from 0 to poll_interval seconds.
log_errors: A boolean describing whether errors should be logged.
retryable_exceptions: A tuple of exceptions that should be retried. By
default, this is None, which indicates that all exceptions should
be retried.
Returns:
A function that wraps functions in retry logic. It can be
used as a decorator.
"""
if retryable_exceptions is None:
retryable_exceptions = Exception
def Wrap(f):
"""Wraps the supplied function with retry logic."""
def WrappedFunction(*args, **kwargs):
"""Holds the retry logic."""
local_timeout = FLAGS.default_timeout if timeout is None else timeout
if local_timeout >= 0:
deadline = time.time() + local_timeout
else:
deadline = float('inf')
tries = 0
while True:
try:
tries += 1
return f(*args, **kwargs)
except retryable_exceptions as e:
fuzz_multiplier = 1 - fuzz + random.random() * fuzz
sleep_time = poll_interval * fuzz_multiplier
if ((time.time() + sleep_time) >= deadline or
(max_retries >= 0 and tries > max_retries)):
raise
else:
if log_errors:
logging.info('Retrying exception running %s: %s', f.__name__, e)
time.sleep(sleep_time)
return WrappedFunction
return Wrap
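# Minimal usage sketch for the Retry decorator above (the decorated function
# name and parameter values are illustrative, not from this module):
#
#   @Retry(poll_interval=5, max_retries=3, retryable_exceptions=(IOError,))
#   def _FetchSomething():
#     ...
#
# Each failed attempt sleeps up to poll_interval seconds (randomized by fuzz)
# before retrying, until max_retries or the timeout is exhausted.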
def IssueCommand(cmd, force_info_log=False, suppress_warning=False,
env=None, timeout=DEFAULT_TIMEOUT, cwd=None):
"""Tries running the provided command once.
Args:
cmd: A list of strings such as is given to the subprocess.Popen()
constructor.
force_info_log: A boolean indicating whether the command result should
always be logged at the info level. Command results will always be
logged at the debug level if they aren't logged at another level.
suppress_warning: A boolean indicating whether the results should
not be logged at the info level in the event of a non-zero
return code. When force_info_log is True, the output is logged
regardless of suppress_warning's value.
env: A dict of key/value strings, such as is given to the subprocess.Popen()
constructor, that contains environment variables to be injected.
timeout: Timeout for the command in seconds. If the command has not finished
before the timeout is reached, it will be killed. Set timeout to None to
let the command run indefinitely. If the subprocess is killed, the
return code will indicate an error, and stdout and stderr will
contain what had already been written to them before the process was
killed.
cwd: Directory in which to execute the command.
Returns:
A tuple of stdout, stderr, and retcode from running the provided command.
"""
if env:
logging.debug('Environment variables: %s', env)
full_cmd = ' '.join(cmd)
logging.info('Running: %s', full_cmd)
time_file_path = '/usr/bin/time'
running_on_windows = RunningOnWindows()
running_on_darwin = RunningOnDarwin()
should_time = (not (running_on_windows or running_on_darwin) and
os.path.isfile(time_file_path) and FLAGS.time_commands)
shell_value = running_on_windows
with tempfile.TemporaryFile() as tf_out, \
tempfile.TemporaryFile() as tf_err, \
tempfile.NamedTemporaryFile(mode='r') as tf_timing:
cmd_to_use = cmd
if should_time:
cmd_to_use = [time_file_path,
'-o', tf_timing.name,
'--quiet',
'-f', ', WallTime:%Es, CPU:%Us, MaxMemory:%Mkb '] + cmd
process = subprocess.Popen(cmd_to_use, env=env, shell=shell_value,
stdin=subprocess.PIPE, stdout=tf_out,
stderr=tf_err, cwd=cwd)
def _KillProcess():
logging.error('IssueCommand timed out after %d seconds. '
'Killing command "%s".', timeout, full_cmd)
process.kill()
timer = threading.Timer(timeout, _KillProcess)
timer.start()
try:
process.wait()
finally:
timer.cancel()
tf_out.seek(0)
stdout = tf_out.read().decode('ascii', 'ignore')
tf_err.seek(0)
stderr = tf_err.read().decode('ascii', 'ignore')
timing_output = ''
if should_time:
timing_output = tf_timing.read().rstrip('\n')
debug_text = ('Ran: {%s} ReturnCode:%s%s\nSTDOUT: %s\nSTDERR: %s' %
(full_cmd, process.returncode, timing_output, stdout, stderr))
if force_info_log or (process.returncode and not suppress_warning):
logging.info(debug_text)
else:
logging.debug(debug_text)
return stdout, stderr, process.returncode
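# Illustrative sketch, not part of the original module: a typical calling pattern for
# IssueCommand above. The command and the raised error type are placeholders.
def _ExampleIssueCommandUsage():
  """Runs a trivial command and raises if it exits with a non-zero return code."""
  stdout, stderr, retcode = IssueCommand(['echo', 'hello'], timeout=60)
  if retcode:
    raise RuntimeError('echo failed with %d: %s' % (retcode, stderr))
  return stdout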
def IssueBackgroundCommand(cmd, stdout_path, stderr_path, env=None):
"""Run the provided command once in the background.
Args:
cmd: Command to be run, as expected by subprocess.Popen.
stdout_path: Redirect stdout here. Overwritten.
stderr_path: Redirect stderr here. Overwritten.
env: A dict of key/value strings, such as is given to the subprocess.Popen()
constructor, that contains environment variables to be injected.
"""
logging.debug('Environment variables: %s', env)
full_cmd = ' '.join(cmd)
logging.info('Spawning: %s', full_cmd)
outfile = open(stdout_path, 'w')
errfile = open(stderr_path, 'w')
shell_value = RunningOnWindows()
subprocess.Popen(cmd, env=env, shell=shell_value,
stdout=outfile, stderr=errfile, close_fds=True)
@Retry()
def IssueRetryableCommand(cmd, env=None):
"""Tries running the provided command until it succeeds or times out.
Args:
cmd: A list of strings such as is given to the subprocess.Popen()
constructor.
env: An alternate environment to pass to the Popen command.
Returns:
A tuple of stdout and stderr from running the provided command.
"""
stdout, stderr, retcode = IssueCommand(cmd, env=env)
if retcode:
raise errors.VmUtil.CalledProcessException(
'Command returned a non-zero exit code.\n')
return stdout, stderr
def ParseTimeCommandResult(command_result):
"""Parse command result and get time elapsed.
Note this parses the output of bash's time builtin, not /usr/bin/time or other
implementations. You may need to run something like bash -c "time ./command"
to produce parseable output.
Args:
command_result: The result after executing a remote time command.
Returns:
Time taken for the command.
"""
  time_data = re.findall(r'real\s+(\d+)m(\d+\.\d+)', command_result)
time_in_seconds = 60 * float(time_data[0][0]) + float(time_data[0][1])
return time_in_seconds
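# Illustrative sketch, not part of the original module: the input shape expected by
# ParseTimeCommandResult above. bash's time builtin prints lines such as 'real\t1m23.456s',
# which parse to 60 * 1 + 23.456 = 83.456 seconds.
def _ExampleParseTime():
  sample = 'real\t1m23.456s\nuser\t0m1.000s\nsys\t0m0.100s'
  return ParseTimeCommandResult(sample)  # 83.456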
def ShouldRunOnExternalIpAddress():
"""Returns whether a test should be run on an instance's external IP."""
return FLAGS.ip_addresses in (IpAddressSubset.EXTERNAL,
IpAddressSubset.BOTH,
IpAddressSubset.REACHABLE)
def ShouldRunOnInternalIpAddress(sending_vm, receiving_vm):
"""Returns whether a test should be run on an instance's internal IP.
Based on the command line flag --ip_addresses. Internal IP addresses are used
when:
  * --ip_addresses=BOTH or --ip_addresses=INTERNAL
* --ip_addresses=REACHABLE and 'sending_vm' can ping 'receiving_vm' on its
internal IP.
Args:
sending_vm: VirtualMachine. The client.
receiving_vm: VirtualMachine. The server.
Returns:
Whether a test should be run on an instance's internal IP.
"""
return (FLAGS.ip_addresses in (IpAddressSubset.BOTH,
IpAddressSubset.INTERNAL) or
(FLAGS.ip_addresses == IpAddressSubset.REACHABLE and
sending_vm.IsReachable(receiving_vm)))
def GetLastRunUri():
"""Returns the last run_uri used (or None if it can't be determined)."""
runs_dir_path = temp_dir.GetAllRunsDirPath()
try:
dir_names = next(os.walk(runs_dir_path))[1]
except StopIteration:
# The runs directory was not found.
return None
if not dir_names:
# No run subdirectories were found in the runs directory.
return None
# Return the subdirectory with the most recent modification time.
return max(dir_names,
key=lambda d: os.path.getmtime(os.path.join(runs_dir_path, d)))
@contextlib.contextmanager
def NamedTemporaryFile(prefix='tmp', suffix='', dir=None, delete=True):
"""Behaves like tempfile.NamedTemporaryFile.
The existing tempfile.NamedTemporaryFile has the annoying property on
Windows that it cannot be opened a second time while it is already open.
This makes it impossible to use it with a "with" statement in a cross platform
compatible way. This serves a similar role, but allows the file to be closed
within a "with" statement without causing the file to be unlinked until the
context exits.
"""
f = tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix,
dir=dir, delete=False)
try:
yield f
finally:
if not f.closed:
f.close()
if delete:
os.unlink(f.name)
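# Illustrative sketch, not part of the original module: using the NamedTemporaryFile
# wrapper above. Unlike the stdlib version with delete=True on Windows, the file can be
# closed and reopened by name inside the "with" block; it is unlinked when the block exits.
def _ExampleNamedTemporaryFileUsage():
  with NamedTemporaryFile(prefix='pkb-example-', suffix='.txt') as tf:
    tf.write(b'hello')
    tf.close()  # Safe: deletion is deferred until the context exits.
    with open(tf.name, 'rb') as f:  # Reopen by name while still inside the context.
      return f.read()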
def GenerateSSHConfig(vms, vm_groups):
"""Generates an SSH config file to simplify connecting to the specified VMs.
Writes a file to GetTempDir()/ssh_config with an SSH configuration for each VM
provided in the arguments. Users can then SSH with any of the following:
ssh -F <ssh_config_path> <vm_name>
ssh -F <ssh_config_path> vm<vm_index>
ssh -F <ssh_config_path> <group_name>-<index>
Args:
vms: list of BaseVirtualMachines.
vm_groups: dict mapping VM group name string to list of BaseVirtualMachines.
"""
target_file = os.path.join(GetTempDir(), 'ssh_config')
template_path = data.ResourcePath('ssh_config.j2')
environment = jinja2.Environment(undefined=jinja2.StrictUndefined)
with open(template_path) as fp:
template = environment.from_string(fp.read())
with open(target_file, 'w') as ofp:
ofp.write(template.render({'vms': vms, 'vm_groups': vm_groups}))
ssh_options = [' ssh -F {0} {1}'.format(target_file, pattern)
for pattern in ('<vm_name>', 'vm<index>',
'<group_name>-<index>')]
logging.info('ssh to VMs in this benchmark by name with:\n%s',
'\n'.join(ssh_options))
def RunningOnWindows():
"""Returns True if PKB is running on Windows."""
return os.name == WINDOWS
def RunningOnDarwin():
"""Returns True if PKB is running on a Darwin OS machine."""
return os.name != WINDOWS and platform.system() == DARWIN
def ExecutableOnPath(executable_name):
"""Return True if the given executable can be found on the path."""
cmd = ['where'] if RunningOnWindows() else ['which']
cmd.append(executable_name)
shell_value = RunningOnWindows()
process = subprocess.Popen(cmd,
shell=shell_value,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process.communicate()
if process.returncode:
return False
return True
def GenerateRandomWindowsPassword(password_length=PASSWORD_LENGTH):
"""Generates a password that meets Windows complexity requirements."""
# The special characters have to be recognized by the Azure CLI as
# special characters. This greatly limits the set of characters
# that we can safely use. See
# https://github.com/Azure/azure-xplat-cli/blob/master/lib/commands/arm/vm/vmOsProfile._js#L145
special_chars = '*!@#$%+='
password = [
random.choice(string.ascii_letters + string.digits + special_chars)
for _ in range(password_length - 4)]
# Ensure that the password contains at least one of each 4 required
# character types.
password.append(random.choice(string.ascii_lowercase))
password.append(random.choice(string.ascii_uppercase))
password.append(random.choice(string.digits))
password.append(random.choice(special_chars))
return ''.join(password)
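# Illustrative sketch, not part of the original module: properties guaranteed by
# GenerateRandomWindowsPassword above, since one character of each required class is
# always appended before the characters are joined.
def _ExamplePasswordProperties():
  password = GenerateRandomWindowsPassword()
  assert any(c.islower() for c in password)
  assert any(c.isupper() for c in password)
  assert any(c.isdigit() for c in password)
  assert any(c in '*!@#$%+=' for c in password)
  return password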
def StartSimulatedMaintenance():
"""Initiates the simulated maintenance event."""
if FLAGS.simulate_maintenance:
_SIMULATE_MAINTENANCE_SEMAPHORE.release()
def SetupSimulatedMaintenance(vm):
"""Called ready VM for simulated maintenance."""
if FLAGS.simulate_maintenance:
def _SimulateMaintenance():
_SIMULATE_MAINTENANCE_SEMAPHORE.acquire()
time.sleep(FLAGS.simulate_maintenance_delay)
vm.SimulateMaintenanceEvent()
t = threading.Thread(target=_SimulateMaintenance)
t.daemon = True
t.start()
|
mark_for_deployment.py
|
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains methods used by the paasta client to mark a docker image for
deployment to a cluster.instance.
"""
import argparse
import asyncio
import concurrent
import datetime
import functools
import getpass
import logging
import math
import os
import socket
import sys
import time
import traceback
from threading import Thread
from typing import Any
from typing import Callable
from typing import Collection
from typing import Dict
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Set
from typing import Tuple
import a_sync
import humanize
import progressbar
from service_configuration_lib import read_deploy
from slackclient import SlackClient
from sticht import state_machine
from sticht.slo import SLOSlackDeploymentProcess
from sticht.slo import SLOWatcher
from paasta_tools import remote_git
from paasta_tools.api import client
from paasta_tools.cassandracluster_tools import CassandraClusterDeploymentConfig
from paasta_tools.cli.cmds.push_to_registry import is_docker_image_already_in_registry
from paasta_tools.cli.cmds.status import get_main_container
from paasta_tools.cli.cmds.status import get_version_table_entry
from paasta_tools.cli.cmds.status import recent_container_restart
from paasta_tools.cli.utils import get_jenkins_build_output_url
from paasta_tools.cli.utils import lazy_choices_completer
from paasta_tools.cli.utils import list_deploy_groups
from paasta_tools.cli.utils import trigger_deploys
from paasta_tools.cli.utils import validate_git_sha
from paasta_tools.cli.utils import validate_given_deploy_groups
from paasta_tools.cli.utils import validate_service_name
from paasta_tools.cli.utils import validate_short_git_sha
from paasta_tools.deployment_utils import get_currently_deployed_sha
from paasta_tools.kubernetes_tools import KubernetesDeploymentConfig
from paasta_tools.long_running_service_tools import LongRunningServiceConfig
from paasta_tools.marathon_tools import MarathonServiceConfig
from paasta_tools.paasta_service_config_loader import PaastaServiceConfigLoader
from paasta_tools.paastaapi.models import InstanceStatusKubernetesV2
from paasta_tools.paastaapi.models import KubernetesPodV2
from paasta_tools.slack import get_slack_client
from paasta_tools.utils import _log
from paasta_tools.utils import _log_audit
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import format_tag
from paasta_tools.utils import get_git_url
from paasta_tools.utils import get_paasta_tag_from_deploy_group
from paasta_tools.utils import get_username
from paasta_tools.utils import ldap_user_search
from paasta_tools.utils import list_services
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import RollbackTypes
from paasta_tools.utils import TimeoutError
DEFAULT_DEPLOYMENT_TIMEOUT = 3 * 3600 # seconds
DEFAULT_WARN_PERCENT = 17 # ~30min for default timeout
DEFAULT_AUTO_CERTIFY_DELAY = 600 # seconds
DEFAULT_SLACK_CHANNEL = "#deploy"
DEFAULT_STUCK_BOUNCE_RUNBOOK = "y/stuckbounce"
log = logging.getLogger(__name__)
def add_subparser(subparsers: argparse._SubParsersAction) -> None:
list_parser = subparsers.add_parser(
"mark-for-deployment",
help="Mark a docker image for deployment in git",
description=(
"'paasta mark-for-deployment' uses Git as the control-plane, to "
"signal to other PaaSTA components that a particular docker image "
"is ready to be deployed."
),
epilog=(
"Note: Access and credentials to the Git repo of a service are required "
"for this command to work."
),
)
list_parser.add_argument(
"-u",
"--git-url",
help=(
"Git url for service -- where magic mark-for-deployment tags are pushed. "
"Defaults to the normal git URL for the service."
),
default=None,
)
list_parser.add_argument(
"-c",
"-k",
"--commit",
help="Git sha to mark for deployment",
required=True,
type=validate_short_git_sha,
)
arg_deploy_group = list_parser.add_argument(
"-l",
"--deploy-group",
"--clusterinstance",
help="Mark the service ready for deployment in this deploy group (e.g. "
"cluster1.canary, cluster2.main). --clusterinstance is deprecated and "
"should be replaced with --deploy-group",
required=True,
)
arg_deploy_group.completer = lazy_choices_completer(list_deploy_groups) # type: ignore
arg_service = list_parser.add_argument(
"-s",
"--service",
help="Name of the service which you wish to mark for deployment. Leading "
'"services-" will be stripped.',
required=True,
)
arg_service.completer = lazy_choices_completer(list_services) # type: ignore
list_parser.add_argument(
"--verify-image-exists",
help="Check the docker registry and verify the image has been pushed",
dest="verify_image",
action="store_true",
default=False,
)
list_parser.add_argument(
"--wait-for-deployment",
help="Set to poll paasta and wait for the deployment to finish, "
"the default strategy is to mark for deployment and exit straightaway",
dest="block",
action="store_true",
default=False,
)
list_parser.add_argument(
"-t",
"--timeout",
dest="timeout",
type=int,
default=DEFAULT_DEPLOYMENT_TIMEOUT,
help=(
"Time in seconds to wait for paasta to deploy the service. "
"If the timeout is exceeded we return 1. "
"Default is %(default)s seconds."
),
)
list_parser.add_argument(
"-w",
"--warn",
dest="warn",
type=int,
default=DEFAULT_WARN_PERCENT,
help=(
"Percent of timeout to warn at if the deployment hasn't finished. "
"For example, --warn=75 will warn at 75%% of the timeout. "
"Defaults to %(default)s."
),
)
list_parser.add_argument(
"--auto-rollback",
help="Automatically roll back to the previously deployed sha if the deployment "
"times out or is canceled (ctrl-c). Only applicable with --wait-for-deployment. "
"Defaults to false.",
dest="auto_rollback",
action="store_true",
default=False,
)
list_parser.add_argument(
"-d",
"--soa-dir",
dest="soa_dir",
metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
list_parser.add_argument(
"-v",
"--verbose",
action="count",
dest="verbose",
default=0,
help="Print out more output.",
)
list_parser.add_argument(
"--auto-certify-delay",
dest="auto_certify_delay",
type=int,
default=None, # the logic for this is complicated. See MarkForDeploymentProcess.get_auto_certify_delay.
help="After a deploy finishes, wait this many seconds before automatically certifying."
f"Default {DEFAULT_AUTO_CERTIFY_DELAY} when --auto-rollback is enabled",
)
list_parser.add_argument(
"--auto-abandon-delay",
dest="auto_abandon_delay",
type=int,
default=600,
help="After a rollback finishes, wait this many seconds before automatically abandoning.",
)
list_parser.add_argument(
"--auto-rollback-delay",
dest="auto_rollback_delay",
type=int,
default=30,
help="After noticing an SLO failure, wait this many seconds before automatically rolling back.",
)
list_parser.add_argument(
"--author",
dest="authors",
default=None,
action="append",
help="Additional author(s) of the deploy, who will be pinged in Slack",
)
list_parser.add_argument(
"--polling-interval",
dest="polling_interval",
type=float,
default=None,
help="How long to wait between each time we check to see if an instance is done deploying.",
)
list_parser.add_argument(
"--diagnosis-interval",
dest="diagnosis_interval",
type=float,
default=None,
help="How long to wait between diagnoses of why the bounce isn't done.",
)
list_parser.add_argument(
"--time-before-first-diagnosis",
dest="time_before_first_diagnosis",
type=float,
default=None,
help="Wait this long before trying to diagnose why the bounce isn't done.",
)
list_parser.set_defaults(command=paasta_mark_for_deployment)
def mark_for_deployment(
git_url: str, deploy_group: str, service: str, commit: str
) -> int:
"""Mark a docker image for deployment"""
tag = get_paasta_tag_from_deploy_group(
identifier=deploy_group, desired_state="deploy"
)
remote_tag = format_tag(tag)
ref_mutator = remote_git.make_force_push_mutate_refs_func(
targets=[remote_tag], sha=commit
)
max_attempts = 3
for attempt in range(1, max_attempts + 1):
try:
remote_git.create_remote_refs(
git_url=git_url, ref_mutator=ref_mutator, force=True
)
if "yelpcorp.com" in git_url:
trigger_deploys(service)
except Exception as e:
logline = f"Failed to mark {commit} for deployment in deploy group {deploy_group}! (attempt \
{attempt}/{max_attempts}, error: {e}) \n Have you pushed your commit?"
_log(service=service, line=logline, component="deploy", level="event")
time.sleep(5 * attempt)
else:
logline = f"Marked {commit} for deployment in deploy group {deploy_group}"
_log(service=service, line=logline, component="deploy", level="event")
audit_action_details = {"deploy_group": deploy_group, "commit": commit}
_log_audit(
action="mark-for-deployment",
action_details=audit_action_details,
service=service,
)
return 0
return 1
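# Illustrative sketch, not part of the original module: how mark_for_deployment above is
# typically invoked. The git URL, service name and sha below are placeholders.
def _example_mark_for_deployment() -> int:
    return mark_for_deployment(
        git_url="git@git.example.com:services/example_service",
        deploy_group="prod.canary",
        service="example_service",
        commit="0123456789abcdef0123456789abcdef01234567",
    )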
def deploy_authz_check(deploy_info: Dict[str, Any], service: str) -> None:
deploy_username = get_username()
system_paasta_config = load_system_paasta_config()
allowed_groups = (
deploy_info["allowed_push_groups"]
if deploy_info.get("allowed_push_groups") is not None
else system_paasta_config.get_default_push_groups()
)
if allowed_groups is not None:
search_base = system_paasta_config.get_ldap_search_base()
search_ou = system_paasta_config.get_ldap_search_ou()
host = system_paasta_config.get_ldap_host()
ldap_username = system_paasta_config.get_ldap_reader_username()
ldap_password = system_paasta_config.get_ldap_reader_password()
if not any(
[
deploy_username
in ldap_user_search(
group, search_base, search_ou, host, ldap_username, ldap_password
)
for group in allowed_groups
]
):
logline = f"current user is not authorized to perform this action (should be in one of {allowed_groups})"
_log(service=service, line=logline, component="deploy", level="event")
print(logline, file=sys.stderr)
sys.exit(1)
def report_waiting_aborted(service: str, deploy_group: str) -> None:
print(
PaastaColors.red(
"Waiting for deployment aborted."
" PaaSTA will continue trying to deploy this code."
)
)
print("If you wish to see the status, run:")
print()
print(f" paasta status -s {service} -l {deploy_group} -v")
print()
def get_authors_to_be_notified(
git_url: str, from_sha: str, to_sha: str, authors: Optional[Collection[str]]
) -> str:
if from_sha is None:
return ""
if authors:
authors_to_notify = authors
elif "git.yelpcorp.com" in git_url:
ret, git_authors = remote_git.get_authors(
git_url=git_url, from_sha=from_sha, to_sha=to_sha
)
if ret == 0:
authors_to_notify = git_authors.split()
else:
return f"(Could not get authors: {git_authors})"
else:
# We have no way of getting authors on the fly if the repository is not on gitolite
return ""
slacky_authors = ", ".join({f"<@{a}>" for a in authors_to_notify})
log.debug(f"Authors: {slacky_authors}")
return f"^ {slacky_authors}"
def deploy_group_is_set_to_notify(
deploy_info: Dict[str, Any], deploy_group: str, notify_type: str
) -> bool:
for step in deploy_info.get("pipeline", []):
if step.get("step", "") == deploy_group:
# Use the specific notify_type if available else use slack_notify
return step.get(notify_type, step.get("slack_notify", False))
return False
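# Illustrative sketch, not part of the original module: the shape of deploy_info that
# deploy_group_is_set_to_notify above inspects. The step names and flags are made up;
# a specific notify_* key on a step overrides that step's slack_notify default.
_EXAMPLE_DEPLOY_INFO: Dict[str, Any] = {
    "pipeline": [
        {"step": "prod.canary", "slack_notify": True},
        {"step": "prod.main", "slack_notify": True, "notify_after_mark": False},
    ]
}
# deploy_group_is_set_to_notify(_EXAMPLE_DEPLOY_INFO, "prod.main", "notify_after_mark") -> False
# deploy_group_is_set_to_notify(_EXAMPLE_DEPLOY_INFO, "prod.canary", "notify_after_mark") -> True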
def get_deploy_info(service: str, soa_dir: str) -> Dict[str, Any]:
file_path = os.path.join(soa_dir, service, "deploy.yaml")
return read_deploy(file_path)
def print_rollback_cmd(
old_git_sha: str, commit: str, auto_rollback: bool, service: str, deploy_group: str
) -> None:
if old_git_sha is not None and old_git_sha != commit and not auto_rollback:
print()
print("If you wish to roll back, you can run:")
print()
print(
PaastaColors.bold(
" paasta rollback --service {} --deploy-group {} --commit {} ".format(
service, deploy_group, old_git_sha
)
)
)
def paasta_mark_for_deployment(args: argparse.Namespace) -> int:
"""Wrapping mark_for_deployment"""
if args.verbose:
log.setLevel(level=logging.DEBUG)
else:
log.setLevel(level=logging.INFO)
service = args.service
if service and service.startswith("services-"):
service = service.split("services-", 1)[1]
validate_service_name(service, soa_dir=args.soa_dir)
deploy_group = args.deploy_group
in_use_deploy_groups = list_deploy_groups(service=service, soa_dir=args.soa_dir)
_, invalid_deploy_groups = validate_given_deploy_groups(
in_use_deploy_groups, [deploy_group]
)
if len(invalid_deploy_groups) == 1:
print(
PaastaColors.red(
"ERROR: These deploy groups are not currently used anywhere: %s.\n"
% (",").join(invalid_deploy_groups)
)
)
print(
PaastaColors.red(
"This isn't technically wrong because you can mark-for-deployment before deploying there"
)
)
print(
PaastaColors.red(
"but this is probably a typo. Did you mean one of these in-use deploy groups?:"
)
)
print(PaastaColors.red(" %s" % (",").join(in_use_deploy_groups)))
print()
print(PaastaColors.red("Continuing regardless..."))
if args.git_url is None:
args.git_url = get_git_url(service=service, soa_dir=args.soa_dir)
commit = validate_git_sha(sha=args.commit, git_url=args.git_url)
old_git_sha = get_currently_deployed_sha(service=service, deploy_group=deploy_group)
if old_git_sha == commit:
print(
"Warning: The sha asked to be deployed already matches what is set to be deployed:"
)
print(old_git_sha)
print("Continuing anyway.")
if args.verify_image:
if not is_docker_image_already_in_registry(service, args.soa_dir, commit):
raise ValueError(
"Failed to find image in the registry for the following sha %s" % commit
)
deploy_info = get_deploy_info(service=service, soa_dir=args.soa_dir)
deploy_authz_check(deploy_info, service)
deploy_process = MarkForDeploymentProcess(
service=service,
deploy_info=deploy_info,
deploy_group=deploy_group,
commit=commit,
old_git_sha=old_git_sha,
git_url=args.git_url,
auto_rollback=args.auto_rollback,
block=args.block,
soa_dir=args.soa_dir,
timeout=args.timeout,
warn_pct=args.warn,
auto_certify_delay=args.auto_certify_delay,
auto_abandon_delay=args.auto_abandon_delay,
auto_rollback_delay=args.auto_rollback_delay,
authors=args.authors,
polling_interval=args.polling_interval,
diagnosis_interval=args.diagnosis_interval,
time_before_first_diagnosis=args.time_before_first_diagnosis,
)
ret = deploy_process.run()
return ret
class Progress:
waiting_on: Mapping[str, Collection[str]]
percent: float
def __init__(
self, percent: float = 0, waiting_on: Mapping[str, Collection[str]] = None
) -> None:
self.percent = percent
self.waiting_on = waiting_on
def human_readable(self, summary: bool) -> str:
if self.percent != 0 and self.percent != 100 and not summary:
s = f"{round(self.percent)}% (Waiting on {self.human_waiting_on()})"
else:
s = f"{round(self.percent)}%"
return s
def human_waiting_on(self) -> str:
if self.waiting_on is None:
return "N/A"
things = []
for cluster, instances in self.waiting_on.items():
num_instances = len(instances)
if num_instances == 0:
continue
elif num_instances == 1:
(one_instance,) = instances
things.append(f"`{cluster}`: `{one_instance}`")
else:
things.append(f"`{cluster}`: {len(instances)} instances")
return ", ".join(things)
class MarkForDeploymentProcess(SLOSlackDeploymentProcess):
rollback_states = ["start_rollback", "rolling_back", "rolled_back"]
rollforward_states = ["start_deploy", "deploying", "deployed"]
default_slack_channel = DEFAULT_SLACK_CHANNEL
paasta_status_reminder_handle: asyncio.TimerHandle
def __init__(
self,
service: str,
deploy_info: Dict,
deploy_group: str,
commit: str,
old_git_sha: str,
git_url: str,
auto_rollback: bool,
block: bool,
soa_dir: str,
timeout: float,
warn_pct: float,
auto_certify_delay: float,
auto_abandon_delay: float,
auto_rollback_delay: float,
authors: Optional[List[str]] = None,
polling_interval: float = None,
diagnosis_interval: float = None,
time_before_first_diagnosis: float = None,
) -> None:
self.service = service
self.deploy_info = deploy_info
self.deploy_group = deploy_group
self.commit = commit
self.old_git_sha = old_git_sha
self.git_url = git_url
self.auto_rollback = (
auto_rollback and old_git_sha is not None and old_git_sha != commit
)
self.auto_rollbacks_ever_enabled = self.auto_rollback
self.block = block
self.soa_dir = soa_dir
self.timeout = timeout
self.warn_pct = warn_pct
self.mark_for_deployment_return_code = -1
self.auto_certify_delay = auto_certify_delay
self.auto_abandon_delay = auto_abandon_delay
self.auto_rollback_delay = auto_rollback_delay
self.authors = authors
self.polling_interval = polling_interval
self.diagnosis_interval = diagnosis_interval
self.time_before_first_diagnosis = time_before_first_diagnosis
# Keep track of each wait_for_deployment task so we can cancel it.
self.wait_for_deployment_tasks: Dict[str, asyncio.Task] = {}
self.human_readable_status = "Waiting on mark-for-deployment to initialize..."
self.progress = Progress()
self.last_action = None
self.slo_watchers: List[SLOWatcher] = []
self.start_slo_watcher_threads(self.service, self.soa_dir)
# Initialize Slack threads and send the first message
super().__init__()
self.print_who_is_running_this()
def get_progress(self, summary: bool = False) -> str:
return self.progress.human_readable(summary)
def print_who_is_running_this(self) -> None:
build_url = get_jenkins_build_output_url()
if build_url is not None:
message = f"(<{build_url}|Jenkins Job>)"
else:
message = f"(Run by `{getpass.getuser()}` on {socket.getfqdn()})"
self.update_slack_thread(message)
def get_authors(self) -> str:
# In order to avoid notifying people who aren't part of the current
# service push, we calculate authors based on commits different since
# the current production SHA, as opposed to the old SHA on this deploy
# group.
#
# This avoids situations such as:
# * Notifying people from a previous push which went through stagef,
# if the new push goes through stageg.
# * Notifying everybody who has committed to a repo in the past year
# when updating a "legacy" deploy group (e.g. for yelp-main).
prod_deploy_group = self.deploy_info.get("production_deploy_group")
from_sha = None
if prod_deploy_group is not None:
from_sha = get_currently_deployed_sha(
service=self.service, deploy_group=prod_deploy_group
)
# If there's no production deploy group, or the production deploy group
# has never been deployed to, just use the old SHA from this deploy group.
if from_sha is None:
from_sha = self.old_git_sha
return get_authors_to_be_notified(
git_url=self.git_url,
from_sha=from_sha,
to_sha=self.commit,
authors=self.authors,
)
def ping_authors(self, message: str = None) -> None:
if message:
self.update_slack_thread(f"{message}\n{self.get_authors()}")
else:
self.update_slack_thread(self.get_authors())
def get_slack_client(self) -> SlackClient:
return get_slack_client().sc
def get_slack_channel(self) -> str:
""" Safely get some slack channel to post to. Defaults to ``DEFAULT_SLACK_CHANNEL``.
Currently only uses the first slack channel available, and doesn't support
multi-channel notifications. """
if self.deploy_info.get("slack_notify", True):
try:
channel = self.deploy_info.get("slack_channels")[0]
# Nightly jenkins builds will often re-deploy master. This causes Slack noise that wasn't present before
# the auto-rollbacks work.
if self.commit == self.old_git_sha:
print(
f"Rollback SHA matches rollforward SHA: {self.commit}, "
f"Sending slack notifications to {DEFAULT_SLACK_CHANNEL} instead of {channel}."
)
return DEFAULT_SLACK_CHANNEL
else:
return channel
except (IndexError, AttributeError, TypeError):
return DEFAULT_SLACK_CHANNEL
else:
return DEFAULT_SLACK_CHANNEL
def get_deployment_name(self) -> str:
return f"Deploy of `{self.commit[:8]}` of `{self.service}` to `{self.deploy_group}`:"
def on_enter_start_deploy(self) -> None:
self.update_slack_status(
f"Marking `{self.commit[:8]}` for deployment for {self.deploy_group}..."
)
self.mark_for_deployment_return_code = mark_for_deployment(
git_url=self.git_url,
deploy_group=self.deploy_group,
service=self.service,
commit=self.commit,
)
if self.mark_for_deployment_return_code != 0:
self.trigger("mfd_failed")
else:
self.update_slack_thread(
f"Marked `{self.commit[:8]}` for {self.deploy_group}."
+ (
"\n" + self.get_authors()
if self.deploy_group_is_set_to_notify("notify_after_mark")
else ""
)
)
log.debug("triggering mfd_succeeded")
self.trigger("mfd_succeeded")
def schedule_paasta_status_reminder(self) -> None:
def waiting_on_to_status(
waiting_on: Mapping[str, Collection[str]]
) -> List[str]:
if waiting_on is None:
return [
f"`paasta status --service {self.service} --{self.deploy_group}` -vv"
]
commands = []
for cluster, instances in waiting_on.items():
num_instances = len(instances)
if num_instances == 0:
continue
else:
commands.append(
f"`paasta status --service {self.service} --cluster {cluster} --instance {','.join(instances)} -vv`"
)
return commands
def times_up() -> None:
try:
if self.state == "deploying":
human_max_deploy_time = humanize.naturaldelta(
datetime.timedelta(seconds=self.timeout)
)
stuck_bounce_runbook = os.environ.get(
"STUCK_BOUNCE_RUNBOOK", DEFAULT_STUCK_BOUNCE_RUNBOOK,
)
status_commands = "\n".join(
waiting_on_to_status(self.progress.waiting_on)
)
self.notify_users(
(
f"It has been {self.warn_pct}% of the "
f"maximum deploy time ({human_max_deploy_time}), "
"which means the deployment may be stuck. "
"Here are some things you can try:\n\n"
f"* See {stuck_bounce_runbook} for debugging help\n"
f"* Run these commands to see the status of instances that "
"have not yet finished deploying:\n\n"
f"{status_commands}"
)
)
except Exception as e:
log.error(
f"Non-fatal exception encountered when processing the status reminder: {e}"
)
def schedule_callback() -> None:
time_to_notify = self.timeout * self.warn_pct / 100
self.paasta_status_reminder_handle = self.event_loop.call_later(
time_to_notify, times_up
)
try:
self.event_loop.call_soon_threadsafe(schedule_callback)
except Exception as e:
log.error(
f"Non-fatal error encountered scheduling the status reminder callback: {e}"
)
def cancel_paasta_status_reminder(self) -> None:
try:
handle = self.get_paasta_status_reminder_handle()
if handle is not None:
handle.cancel()
self.paasta_status_reminder_handle = None
except Exception as e:
log.error(
f"Non-fatal error encountered when canceling the paasta status reminder: {e}"
)
def get_paasta_status_reminder_handle(self) -> Optional[asyncio.TimerHandle]:
try:
return self.paasta_status_reminder_handle
except AttributeError:
return None
def states(self) -> Collection[str]:
return [
"_begin",
"start_deploy",
"deploying",
"deployed",
"mfd_failed",
"deploy_errored",
"deploy_cancelled",
"start_rollback",
"rolling_back",
"rolled_back",
"abandon",
"complete",
]
def start_state(self) -> str:
return "_begin"
def start_transition(self) -> str:
return "start_deploy"
def valid_transitions(self) -> Iterator[state_machine.TransitionDefinition]:
rollback_is_possible = (
self.old_git_sha is not None and self.old_git_sha != self.commit
)
yield {"source": "_begin", "dest": "start_deploy", "trigger": "start_deploy"}
yield {
"source": "start_deploy",
"dest": "deploying",
"trigger": "mfd_succeeded",
}
yield {"source": "deploying", "dest": "deployed", "trigger": "deploy_finished"}
yield {
"source": ["start_deploy", "start_rollback"],
"dest": "mfd_failed",
"trigger": "mfd_failed",
}
yield {
"source": [s for s in self.states() if not self.is_terminal_state(s)],
"dest": "deploy_errored",
"trigger": "deploy_errored",
}
yield {
"source": [s for s in self.states() if not self.is_terminal_state(s)],
"dest": "deploy_cancelled",
"trigger": "deploy_cancelled",
}
if rollback_is_possible:
yield {
"source": self.rollforward_states,
"dest": "start_rollback",
"trigger": "rollback_button_clicked",
"before": self.log_user_rollback,
}
yield {
"source": self.rollback_states,
"dest": None, # this makes it an "internal transition", effectively a noop.
"trigger": "rollback_button_clicked",
}
yield {
"source": self.rollforward_states,
"dest": "start_rollback",
"trigger": "rollback_slo_failure",
"before": self.log_slo_rollback,
}
yield {
"source": self.rollback_states,
"dest": None, # this makes it an "internal transition", effectively a noop.
"trigger": "rollback_slo_failure",
}
yield {
"source": self.rollback_states,
"dest": "start_deploy",
"trigger": "forward_button_clicked",
}
yield {
"source": self.rollforward_states,
"dest": None, # this makes it an "internal transition", effectively a noop.
"trigger": "forward_button_clicked",
}
yield {
"source": "start_rollback",
"dest": "rolling_back",
"trigger": "mfd_succeeded",
}
yield {
"source": "rolling_back",
"dest": "rolled_back",
"trigger": "deploy_finished",
}
yield {
"source": "deployed",
"dest": "complete",
"trigger": "complete_button_clicked",
}
yield {"source": "deployed", "dest": "complete", "trigger": "auto_certify"}
yield {
"source": ["rolled_back", "rolling_back"],
"dest": "abandon",
"trigger": "abandon_button_clicked",
}
yield {"source": "rolled_back", "dest": "abandon", "trigger": "auto_abandon"}
if rollback_is_possible:
# Suppress these buttons if it doesn't make sense to roll back.
yield {
"source": "*",
"dest": None, # Don't actually change state, just call the before function.
"trigger": "enable_auto_rollbacks_button_clicked",
"unless": [self.auto_rollbacks_enabled],
"before": self.enable_auto_rollbacks,
}
yield {
"source": "*",
"dest": None, # Don't actually change state, just call the before function.
"trigger": "disable_auto_rollbacks_button_clicked",
"conditions": [self.any_slo_failing, self.auto_rollbacks_enabled],
"before": self.disable_auto_rollbacks,
}
yield {
"source": "*",
"dest": None,
"trigger": "slos_started_failing",
"conditions": [self.auto_rollbacks_enabled],
"unless": [self.already_rolling_back],
"before": self.start_auto_rollback_countdown,
}
yield {
"source": "*",
"dest": None,
"trigger": "slos_stopped_failing",
"before": self.cancel_auto_rollback_countdown,
}
yield {
"source": "*",
"dest": None,
"trigger": "snooze_button_clicked",
"before": self.restart_timer,
"conditions": [self.is_timer_running],
}
def disable_auto_rollbacks(self) -> None:
self.cancel_auto_rollback_countdown()
self.auto_rollback = False
self.update_slack_status(
f"Automatic rollback disabled for this deploy. To disable this permanently for this step, edit `deploy.yaml` and set `auto_rollback: false` for the `{self.deploy_group}` step."
)
def enable_auto_rollbacks(self) -> None:
self.auto_rollback = True
self.auto_rollbacks_ever_enabled = True
self.update_slack_status(
f"Automatic rollback enabled for this deploy. Will watch for failures and rollback when necessary. To set this permanently, edit `deploy.yaml` and set `auto_rollback: false` for the `{self.deploy_group}` step."
)
def auto_rollbacks_enabled(self) -> bool:
"""This getter exists so it can be a condition on transitions, since those need to be callables."""
return self.auto_rollback
def get_auto_rollback_delay(self) -> float:
return self.auto_rollback_delay
def get_auto_certify_delay(self) -> float:
if self.auto_certify_delay is not None:
return self.auto_certify_delay
else:
if self.auto_rollbacks_ever_enabled:
return DEFAULT_AUTO_CERTIFY_DELAY
else:
return 0
def already_rolling_back(self) -> bool:
return self.state in self.rollback_states
def status_code_by_state(self) -> Mapping[str, int]:
codes = {
"deploy_errored": 2,
"deploy_cancelled": 1,
"mfd_failed": self.mark_for_deployment_return_code,
"abandon": 1,
"complete": 0,
}
if not self.block:
# If we don't pass --wait-for-deployment, then exit immediately after mark-for-deployment succeeds.
codes["deploying"] = 0
if self.get_auto_certify_delay() <= 0:
# Instead of setting a 0-second timer to move to certify, just exit 0 when the deploy finishes.
codes["deployed"] = 0
return codes
def get_active_button(self) -> Optional[str]:
return {
"start_deploy": "forward",
"deploying": "forward",
"deployed": None,
"start_rollback": "rollback",
"rolling_back": "rollback",
"rolled_back": None,
}.get(self.state)
def on_enter_mfd_failed(self) -> None:
self.update_slack_status(
f"Marking `{self.commit[:8]}` for deployment for {self.deploy_group} failed. Please see Jenkins for more output."
) # noqa E501
def on_enter_deploying(self) -> None:
# if self.block is False, then deploying is a terminal state so we will promptly exit.
# Don't bother starting the background thread in this case.
if self.block:
thread = Thread(
target=self.do_wait_for_deployment, args=(self.commit,), daemon=True
)
thread.start()
self.cancel_paasta_status_reminder()
self.schedule_paasta_status_reminder()
def on_exit_deploying(self) -> None:
self.stop_waiting_for_deployment(self.commit)
self.cancel_paasta_status_reminder()
def on_enter_start_rollback(self) -> None:
self.update_slack_status(
f"Rolling back ({self.deploy_group}) to {self.old_git_sha}"
)
self.mark_for_deployment_return_code = mark_for_deployment(
git_url=self.git_url,
deploy_group=self.deploy_group,
service=self.service,
commit=self.old_git_sha,
)
if self.mark_for_deployment_return_code != 0:
self.trigger("mfd_failed")
else:
self.update_slack_thread(
f"Marked `{self.old_git_sha[:8]}` for {self.deploy_group}."
+ (
"\n" + self.get_authors()
if self.deploy_group_is_set_to_notify("notify_after_mark")
else ""
)
)
self.trigger("mfd_succeeded")
def on_enter_rolling_back(self) -> None:
if self.block:
thread = Thread(
target=self.do_wait_for_deployment,
args=(self.old_git_sha,),
daemon=True,
)
thread.start()
def on_exit_rolling_back(self) -> None:
self.stop_waiting_for_deployment(self.old_git_sha)
def on_enter_deploy_errored(self) -> None:
report_waiting_aborted(self.service, self.deploy_group)
self.update_slack_status(f"Deploy aborted, but it will still try to converge.")
self.send_manual_rollback_instructions()
if self.deploy_group_is_set_to_notify("notify_after_abort"):
self.ping_authors("Deploy errored")
def on_enter_deploy_cancelled(self) -> None:
if self.deploy_group_is_set_to_notify("notify_after_abort"):
self.ping_authors("Deploy cancelled")
def stop_waiting_for_deployment(self, target_commit: str) -> None:
try:
self.wait_for_deployment_tasks[target_commit].cancel()
del self.wait_for_deployment_tasks[target_commit]
except (KeyError, asyncio.InvalidStateError):
pass
@a_sync.to_blocking
async def do_wait_for_deployment(self, target_commit: str) -> None:
try:
self.stop_waiting_for_deployment(target_commit)
wait_for_deployment_task = asyncio.create_task(
wait_for_deployment(
service=self.service,
deploy_group=self.deploy_group,
git_sha=target_commit,
soa_dir=self.soa_dir,
timeout=self.timeout,
progress=self.progress,
polling_interval=self.polling_interval,
diagnosis_interval=self.diagnosis_interval,
time_before_first_diagnosis=self.time_before_first_diagnosis,
notify_fn=self.ping_authors,
)
)
self.wait_for_deployment_tasks[target_commit] = wait_for_deployment_task
await wait_for_deployment_task
if self.deploy_group_is_set_to_notify("notify_after_wait"):
self.ping_authors(f"Finished waiting for deployment of {target_commit}")
else:
self.update_slack_thread(
f"Finished waiting for deployment of {target_commit}"
)
self.trigger("deploy_finished")
except (KeyboardInterrupt, TimeoutError):
self.trigger("deploy_cancelled")
except NoSuchCluster:
self.trigger("deploy_errored")
except asyncio.CancelledError:
# Don't trigger deploy_errored when someone calls stop_waiting_for_deployment.
pass
except Exception:
log.error("Caught exception in wait_for_deployment:")
log.error(traceback.format_exc())
self.trigger("deploy_errored")
def on_enter_rolled_back(self) -> None:
self.update_slack_status(
f"Finished rolling back to `{self.old_git_sha[:8]}` in {self.deploy_group}"
)
line = f"Rollback to {self.old_git_sha[:8]} for {self.deploy_group} complete"
_log(service=self.service, component="deploy", line=line, level="event")
self.start_timer(self.auto_abandon_delay, "auto_abandon", "abandon")
def on_enter_deployed(self) -> None:
self.update_slack_status(
f"Finished deployment of `{self.commit[:8]}` to {self.deploy_group}"
)
line = f"Deployment of {self.commit[:8]} for {self.deploy_group} complete"
_log(service=self.service, component="deploy", line=line, level="event")
self.send_manual_rollback_instructions()
if self.any_slo_failing() and self.auto_rollbacks_enabled():
self.ping_authors(
"Because an SLO is currently failing, we will not automatically certify. Instead, we will wait indefinitely until you click one of the buttons above."
)
else:
if self.get_auto_certify_delay() > 0:
self.start_timer(
self.get_auto_certify_delay(), "auto_certify", "certify"
)
if self.deploy_group_is_set_to_notify("notify_after_good_deploy"):
self.ping_authors()
def on_enter_complete(self) -> None:
if self.deploy_group_is_set_to_notify("notify_after_good_deploy"):
self.ping_authors()
def send_manual_rollback_instructions(self) -> None:
if self.old_git_sha != self.commit:
message = (
"If you need to roll back manually, run: "
f"`paasta rollback --service {self.service} --deploy-group {self.deploy_group} "
f"--commit {self.old_git_sha}`"
)
self.update_slack_thread(message)
print(message)
def after_state_change(self) -> None:
self.update_slack()
super().after_state_change()
def get_signalfx_api_token(self) -> str:
return (
load_system_paasta_config()
.get_monitoring_config()
.get("signalfx_api_key", None)
)
def get_button_text(self, button: str, is_active: bool) -> str:
active_button_texts = {
"forward": f"Rolling Forward to {self.commit[:8]} :zombocom:"
}
inactive_button_texts = {
"forward": f"Continue Forward to {self.commit[:8]} :arrow_forward:",
"complete": f"Complete deploy to {self.commit[:8]} :white_check_mark:",
"snooze": f"Reset countdown",
"enable_auto_rollbacks": "Enable auto rollbacks :eyes:",
"disable_auto_rollbacks": "Disable auto rollbacks :close_eyes_monkey:",
}
if self.old_git_sha is not None:
active_button_texts.update(
{"rollback": f"Rolling Back to {self.old_git_sha[:8]} :zombocom:"}
)
inactive_button_texts.update(
{
"rollback": f"Roll Back to {self.old_git_sha[:8]} :arrow_backward:",
"abandon": f"Abandon deploy, staying on {self.old_git_sha[:8]} :x:",
}
)
return (active_button_texts if is_active else inactive_button_texts)[button]
def start_auto_rollback_countdown(self, extra_text: str = "") -> None:
cancel_button_text = self.get_button_text(
"disable_auto_rollbacks", is_active=False
)
super().start_auto_rollback_countdown(
extra_text=f'Click "{cancel_button_text}" to cancel this!'
)
if self.deploy_group_is_set_to_notify("notify_after_auto_rollback"):
self.ping_authors()
def deploy_group_is_set_to_notify(self, notify_type: str) -> bool:
return deploy_group_is_set_to_notify(
self.deploy_info, self.deploy_group, notify_type
)
def __build_rollback_audit_details(
self, rollback_type: RollbackTypes
) -> Dict[str, str]:
return {
"rolled_back_from": self.commit,
"rolled_back_to": self.old_git_sha,
"rollback_type": rollback_type.value,
"deploy_group": self.deploy_group,
}
def log_slo_rollback(self) -> None:
_log_audit(
action="rollback",
action_details=self.__build_rollback_audit_details(
RollbackTypes.AUTOMATIC_SLO_ROLLBACK
),
service=self.service,
)
def log_user_rollback(self) -> None:
_log_audit(
action="rollback",
action_details=self.__build_rollback_audit_details(
RollbackTypes.USER_INITIATED_ROLLBACK
),
service=self.service,
)
async def wait_until_instance_is_done(
executor: concurrent.futures.Executor,
service: str,
instance: str,
cluster: str,
git_sha: str,
instance_config: LongRunningServiceConfig,
polling_interval: float,
diagnosis_interval: float,
time_before_first_diagnosis: float,
should_ping_for_unhealthy_pods: bool,
notify_fn: Optional[Callable[[str], None]] = None,
) -> Tuple[str, str]:
loop = asyncio.get_running_loop()
diagnosis_task = asyncio.create_task(
periodically_diagnose_instance(
executor,
service,
instance,
cluster,
git_sha,
instance_config,
diagnosis_interval,
time_before_first_diagnosis,
should_ping_for_unhealthy_pods,
notify_fn,
)
)
try:
while not await loop.run_in_executor(
executor,
functools.partial(
check_if_instance_is_done,
service,
instance,
cluster,
git_sha,
instance_config,
),
):
await asyncio.sleep(polling_interval)
return (
cluster,
instance,
) # for the convenience of the caller, to know which future is finishing.
finally:
diagnosis_task.cancel()
async def periodically_diagnose_instance(
executor: concurrent.futures.Executor,
service: str,
instance: str,
cluster: str,
git_sha: str,
instance_config: LongRunningServiceConfig,
diagnosis_interval: float,
time_before_first_diagnosis: float,
should_ping_for_unhealthy_pods: bool,
notify_fn: Optional[Callable[[str], None]] = None,
) -> None:
await asyncio.sleep(time_before_first_diagnosis)
loop = asyncio.get_running_loop()
while True:
try:
await loop.run_in_executor(
executor,
functools.partial(
diagnose_why_instance_is_stuck,
service,
instance,
cluster,
git_sha,
instance_config,
should_ping_for_unhealthy_pods,
notify_fn,
),
)
except asyncio.CancelledError:
raise
except Exception:
print(f"Couldn't get status of {service}.{instance}:")
traceback.print_exc()
await asyncio.sleep(diagnosis_interval)
def diagnose_why_instance_is_stuck(
service: str,
instance: str,
cluster: str,
git_sha: str,
instance_config: LongRunningServiceConfig,
should_ping_for_unhealthy_pods: bool,
notify_fn: Optional[Callable[[str], None]] = None,
) -> None:
api = client.get_paasta_oapi_client(cluster=cluster)
try:
status = api.service.status_instance(
service=service,
instance=instance,
include_smartstack=False,
include_envoy=False,
include_mesos=False,
new=True,
)
except api.api_error as e:
log.warning(
"Error getting service status from PaaSTA API for "
f"{cluster}: {e.status} {e.reason}"
)
return
print(f" Status for {service}.{instance} in {cluster}:")
for version in status.kubernetes_v2.versions:
# We call get_version_table_entry directly so that we can set version_name_suffix based on git_sha instead of
# creation time of the version (which is what get_versions_table does.)
# Without this, we'd call the old version "new" until the new version is actually created, which would be confusing.
for line in get_version_table_entry(
version,
service,
instance,
cluster,
version_name_suffix="new" if version.git_sha == git_sha else "old",
show_config_sha=True,
verbose=0,
):
print(f" {line}")
print("")
if should_ping_for_unhealthy_pods and notify_fn:
maybe_ping_for_unhealthy_pods(
service, instance, cluster, git_sha, status, notify_fn
)
already_pinged = False
def maybe_ping_for_unhealthy_pods(
service: str,
instance: str,
cluster: str,
git_sha: str,
status: InstanceStatusKubernetesV2,
notify_fn: Callable[[str], None],
) -> None:
global already_pinged
if not already_pinged:
# there can be multiple current versions, e.g. if someone changes yelpsoa-configs during a bounce.
current_versions = [
v for v in status.kubernetes_v2.versions if v.git_sha == git_sha
]
pingable_pods = [
pod
for version in current_versions
for pod in version.pods
if should_ping_for_pod(pod)
]
if pingable_pods:
already_pinged = True
ping_for_pods(service, instance, cluster, pingable_pods, notify_fn)
def should_ping_for_pod(pod: KubernetesPodV2) -> bool:
return recent_container_restart(get_main_container(pod))
def ping_for_pods(
service: str,
instance: str,
cluster: str,
pods: List[KubernetesPodV2],
notify_fn: Callable[[str], None],
) -> None:
pods_by_reason: Dict[str, List[KubernetesPodV2]] = {}
for pod in pods:
pods_by_reason.setdefault(get_main_container(pod).reason, []).append(pod)
for reason, pods_with_reason in pods_by_reason.items():
explanation = {
"Error": "crashed on startup",
"OOMKilled": "run out of memory",
"CrashLoopBackOff": "crashed on startup several times, and Kubernetes is backing off restarting them",
}.get(reason, f"restarted ({reason})")
status_tip = f"Take a look at the output of your unhealthy pods with `paasta status -s {service} -i {instance} -c {cluster} -vv` (more -v for more output.)"
tip = {
"Error": (
f"This may indicate a bug in your code, a misconfiguration in yelpsoa-configs, or missing srv-configs. {status_tip}"
),
"CrashLoopBackOff": f"This may indicate a bug in your code, a misconfiguration in yelpsoa-configs, or missing srv-configs. {status_tip}",
"OOMKilled": " ".join(
(
"This probably means your new version of code requires more memory than the old version."
"You may want to increase memory in yelpsoa-configs or roll back."
"Ask #paasta if you need help with this.",
)
),
}.get(reason, "")
notify_fn(
f"Some of the replicas of your new version have {explanation}: {', '.join(f'`{p.name}`' for p in pods_with_reason)}\n{tip}"
)
def check_if_instance_is_done(
service: str,
instance: str,
cluster: str,
git_sha: str,
instance_config: LongRunningServiceConfig,
api: Optional[client.PaastaOApiClient] = None,
) -> bool:
if api is None:
api = client.get_paasta_oapi_client(cluster=cluster)
if not api:
log.warning(
"Couldn't reach the PaaSTA api for {}! Assuming it is not "
"deployed there yet.".format(cluster)
)
return False
inst_str = f"{service}.{instance} in {cluster}"
log.debug(f"Inspecting the deployment status of {inst_str}")
status = None
try:
status = api.service.bounce_status_instance(service=service, instance=instance)
except api.api_error as e:
if e.status == 404: # non-existent instance
# TODO(PAASTA-17290): just print the error message so that we
# can distinguish between sources of 404s
log.warning(
"Can't get status for instance {}, service {} in "
"cluster {}. This is normally because it is a new "
"service that hasn't been deployed by PaaSTA yet.".format(
instance, service, cluster
)
)
else: # 500 - error talking to api
log.warning(
"Error getting service status from PaaSTA API for "
f"{cluster}: {e.status} {e.reason}"
)
log.debug(f"No status for {inst_str}. Not deployed yet.")
return False
if not status: # 204 - instance is not bounceable
log.debug(
f"{inst_str} is not a supported bounceable instance. "
"Only long-running instances running on Kubernetes are currently "
"supported. Continuing without watching."
)
return True
# Case: instance is stopped
if status.expected_instance_count == 0 or status.desired_state == "stop":
log.debug(f"{inst_str} is marked as stopped. Ignoring it.")
return True
short_git_sha = git_sha[:8]
active_shas = {g[:8] for g, c in status.active_shas}
if short_git_sha in active_shas:
non_desired_shas = active_shas.difference({short_git_sha})
# Case: bounce in-progress
if len(non_desired_shas) == 1:
(other_sha,) = non_desired_shas
print(
f" {inst_str} is still bouncing, from {other_sha} to {short_git_sha}"
)
return False
# Case: previous bounces not yet finished when this one was triggered
elif len(non_desired_shas) > 1:
print(
f" {inst_str} is still bouncing to {short_git_sha}, but there are "
f"multiple other bouncing versions running: {non_desired_shas}"
)
return False
else:
# Case: bounce not yet started
print(
f" {inst_str} hasn't started bouncing to {short_git_sha}; "
f"only the following versions are running: {active_shas}"
)
return False
    # Case: instance is not running
if status.deploy_status not in {"Running", "Deploying", "Waiting"}:
print(
f" {inst_str} isn't running yet; it is in the state: {status.deploy_status}"
)
return False
# Case: not enough replicas are up for the instance to be considered bounced
# The bounce margin factor defines what proportion of instances we need to be "safe",
# so consider it scaled up "enough" if we have that proportion of instances ready.
required_instance_count = int(
math.ceil(
instance_config.get_bounce_margin_factor() * status.expected_instance_count
)
)
if required_instance_count > status.running_instance_count:
print(
f" {inst_str} has only {status.running_instance_count} replicas up, "
f"below the required minimum of {required_instance_count}"
)
return False
# Case: completed
print(
f"Complete: {service}.{instance} on {cluster} is 100% deployed at "
f"{status.running_instance_count} replicas on {status.active_shas[0][0]}"
)
return True
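# Illustrative sketch, not part of the original module: the bounce-margin arithmetic used
# in check_if_instance_is_done above. With a margin factor of 0.95 and 20 expected
# replicas, ceil(0.95 * 20) = 19 replicas must be running before the instance counts as done.
def _example_required_instance_count(
    bounce_margin_factor: float = 0.95, expected_instance_count: int = 20
) -> int:
    return int(math.ceil(bounce_margin_factor * expected_instance_count))  # 19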
WAIT_FOR_INSTANCE_CLASSES = [
MarathonServiceConfig,
KubernetesDeploymentConfig,
CassandraClusterDeploymentConfig,
]
def get_instance_configs_for_service_in_cluster_and_deploy_group(
service_configs: PaastaServiceConfigLoader, cluster: str, deploy_group: str
) -> Iterator[LongRunningServiceConfig]:
for instance_class in WAIT_FOR_INSTANCE_CLASSES:
for instance_config in service_configs.instance_configs(
cluster=cluster, instance_type_class=instance_class
):
if instance_config.get_deploy_group() == deploy_group:
yield instance_config
def get_instance_configs_for_service_in_deploy_group_all_clusters(
service: str, deploy_group: str, git_sha: str, soa_dir: str
) -> Dict[str, List[LongRunningServiceConfig]]:
service_configs = PaastaServiceConfigLoader(
service=service, soa_dir=soa_dir, load_deployments=False
)
instance_configs_per_cluster = {}
api_endpoints = load_system_paasta_config().get_api_endpoints()
for cluster in service_configs.clusters:
if cluster not in api_endpoints:
print(
PaastaColors.red(
"Cluster %s is NOT in paasta-api endpoints config." % cluster
)
)
raise NoSuchCluster
instance_configs_per_cluster[cluster] = list(
get_instance_configs_for_service_in_cluster_and_deploy_group(
service_configs, cluster, deploy_group
)
)
return instance_configs_per_cluster
async def wait_for_deployment(
service: str,
deploy_group: str,
git_sha: str,
soa_dir: str,
timeout: float,
progress: Optional[Progress] = None,
polling_interval: float = None,
diagnosis_interval: float = None,
time_before_first_diagnosis: float = None,
notify_fn: Optional[Callable[[str], None]] = None,
) -> Optional[int]:
instance_configs_per_cluster: Dict[
str, List[LongRunningServiceConfig]
] = get_instance_configs_for_service_in_deploy_group_all_clusters(
service, deploy_group, git_sha, soa_dir
)
total_instances = sum(len(ics) for ics in instance_configs_per_cluster.values())
if not instance_configs_per_cluster:
_log(
service=service,
component="deploy",
line=(
"Couldn't find any long-running instances for service {} in deploy group {}. Exiting.".format(
service, deploy_group
)
),
level="event",
)
return None
print(
"Waiting for deployment of {} for '{}' to complete...".format(
git_sha, deploy_group
)
)
system_paasta_config = load_system_paasta_config()
max_workers = system_paasta_config.get_mark_for_deployment_max_polling_threads()
if polling_interval is None:
polling_interval = (
system_paasta_config.get_mark_for_deployment_default_polling_interval()
)
if diagnosis_interval is None:
diagnosis_interval = (
system_paasta_config.get_mark_for_deployment_default_diagnosis_interval()
)
if time_before_first_diagnosis is None:
time_before_first_diagnosis = (
system_paasta_config.get_mark_for_deployment_default_time_before_first_diagnosis()
)
with progressbar.ProgressBar(maxval=total_instances) as bar:
instance_done_futures = []
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
for cluster, instance_configs in instance_configs_per_cluster.items():
for instance_config in instance_configs:
instance_done_futures.append(
asyncio.ensure_future(
wait_until_instance_is_done(
executor,
service,
instance_config.get_instance(),
cluster,
git_sha,
instance_config,
polling_interval=polling_interval,
diagnosis_interval=diagnosis_interval,
time_before_first_diagnosis=time_before_first_diagnosis,
should_ping_for_unhealthy_pods=instance_config.get_should_ping_for_unhealthy_pods(
system_paasta_config.get_mark_for_deployment_should_ping_for_unhealthy_pods()
),
notify_fn=notify_fn,
),
)
)
remaining_instances: Dict[str, Set[str]] = {
cluster: {ic.get_instance() for ic in instance_configs}
for cluster, instance_configs in instance_configs_per_cluster.items()
}
finished_instances = 0
async def periodically_update_progressbar() -> None:
while True:
await asyncio.sleep(60)
bar.update(finished_instances)
print()
periodically_update_progressbar_task = asyncio.create_task(
periodically_update_progressbar()
)
try:
for coro in asyncio.as_completed(
instance_done_futures, timeout=timeout
):
cluster, instance = await coro
finished_instances += 1
bar.update(finished_instances)
if progress is not None:
progress.percent = bar.percentage
remaining_instances[cluster].remove(instance)
progress.waiting_on = remaining_instances
except asyncio.TimeoutError:
_log(
service=service,
component="deploy",
line=compose_timeout_message(
remaining_instances, timeout, deploy_group, service, git_sha
),
level="event",
)
raise TimeoutError
except asyncio.CancelledError:
# Wait for all the tasks to finish before closing out the ThreadPoolExecutor, to avoid RuntimeError('cannot schedule new futures after shutdown')
for coro in instance_done_futures:
coro.cancel()
try:
await coro
except asyncio.CancelledError:
pass
raise
else:
sys.stdout.flush()
if progress is not None:
progress.percent = 100.0
progress.waiting_on = None
return 0
finally:
periodically_update_progressbar_task.cancel()
def compose_timeout_message(
remaining_instances: Mapping[str, Collection[str]],
timeout: float,
deploy_group: str,
service: str,
git_sha: str,
) -> str:
paasta_status = []
paasta_logs = []
for cluster, instances in sorted(remaining_instances.items()):
if instances:
joined_instances = ",".join(instances)
paasta_status.append(
"paasta status -c {cluster} -s {service} -i {instances}".format(
cluster=cluster, service=service, instances=joined_instances
)
)
paasta_logs.append(
"paasta logs -c {cluster} -s {service} -i {instances} -C deploy -l 1000".format(
cluster=cluster, service=service, instances=joined_instances
)
)
return (
"\n\nTimed out after {timeout} seconds, waiting for {service} "
"in {deploy_group} to be deployed by PaaSTA.\n"
"This probably means the deploy hasn't succeeded. The new service "
"might not be healthy or one or more clusters could be having issues.\n\n"
"To debug, follow steps in {stuck_bounce_runbook}, "
"or try running the following to see the status of instances we tried to deploy:\n\n"
" {status_commands}\n\n {logs_commands}"
"\n\nIf the service is known to be slow to start you may wish to "
"increase the timeout on this step.\n"
"To wait a little longer run:\n\n"
" paasta wait-for-deployment -s {service} -l {deploy_group} -c {git_sha}".format(
timeout=timeout,
deploy_group=deploy_group,
service=service,
git_sha=git_sha,
status_commands="\n ".join(paasta_status),
logs_commands="\n ".join(paasta_logs),
stuck_bounce_runbook=os.environ.get(
"STUCK_BOUNCE_RUNBOOK", DEFAULT_STUCK_BOUNCE_RUNBOOK,
),
)
)
class NoSuchCluster(Exception):
"""To be raised by wait_for_deployment() when a service has a marathon or
kubernetes config for a cluster that is not listed in /etc/paasta/api_endpoints.json.
"""
pass
|
GuiUtils.py
|
import queue
import os
import threading
import tkinter as tk
from Utils import local_path
def set_icon(window):
er16 = tk.PhotoImage(file=local_path(os.path.join("data","ER16.gif")))
er32 = tk.PhotoImage(file=local_path(os.path.join("data","ER32.gif")))
er48 = tk.PhotoImage(file=local_path(os.path.join("data","ER48.gif")))
window.tk.call('wm', 'iconphoto', window._w, er16, er32, er48) # pylint: disable=protected-access
# Although tkinter is intended to be thread-safe, there are many reports of issues,
# some of which may be platform-specific or depend on whether the Tcl library was
# compiled without multithreading support. Therefore assume it is not thread-safe,
# to avoid any possible problems.
class BackgroundTask(object):
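    # Worker code never touches tkinter directly: it calls queue_event() with a
    # callable, and process_queue() drains the queue on the Tk main loop (re-armed
    # every 100 ms via window.after), keeping all widget access on one thread.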
def __init__(self, window, code_to_run):
self.window = window
self.queue = queue.Queue()
self.running = True
self.process_queue()
self.task = threading.Thread(target=code_to_run, args=(self,))
self.task.start()
def stop(self):
self.running = False
#safe to call from worker
def queue_event(self, event):
self.queue.put(event)
def process_queue(self):
try:
while True:
if not self.running:
return
event = self.queue.get_nowait()
event()
if self.running:
                    #if self is no longer running, self.window may no longer be valid
self.window.update_idletasks()
except queue.Empty:
pass
if self.running:
self.window.after(100, self.process_queue)
class BackgroundTaskProgress(BackgroundTask):
def __init__(self, parent, code_to_run, title):
self.parent = parent
self.window = tk.Toplevel(parent)
self.window['padx'] = 5
self.window['pady'] = 5
try:
self.window.attributes("-toolwindow", 1)
except tk.TclError:
pass
self.window.wm_title(title)
self.label_var = tk.StringVar()
self.label_var.set("")
self.label = tk.Label(self.window, textvariable=self.label_var, width=50)
self.label.pack()
self.window.resizable(width=False, height=False)
set_icon(self.window)
self.window.focus()
super().__init__(self.window, code_to_run)
#safe to call from worker thread
def update_status(self, text):
self.queue_event(lambda: self.label_var.set(text))
# only call this in an event callback
def close_window(self):
self.stop()
self.window.destroy()
class ToolTips(object):
    # This class is derived from wckToolTips, which is available under the following license:
# Copyright (c) 1998-2007 by Secret Labs AB
# Copyright (c) 1998-2007 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and its
# associated documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appears in all
# copies, and that both that copyright notice and this permission notice
# appear in supporting documentation, and that the name of Secret Labs
# AB or the author not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
label = None
window = None
active = 0
tag = None
after_id = None
@classmethod
def getcontroller(cls, widget):
if cls.tag is None:
cls.tag = "ui_tooltip_%d" % id(cls)
widget.bind_class(cls.tag, "<Enter>", cls.enter)
widget.bind_class(cls.tag, "<Leave>", cls.leave)
widget.bind_class(cls.tag, "<Motion>", cls.motion)
widget.bind_class(cls.tag, "<Destroy>", cls.leave)
# pick suitable colors for tooltips
try:
cls.bg = "systeminfobackground"
cls.fg = "systeminfotext"
widget.winfo_rgb(cls.fg) # make sure system colors exist
widget.winfo_rgb(cls.bg)
except Exception:
cls.bg = "#ffffe0"
cls.fg = "black"
return cls.tag
@classmethod
def register(cls, widget, text):
widget.ui_tooltip_text = text
tags = list(widget.bindtags())
tags.append(cls.getcontroller(widget))
widget.bindtags(tuple(tags))
@classmethod
def unregister(cls, widget):
tags = list(widget.bindtags())
tags.remove(cls.getcontroller(widget))
widget.bindtags(tuple(tags))
# event handlers
@classmethod
def enter(cls, event):
widget = event.widget
if not cls.label:
# create and hide balloon help window
cls.popup = tk.Toplevel(bg=cls.fg, bd=1)
cls.popup.overrideredirect(1)
cls.popup.withdraw()
cls.label = tk.Label(
cls.popup, fg=cls.fg, bg=cls.bg, bd=0, padx=2, justify=tk.LEFT
)
cls.label.pack()
cls.active = 0
cls.xy = event.x_root + 16, event.y_root + 10
cls.event_xy = event.x, event.y
cls.after_id = widget.after(200, cls.display, widget)
@classmethod
def motion(cls, event):
cls.xy = event.x_root + 16, event.y_root + 10
cls.event_xy = event.x, event.y
@classmethod
def display(cls, widget):
if not cls.active:
# display balloon help window
text = widget.ui_tooltip_text
if callable(text):
text = text(widget, cls.event_xy)
cls.label.config(text=text)
cls.popup.deiconify()
cls.popup.lift()
cls.popup.geometry("+%d+%d" % cls.xy)
cls.active = 1
cls.after_id = None
@classmethod
def leave(cls, event):
widget = event.widget
if cls.active:
cls.popup.withdraw()
cls.active = 0
if cls.after_id:
widget.after_cancel(cls.after_id)
cls.after_id = None
|
leo_cloud.py
|
#@+leo-ver=5-thin
#@+node:ekr.20170925083314.1: * @file ../plugins/leo_cloud.py
#@+<< docstring >>
#@+node:ekr.20210518113636.1: ** << docstring >>
"""
leo_cloud.py - synchronize Leo subtrees with remote central server
Terry N. Brown, terrynbrown@gmail.com, Fri Sep 22 10:34:10 2017
This plugin allows subtrees within a .leo file to be stored in the cloud. It
should be possible to support various cloud platforms; currently git and systems
like DropBox are supported (i.e. you can use GitLab or GitHub or your own remote
git server).
A leo_cloud subtree has a top node with a headline that starts with
'@leo_cloud'. The rest of the headline is ignored. The body of this top node is
used to describe the cloud service, e.g.:
type: Git
remote: git@gitlab.com:tnbrown/leo_cloud_storage.git
local: ~/.leo/leo_cloud/gitlab_leo_cloud_storage
ID: shortcuts
read_on_load: ask
write_on_save: ask
The first three lines can be repeated with different IDs to store
different subtrees at the same remote cloud location.
read_on_load: / write_on_save: can be yes, no, ask, or background (read_on_load
only). If the value is not one of those, there's a warning dialog. `background`
performs a check against the cloud in the background, and then behaves like
`ask` if a difference is detected.
There's also a file system backend, which would look like this:
type: FileSystem
root: ~/DropBox/leo_cloud
ID: my_notes
read_on_load: ask
write_on_save: ask
If you point the FileSystem backend at a folder that is synced externally, as
shown above, it can serve as a cloud adapter for services like DropBox, Google
Drive, OneDrive, etc.
In addition to the Git and FileSystem cloud types it should be possible to add
many others - AWS, WebDAV, sFTP, whatever.
FYI: https://gitlab.com/ gives you free private repos.
The plugin stores headline, body, and uA (unknown attributes). The caveat is
that the data must be JSON serializable; this avoids pickle flavor issues. I
don't think this will cause problems except for legacy datetime objects from the
todo.py plugin and set()s in the tags plugin. I think both can be fixed easily -
a custom JSON writer can write datetime as iso string time and sets as lists,
and the tags plugin can coerce lists to sets. I think the todo.py plugin already
reads iso string time values.
My intended use was a common synchronized todo list across machines, which this
achieves.
An unintended bonus is that you can use it to sync. your settings across
machines easily too. Like this:
@settings
@keys
@leo_cloud
@shortcuts
"just works", so now your shortcuts etc. can be stored on a central
server.
"""
#@-<< docstring >>
#@+<< imports >>
#@+node:ekr.20210518113710.1: ** << imports >>
import json
import os
import re
import shlex
import subprocess
import tempfile
import threading
from copy import deepcopy
from datetime import date, datetime
from hashlib import sha1
from leo.core import leoGlobals as g
from leo.core.leoNodes import vnode
from leo.core.leoQt import QtCore # see QTimer in LeoCloud.__init__
#
# Fail fast, right after all imports.
g.assertUi('qt') # May raise g.UiTypeException, caught by the plugins manager.
#@-<< imports >>
# for 'key: value' lines in body text
KWARG_RE = re.compile(r"^([A-Za-z][A-Za-z0-9_]*): (.*)")
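# For example, a body line such as "remote: git@gitlab.com:me/store.git" yields
# group(1) == "remote" and group(2) == "git@gitlab.com:me/store.git"; lines that
# don't match the "key: value" shape are simply skipped by kw_from_node().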
#@+others
#@+node:ekr.20201012111338.3: ** init (leo_cloud.py)
def init ():
g.registerHandler(('new','open2'), onCreate)
g.registerHandler(('save1'), onSave)
g.plugin_signon(__name__)
return True
#@+node:ekr.20201012111338.4: ** onCreate (leo_cloud.py)
def onCreate (tag, keys):
c = keys.get('c')
if not c:
return
c._leo_cloud = LeoCloud(c)
#@+node:ekr.20201012111338.5: ** onSave (leo_cloud.py)
def onSave(tag, keys):
c = keys.get('c')
if not c:
return None
    if getattr(c, '_leo_cloud', None):
c._leo_cloud.save_clouds()
return None # explicitly not stopping save1 hook
#@+node:ekr.20201012111338.6: ** lc_read_current (leo_cloud.py)
@g.command("lc-read-current")
def lc_read_current(event):
"""write current Leo Cloud subtree to cloud"""
c = event.get('c')
if not c or not hasattr(c, '_leo_cloud'):
return
c._leo_cloud.read_current()
#@+node:ekr.20201012111338.7: ** lc_write_current (leo_cloud.py)
@g.command("lc-write-current")
def lc_write_current(event):
"""write current Leo Cloud subtree to cloud"""
c = event.get('c')
if not c or not hasattr(c, '_leo_cloud'):
return
c._leo_cloud.write_current()
#@+node:ekr.20201012111338.8: ** class LeoCloudIOBase
class LeoCloudIOBase:
"""Leo Cloud IO layer Base Class
LeoCloudIO layer sits between LeoCloud plugin and backends,
which might be leo_cloud_server.py or Google Drive etc. etc.
"""
#@+others
#@+node:ekr.20201012111338.9: *3* LeoCloudIOBase.__init__
def __init__(self, c, p, kwargs):
"""
Args:
c (context): Leo outline
p (position): @leo_cloud position
kwargs (dict): key word args from p.b
"""
self.v = p.v
self.c = c
self.lc_id = kwargs['ID']
#@+node:ekr.20201012111338.10: *3* LeoCloudIOBase.get_subtree
def get_subtree(self, lc_id):
"""get_subtree - get a Leo subtree from the cloud
Args:
lc_id (str(?)): resource to get
:returns: vnode build from lc_id
"""
# pylint: disable=no-member
# self.get_data
return self.c._leo_cloud.from_dict(self.get_data(lc_id))
#@+node:ekr.20201012111338.11: *3* LeoCloudIOBase.put_subtree
def put_subtree(self, lc_id, v):
"""put - put a subtree into the Leo Cloud
Args:
lc_id (str(?)): place to put it
v (vnode): subtree to put
"""
# pylint: disable=no-member
# self.put_data
self.put_data(lc_id, LeoCloud.to_dict(v))
#@-others
#@+node:ekr.20201012111338.12: ** class LeoCloudIOFileSystem(LeoCloudIOBase)
class LeoCloudIOFileSystem(LeoCloudIOBase):
"""Leo Cloud IO layer that just loads / saves local files.
i.e it's just for development / testing
"""
#@+others
#@+node:ekr.20201012111338.13: *3* LeoCloudIOFileSystem(LeoCloudIOBase).__init__
def __init__(self, c, p, kwargs):
"""
        Args:
            c (context): Leo outline
            p (position): @leo_cloud position
            kwargs (dict): key word args from p.b; 'root' gives the root folder for data
"""
LeoCloudIOBase.__init__(self, c, p, kwargs)
self.basepath = os.path.expanduser(kwargs['root'])
if not os.path.exists(self.basepath):
os.makedirs((self.basepath))
#@+node:ekr.20201012111338.14: *3* LeoCloudIOFileSystem(LeoCloudIOBase).get_data
def get_data(self, lc_id):
"""get_data - get a Leo Cloud resource
Args:
lc_id (str(?)): resource to get
Returns:
object loaded from JSON
"""
filepath = os.path.join(self.basepath, lc_id+'.json')
with open(filepath) as data:
return json.load(data)
#@+node:ekr.20201012111338.15: *3* LeoCloudIOFileSystem(LeoCloudIOBase).put_data
def put_data(self, lc_id, data):
"""put - store data in the Leo Cloud
Args:
lc_id (str(?)): place to put it
data (obj): data to store
"""
filepath = os.path.join(self.basepath, lc_id+'.json')
with open(filepath, 'w') as out:
return out.write(LeoCloud.to_json(data))
#@-others
#@+node:ekr.20201012111338.16: ** class LeoCloudIOGit(LeoCloudIOBase)
class LeoCloudIOGit(LeoCloudIOBase):
"""Leo Cloud IO layer that just loads / saves local files.
i.e it's just for development / testing
"""
#@+others
#@+node:ekr.20201012111338.17: *3* LeoCloudIOGit(LeoCloudIOBase).__init__
def __init__(self, c, p, kwargs):
"""
        Args:
            c (context): Leo outline
            p (position): @leo_cloud position
            kwargs (dict): key word args from p.b; needs 'remote' (git URL) and 'local' (clone path)
"""
# if p.v._leo_cloud_io was used, we'd probably also need to pull
# in get_data(), so don't bother with p.v._leo_cloud_io
# p.v._leo_cloud_io = self
LeoCloudIOBase.__init__(self, c, p, kwargs)
self.remote = kwargs['remote']
self.local = os.path.expanduser(kwargs['local'])
if not os.path.exists(self.local):
os.makedirs((self.local))
if not os.listdir(self.local):
self._run_git('git clone "%s" "%s"'% (self.remote, self.local))
self._run_git('git -C "%s" pull' % self.local)
#@+node:ekr.20201012111338.18: *3* LeoCloudIOGit(LeoCloudIOBase)._run_git
def _run_git(self, text):
"""_run_git - run a git command
Args:
text (str): command to run
"""
subprocess.Popen(shlex.split(text)).wait()
#@+node:ekr.20201012111338.19: *3* LeoCloudIOGit(LeoCloudIOBase).get_data
def get_data(self, lc_id):
"""get_data - get a Leo Cloud resource
Args:
lc_id (str(?)): resource to get
:returns: object loaded from JSON
"""
filepath = os.path.join(self.local, lc_id+'.json')
with open(filepath) as data:
return json.load(data)
#@+node:ekr.20201012111338.20: *3* LeoCloudIOGit(LeoCloudIOBase).put_data
def put_data(self, lc_id, data):
"""put - store data in the Leo Cloud
Args:
lc_id (str(?)): place to put it
data (obj): data to store
"""
filepath = os.path.join(self.local, lc_id+'.json')
with open(filepath, 'w') as out:
out.write(LeoCloud.to_json(data))
self._run_git('git -C "%s" add "%s"' % (self.local, lc_id+'.json'))
self._run_git('git -C "%s" commit -mupdates' % self.local)
self._run_git('git -C "%s" push' % self.local)
#@-others
#@+node:ekr.20201012111338.21: ** class LeoCloud
class LeoCloud:
#@+others
#@+node:ekr.20201012111338.22: *3* LeoCloud.__init__
def __init__(self, c):
"""
Args:
c (context): Leo context """
self.c = c
self.bg_finished = False # used for background thread
self.bg_results = [] # results from background thread
# we're here via open2 hook, but too soon to load from cloud,
# so defer
QtCore.QTimer.singleShot(0, self.load_clouds)
#@+node:ekr.20201012111338.23: *3* LeoCloud.bg_check
def bg_check(self, to_check):
"""
bg_check - run from load_clouds() to look for changes in
cloud in background.
WARNING: no gui impacting calls allowed here (g.es() etc.)
Args:
to_check (list): list of (vnode, kwargs, hash) tuples to check
This (background) thread can't handle any changes found, because it
would have to interact with the user and GUI code can only be called
from the main thread. We don't want to use QThread, to allow this to
work without Qt. So we just collect results and set
self.bg_finished = True, which the main thread watches using g.IdleTime()
"""
for v, kwargs, local_hash in to_check:
c = v.context
p = c.vnode2position(v)
lc_io = getattr(v, '_leo_cloud_io', None) or self.io_from_node(p)
subtree = lc_io.get_subtree(lc_io.lc_id)
remote_hash = self.recursive_hash(subtree, [], include_current=False)
self.bg_results.append((v, local_hash == remote_hash))
if False and local_hash != remote_hash:
# disabled dev. / debug code
# record difference for inspection
tmpdir = tempfile.mkdtemp()
                with open(os.path.join(tmpdir, 'leo_cloud_local.json'), 'w') as out:
                    out.write(self.to_json(self.to_dict(v)))
                with open(os.path.join(tmpdir, 'leo_cloud_remote.json'), 'w') as out:
out.write(self.to_json(self.to_dict(subtree)))
self.bg_finished = True
#@+node:ekr.20201012111338.24: *3* LeoCloud.bg_post_process
def bg_post_process(self, timer):
"""
bg_post_process - check to see if background checking is finished,
handle any changed cloud trees found
Args:
timer (leo-idle-timer): Leo idle timer
"""
if not self.bg_finished:
return
timer.stop()
from_background = set()
for v, unchanged in self.bg_results:
kwargs = self.kw_from_node(v)
if unchanged:
g.es("Cloud tree '%s' unchanged" % kwargs['ID'])
else:
from_background.add((kwargs['remote'], kwargs['ID']))
g.es("Cloud tree '%s' DOES NOT MATCH" % kwargs['ID'])
if from_background:
self.load_clouds(from_background=from_background)
#@+node:ekr.20201012111338.25: *3* LeoCloud.find_at_leo_cloud
def find_at_leo_cloud(self, p):
"""find_at_leo_cloud - find @leo_cloud node
Args:
p (position): start from here, work up
Returns:
position or None
"""
while not p.h.startswith("@leo_cloud") and p.parent():
p = p.parent()
if not p.h.startswith("@leo_cloud"):
g.es("No @leo_cloud node found", color='red')
return None
return p
#@+node:ekr.20201012111338.26: *3* LeoCloud._find_clouds_recursive
def _find_clouds_recursive(self, v, found):
"""see find_clouds()"""
if v.h.startswith('@ignore'):
return
if v.h.startswith('@leo_cloud'):
found.add(v)
return
for child in v.children:
self._find_clouds_recursive(child, found)
#@+node:ekr.20201012111338.27: *3* LeoCloud.find_clouds
def find_clouds(self):
"""find_clouds - return a list of @leo_cloud nodes
respects @ignore in headlines, doesn't recurse into @leo_cloud nodes
"""
found = set()
self._find_clouds_recursive(self.c.hiddenRootNode, found)
valid = []
for lc in found:
if 'ID' in self.kw_from_node(lc):
valid.append(lc)
else:
g.es('%s - no ID: line' % lc.h, color='red')
return valid
#@+node:ekr.20201012111338.28: *3* LeoCloud._from_dict_recursive
def _from_dict_recursive(self, top, d):
"""see from_dict()"""
top.h = d['h']
top.b = d['b']
top.u = d['u']
top.children[:] = []
for child in d['children']:
top.children.append(self._from_dict_recursive(vnode(self.c), child))
return top
#@+node:ekr.20201012111338.29: *3* LeoCloud.from_dict
def from_dict(self, d):
"""from_dict - make a Leo subtree from a dict
Args:
d (dict): input dict
Returns:
vnode
"""
return self._from_dict_recursive(vnode(self.c), d)
#@+node:ekr.20201012111338.30: *3* LeoCloud.io_from_node
def io_from_node(self, p):
"""io_from_node - create LeoCloudIO instance from body text
Args:
p (position): node containing text
Returns:
LeoCloudIO instance
"""
kwargs = self.kw_from_node(p)
# pylint: disable=eval-used
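        # the 'type:' value in the node body picks the backend class by name, e.g.
        # 'type: Git' -> LeoCloudIOGit, 'type: FileSystem' -> LeoCloudIOFileSystem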
lc_io_class = eval("LeoCloudIO%s" % kwargs['type'])
return lc_io_class(self.c, p, kwargs)
#@+node:ekr.20201012111338.31: *3* LeoCloud.kw_from_node
def kw_from_node(self, p):
"""kw_from_node - read keywords from body text
Args:
p (position): node containing text
Returns:
dict
"""
kwargs = {'remote': None}
# some methods assume 'remote' exists, but it's absent in LeoCloudIOFileSystem
for line in p.b.split('\n'):
kwarg = KWARG_RE.match(line)
if kwarg:
kwargs[kwarg.group(1)] = kwarg.group(2)
return kwargs
#@+node:ekr.20201012111338.32: *3* LeoCloud.load_clouds
def load_clouds(self, from_background=None):
"""
load_clouds - Handle loading from cloud on startup and after
background checking for changes.
Args:
from_background (set): set of (remote, ID) str tuples if we're
called after a background check process finds changes.
"""
if from_background is None:
from_background = set()
skipped = []
background = [] # things to check in background
for lc_v in self.find_clouds():
kwargs = self.kw_from_node(lc_v)
if from_background and \
(kwargs['remote'], kwargs['ID']) not in from_background:
# only process nodes from the background checking
continue
read = False
read_on_load = kwargs.get('read_on_load', '').lower()
if from_background:
# was 'background', changes found, so now treat as 'ask'
read_on_load = 'ask'
if read_on_load == 'yes':
read = True
elif read_on_load == 'ask':
try:
last_read = datetime.strptime(
lc_v.u['_leo_cloud']['last_read'], "%Y-%m-%dT%H:%M:%S.%f")
except KeyError:
last_read = None
message = "Read cloud data '%s', overwriting local nodes?" % kwargs['ID']
if last_read:
delta = datetime.now() - last_read
message = "%s\n%s, %sh:%sm:%ss ago" % (
message, last_read.strftime("%a %b %d %H:%M"),
24*delta.days+int(delta.seconds / 3600),
int(delta.seconds / 60) % 60,
delta.seconds % 60)
read = g.app.gui.runAskYesNoCancelDialog(self.c, "Read cloud data?",
message=message)
read = str(read).lower() == 'yes'
if read:
self.read_current(p=self.c.vnode2position(lc_v))
elif read_on_load == 'background':
# second time round, with from_background data, this will
# have been changed to 'ask' (above), so no infinite loop
background.append((lc_v, kwargs,
self.recursive_hash(lc_v, [], include_current=False)))
elif read_on_load == 'no':
g.es("NOTE: not reading '%s' from cloud" % kwargs['ID'])
elif read_on_load != 'ask':
skipped.append(kwargs['ID'])
if skipped:
g.app.gui.runAskOkDialog(self.c, "Unloaded cloud data",
message="There is unloaded (possibly stale) cloud data, use\nread_on_load: yes|no|ask\n"
"in @leo_cloud nodes to avoid this message.\nUnloaded data:\n%s" % ', '.join(skipped))
if background:
# send to background thread for checking
names = ', '.join([i[1]['ID'] for i in background])
g.es("Checking cloud trees in background:\n%s" % names)
thread = threading.Thread(target=self.bg_check, args=(background,))
thread.start()
# start watching for results
g.IdleTime(self.bg_post_process).start()
#@+node:ekr.20201012111338.33: *3* LeoCloud.read_current
def read_current(self, p=None):
"""read_current - read current tree from cloud
"""
if p is None:
p = self.find_at_leo_cloud(self.c.p)
if not p:
return
old_p = self.c.p.copy()
g.es("Reading from cloud...") # some io's as slow to init. - reassure user
# io's can cache themselves on the vnode, but should think hard
# about whether they want to
lc_io = getattr(p.v, '_leo_cloud_io', None) or self.io_from_node(p)
v = lc_io.get_subtree(lc_io.lc_id)
p.deleteAllChildren()
for child_n, child in enumerate(v.children):
child._addLink(child_n, p.v)
if hasattr(self.c, 'cleo'):
self.c.cleo.loadAllIcons()
self.c.redraw(p=old_p if self.c.positionExists(old_p) else p)
g.es("Read %s" % lc_io.lc_id)
# set c changed but don't dirty tree, which would cause
# write to cloud prompt on save
# but... (a) top node is ending up dirty anyway, and (b) this is ok
# because we want the user to understand why the outline's changed,
# so just ignore top node dirtiness in self.subtree_changed()
self.c.setChanged()
p.v.u.setdefault('_leo_cloud', {})['last_read'] = datetime.now().isoformat()
#@+node:ekr.20201012111338.34: *3* LeoCloud.recursive_hash
@staticmethod
def recursive_hash(nd, tree, include_current=True):
"""
recursive_hash - recursively hash a tree
Args:
nd (vnode): node to hash
tree (list): recursive list of hashes
include_current (bool): include h/b/u of current node in hash?
Returns:
str: sha1 hash of tree
Calling with include_current=False ignores the h/b/u of the top node
To hash a dict, need a string representation
that sorts keys, i.e. json.dumps(s, sort_keys=True)
Trailing newlines are ignored in body text.
"""
childs = []
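        # children are hashed first and folded into this node's hash, so a change
        # anywhere in the subtree changes the hash reported for the top node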
hashes = [LeoCloud.recursive_hash(child, childs) for child in nd.children]
if include_current:
hashes.extend([nd.h + nd.b.rstrip('\n') + json.dumps(LeoCloud._ua_clean(nd.u), sort_keys=True)])
whole_hash = sha1(''.join(hashes).encode('utf-8')).hexdigest()
tree.append([whole_hash, childs])
return whole_hash
#@+node:ekr.20201012111338.35: *3* LeoCloud.save_clouds
def save_clouds(self):
"""check for clouds to save when outline is saved"""
skipped = []
no = []
unchanged = []
for lc_v in self.find_clouds():
kwargs = self.kw_from_node(lc_v)
write = False
write_on_save = kwargs.get('write_on_save', '').lower()
if not self.subtree_changed(lc_v):
write_on_save = 'unchanged'
if write_on_save == 'yes':
write = True
elif write_on_save == 'ask':
write = g.app.gui.runAskYesNoCancelDialog(self.c, "Write cloud data?",
message="Write cloud data '%s', overwriting remote version?" % kwargs['ID'])
write = str(write).lower() == 'yes'
if write:
self.write_current(p=self.c.vnode2position(lc_v))
elif write_on_save == 'no':
no.append(kwargs['ID'])
elif write_on_save == 'unchanged':
unchanged.append(kwargs['ID'])
elif write_on_save != 'ask':
skipped.append(kwargs['ID'])
if skipped:
g.app.gui.runAskOkDialog(self.c, "Unsaved cloud data",
message="There is unsaved cloud data, use\nwrite_on_save: yes|no|ask\n"
"in @leo_cloud nodes to avoid this message.\nUnsaved data:\n%s" % ', '.join(skipped))
if unchanged:
g.es("Unchanged cloud data: %s" % ', '.join(unchanged))
if no:
g.es("Cloud data never saved: %s" % ', '.join(no))
#@+node:ekr.20201012111338.36: *3* LeoCloud.subtree_changed
def subtree_changed(self, p):
"""subtree_changed - check if subtree is changed
Args:
p (position): top of subtree
Returns:
bool
"""
if isinstance(p, vnode):
p = self.c.vnode2position(p)
for nd in p.subtree_iter():
if nd.isDirty():
break
else:
return False
return True
#@+node:ekr.20201012111338.37: *3* LeoCloud._to_json_serial
@staticmethod
def _to_json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
if isinstance(obj, set):
return list(obj)
raise TypeError ("Type %s not serializable" % type(obj))
#@+node:ekr.20201012111338.38: *3* LeoCloud.to_json
@staticmethod
def to_json(data):
"""to_json - convert dict to appropriate JSON
Args:
data (dict): data to convert
Returns:
str: json
"""
return json.dumps(
data,
sort_keys=True, # prevent unnecessary diffs
indent=0, # make json readable on cloud web pages
default=LeoCloud._to_json_serial
)
#@+node:ekr.20201012111338.39: *3* LeoCloud._to_dict_recursive
@staticmethod
def _to_dict_recursive(v, d):
"""_to_dict_recursive - recursively make dictionary representation of v
Args:
v (vnode): subtree to convert
d (dict): dict for results
Returns:
dict of subtree
"""
d['b'] = v.b
d['h'] = v.h
d['u'] = v.u
d['children'] = []
for child in v.children:
d['children'].append(LeoCloud._to_dict_recursive(child, dict()))
return d
#@+node:ekr.20201012111338.40: *3* LeoCloud.to_dict
@staticmethod
def to_dict(v):
"""to_dict - make dictionary representation of v
Args:
v (vnode): subtree to convert
Returns:
dict of subtree
"""
return LeoCloud._to_dict_recursive(v, dict())
#@+node:ekr.20201012111338.41: *3* LeoCloud._ua_clean
@staticmethod
def _ua_clean(d):
"""_ua_clean - strip todo icons from dict
Args:
d (dict): dict to clean
Returns:
cleaned dict
recursive_hash() to compare trees stumbles on todo icons which are
derived information from the todo attribute and include *local*
paths to icon images
"""
d = deepcopy(d)
if 'icons' in d:
d['icons'] = [i for i in d['icons'] if not i.get('cleoIcon')]
return d
#@+node:ekr.20201012111338.42: *3* LeoCloud.write_current
def write_current(self, p=None):
"""write_current - write current tree to cloud
"""
if p is None:
p = self.find_at_leo_cloud(self.c.p)
if not p:
return
g.es("Storing to cloud...") # some io's as slow to init. - reassure user
lc_io = getattr(p.v, '_leo_cloud_io', None) or self.io_from_node(p)
lc_io.put_subtree(lc_io.lc_id, p.v)
g.es("Stored %s" % lc_io.lc_id)
# writing counts as reading, last read time msg. confusing otherwise
p.v.u.setdefault('_leo_cloud', {})['last_read'] = datetime.now().isoformat()
#@-others
#@-others
#@@language python
#@@tabwidth -4
#@-leo
|
utils.py
|
"""Globally shared common utilities functions/classes/variables"""
import config
import constants
import cPickle
import logging
import pymongo
import string
import threading
import time
from bson.son import SON
def _make_logger():
"""Create a new logger"""
logger = logging.getLogger("parse.flashback")
logger.setLevel(config.APP_CONFIG["logging_level"])
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
"%(asctime)s [%(levelname)s:%(processName)s] %(message)s",
"%m-%d %H:%M:%S"))
logger.addHandler(handler)
return logger
# log will be used globally for all logging code.
LOG = _make_logger()
def unpickle(input_file):
"""Safely unpack entry from the file"""
try:
return cPickle.load(input_file)
except EOFError:
return None
def unpickle_iterator(filename):
"""Return the unpickled objects as a sequence of objects"""
f = open(filename)
while True:
result = unpickle(f)
if result:
yield result
else:
raise StopIteration
def now_in_utc_secs():
"""Get current time in seconds since UTC epoch"""
return int(time.time())
def create_tailing_cursor(collection, criteria, oplog=False):
"""
Create a cursor that constantly tails the latest documents from the
database.
    criteria is a query dict (for filtering op types, targeting a specific set
of collections, etc.).
"""
tailer = collection.find(
criteria, slave_okay=True, tailable=True, await_data=True,
as_class=SON)
# Set oplog_replay on the cursor, which allows queries against the oplog to run much faster
if oplog:
tailer.add_option(pymongo.cursor._QUERY_OPTIONS['oplog_replay'])
return tailer
def get_start_time(collection):
"""Get the latest element's timestamp from a collection with "ts" field"""
result = collection.find().limit(1).sort([("ts", pymongo.DESCENDING)])
try:
return result.next()["ts"]
except StopIteration:
return None
class EmptyClass(object):
"""Empty class"""
def set_interval(interval, start_immediately=True, exec_on_exit=True):
"""An decorator that executes the event every n seconds"""
def decorator(function):
def wrapper(*args, **kwargs):
stopped = threading.Event()
def loop(): # executed in another thread
if start_immediately:
function(*args, **kwargs)
while not stopped.wait(interval): # until stopped
function(*args, **kwargs)
if exec_on_exit:
function(*args, **kwargs)
t = threading.Thread(target=loop)
t.daemon = True # stop if the program exits
t.start()
return stopped
return wrapper
return decorator
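# Illustrative use of set_interval (the heartbeat function below is made up):
#
#     @set_interval(5)
#     def heartbeat():
#         LOG.info("heartbeat")
#
#     stop = heartbeat()   # starts the daemon timer thread, returns a threading.Event
#     stop.set()           # later: stop the loop (one final call still runs, since exec_on_exit=True)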
def make_ns_selector(databases, target_collections):
system_collections = \
set([constants.PROFILER_COLLECTION, constants.INDEX_COLLECTION])
if target_collections is not None:
target_collections = set(target_collections)
target_collections -= system_collections
if target_collections is not None and len(target_collections) > 0:
return {"$in": ["{0}.{1}".format(database, coll)
for coll in target_collections
for database in databases]}
else:
return {
"$regex": r"^({})\.".format(string.join(databases, '|')),
"$nin": ["{0}.{1}".format(database, coll)
for coll in system_collections
for database in databases]
}
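# Rough illustration (database/collection names are hypothetical):
# make_ns_selector(["app"], ["users", "orders"]) yields something like
# {"$in": ["app.users", "app.orders"]} (set iteration, so order may vary), while
# make_ns_selector(["app"], None) falls back to a "^(app)\." regex with the
# profiler/index system collections excluded via "$nin".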
def get_oplog_tailer(oplog_client, types, target_dbs, target_colls,
start_time=None):
"""Start recording the oplog entries starting from now.
We only care about "insert" operations since all other queries will
be captured by mongodb oplog collection.
REQUIRED: the specific mongodb database has enabled profiling.
"""
oplog_collection = \
oplog_client[constants.LOCAL_DB][constants.OPLOG_COLLECTION]
criteria = {
"op": {"$in": types},
"ns": make_ns_selector(target_dbs, target_colls)
}
if start_time is not None:
criteria["ts"] = {"$gte": start_time}
return create_tailing_cursor(oplog_collection, criteria, oplog=True)
def get_profiler_tailer(client, target_db, target_colls, start_time):
"""Start recording the profiler entries"""
profiler_collection = client[target_db][constants.PROFILER_COLLECTION]
criteria = {
"ns": make_ns_selector([target_db], target_colls),
"ts": {"$gte": start_time}
}
return create_tailing_cursor(profiler_collection, criteria)
class DictionaryCopier(object):
"""Simple tool for copy the fields from source dict on demand"""
def __init__(self, source):
self.src = source
self.dest = {}
def copy_fields(self, *fields):
for field in fields:
if field in self.src:
self.dest[field] = self.src[field]
|
models.py
|
import logging
import os
import threading
import uuid
from ipaddress import ip_address, ip_interface
import yaml
from django.db import models
from ansible_api.models.mixins import AbstractExecutionModel
from cloud_provider import get_cloud_client
from common import models as common_models
from kubeoperator import settings
from django.utils.translation import ugettext_lazy as _
from kubeops_api.models.host import Host
logger = logging.getLogger('cloud_provider')
class CloudProviderTemplate(models.Model):
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
meta = common_models.JsonTextField(blank=True, null=True, verbose_name=_('Meta'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
template_dir = os.path.join(settings.BASE_DIR, 'resource', 'clouds')
@property
def path(self):
return os.path.join(self.template_dir, self.name)
@classmethod
def lookup(cls):
for d in os.listdir(cls.template_dir):
full_path = os.path.join(cls.template_dir, d)
meta_path = os.path.join(full_path, 'meta.yml')
if not os.path.isdir(full_path) or not os.path.isfile(meta_path):
continue
with open(meta_path) as f:
                metadata = yaml.safe_load(f)
defaults = {'name': d, 'meta': metadata}
cls.objects.update_or_create(defaults=defaults, name=d)
class Region(models.Model):
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
template = models.ForeignKey('CloudProviderTemplate', on_delete=models.SET_NULL, null=True)
cloud_region = models.CharField(max_length=128, null=True, default=None)
vars = common_models.JsonDictTextField(default={})
comment = models.CharField(max_length=128, blank=True, null=True, verbose_name=_("Comment"))
@property
def zone_size(self):
zones = Zone.objects.filter(region=self)
return len(zones)
@property
def cluster_size(self):
clusters = []
plans = Plan.objects.filter(region=self)
for plan in plans:
from kubeops_api.models.cluster import Cluster
cs = Cluster.objects.filter(plan=plan)
for c in cs:
clusters.append(c)
return len(clusters)
@property
def image_ovf_path(self):
return self.vars['image_ovf_path']
@property
def image_vmdk_path(self):
return self.vars['image_vmdk_path']
@property
def image_name(self):
return self.vars['image_name']
def set_vars(self):
meta = self.template.meta.get('region', None)
if meta:
_vars = meta.get('vars', {})
self.vars.update(_vars)
self.save()
def on_region_create(self):
self.set_vars()
def to_dict(self):
dic = {
"region": self.cloud_region
}
dic.update(self.vars)
return dic
class Zone(models.Model):
ZONE_STATUS_READY = "READY"
ZONE_STATUS_INITIALIZING = "INITIALIZING"
ZONE_STATUS_ERROR = "ERROR"
ZONE_STATUS_CHOICES = (
(ZONE_STATUS_READY, 'READY'),
(ZONE_STATUS_INITIALIZING, 'INITIALIZING'),
(ZONE_STATUS_ERROR, 'ERROR'),
)
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
vars = common_models.JsonDictTextField(default={})
region = models.ForeignKey('Region', on_delete=models.CASCADE, null=True)
cloud_zone = models.CharField(max_length=128, null=True, default=None)
ip_used = common_models.JsonListTextField(null=True, default=[])
status = models.CharField(max_length=64, choices=ZONE_STATUS_CHOICES, null=True)
@property
def host_size(self):
hosts = Host.objects.filter(zone=self)
return len(hosts)
def change_status(self, status):
self.status = status
self.save()
def create_image(self):
try:
logger.info('upload os image')
self.change_status(Zone.ZONE_STATUS_INITIALIZING)
client = get_cloud_client(self.region.vars)
client.create_image(zone=self)
self.change_status(Zone.ZONE_STATUS_READY)
        except Exception:
logger.error(msg='upload os image error!', exc_info=True)
self.change_status(Zone.ZONE_STATUS_ERROR)
def on_zone_create(self):
thread = threading.Thread(target=self.create_image)
thread.start()
def allocate_ip(self):
ip = self.ip_pools().pop()
self.ip_used.append(ip)
self.save()
return ip
def recover_ip(self, ip):
self.ip_used.remove(ip)
self.save()
def to_dict(self):
dic = {
"key": "z" + str(self.id).split("-")[3],
"name": self.cloud_zone,
"zone_name": self.name,
"ip_pool": self.ip_pools()
}
dic.update(self.vars)
return dic
def ip_pools(self):
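        # Build the list of assignable addresses: zones backed by the 'openstack'
        # template simply walk ip_start..ip_end, other providers intersect that range
        # with the hosts of the ip_start/net_mask network; ip_used entries are removed.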
ip_pool = []
ip_start = ip_address(self.vars['ip_start'])
ip_end = ip_address(self.vars['ip_end'])
if self.region.template.name == 'openstack':
while ip_start <= ip_end:
ip_pool.append(str(ip_start))
ip_start += 1
for ip in self.ip_used:
if ip in ip_pool:
ip_pool.remove(ip)
return ip_pool
net_mask = self.vars['net_mask']
interface = ip_interface("{}/{}".format(str(ip_start), net_mask))
network = interface.network
for host in network.hosts():
if ip_start <= host <= ip_end:
ip_pool.append(str(host))
for ip in self.ip_used:
if ip in ip_pool:
ip_pool.remove(ip)
return ip_pool
def ip_available_size(self):
return len(self.ip_pools())
def has_plan(self):
for plan in Plan.objects.all():
for zone in plan.get_zones():
if zone.name == self.name:
return True
return False
@property
def provider(self):
return self.region.template.name
class Plan(models.Model):
DEPLOY_TEMPLATE_SINGLE = "SINGLE"
DEPLOY_TEMPLATE_MULTIPLE = "MULTIPLE"
DEPLOY_TEMPLATE_CHOICES = (
(DEPLOY_TEMPLATE_SINGLE, 'single'),
(DEPLOY_TEMPLATE_MULTIPLE, 'multiple'),
)
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(max_length=20, unique=True, verbose_name=_('Name'))
date_created = models.DateTimeField(auto_now_add=True, verbose_name=_('Date created'))
zone = models.ForeignKey('Zone', null=True, on_delete=models.CASCADE)
region = models.ForeignKey('Region', null=True, on_delete=models.CASCADE)
zones = models.ManyToManyField('Zone', related_name='zones')
deploy_template = models.CharField(choices=DEPLOY_TEMPLATE_CHOICES, default=DEPLOY_TEMPLATE_SINGLE, max_length=128)
vars = common_models.JsonDictTextField(default={})
@property
def provider(self):
return self.region.vars['provider']
@property
def mixed_vars(self):
_vars = self.vars.copy()
_vars.update(self.region.to_dict())
zones = self.get_zones()
zone_dicts = []
for zone in zones:
zone_dicts.append(zone.to_dict())
_vars['zones'] = zone_dicts
return _vars
def get_zones(self):
zones = []
if self.zone:
zones.append(self.zone)
if self.zones:
zones.extend(self.zones.all())
return zones
def count_ip_available(self):
zones = self.get_zones()
num = 0
for zone in zones:
num += zone.ip_available_size()
return num
@property
def compute_models(self):
return {
"master": self.vars.get('master_model', None),
"worker": self.vars.get('worker_model', None)
}
|
Day11-04.py
|
## Image processing and data analysis tool
from tkinter import *; import os.path ;import math
from tkinter.filedialog import *
from tkinter.simpledialog import *
## Function definitions
def loadImage(fname) :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    fsize = os.path.getsize(fname) # check the file size
    inH = inW = int(math.sqrt(fsize)) # decide the input memory size (important)
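    # the .raw file is assumed to be a square 8-bit grayscale image,
    # so width == height == sqrt(file size in bytes)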
inImage = []; tmpList = []
    for i in range(inH) : # allocate the input memory (initialized to 0)
tmpList = []
for k in range(inW) :
tmpList.append(0)
inImage.append(tmpList)
    # load the data from the file into memory
    fp = open(fname, 'rb') # open the file (binary mode)
for i in range(inH) :
for k in range(inW) :
inImage[i][k] = int(ord(fp.read(1)))
fp.close()
def openFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
filename = askopenfilename(parent=window,
filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
    loadImage(filename) # file --> input memory
    equal() # input memory --> output memory
import threading
def display() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # tear down the existing canvas, if there is one
if canvas != None :
canvas.destroy()
    # prepare the display (fixed size)
window.geometry(str(outH) + 'x' + str(outW))
canvas = Canvas(window, width=outW, height=outH)
paper = PhotoImage(width=outW, height=outH)
canvas.create_image((outW/2, outH/2), image=paper, state='normal')
    # draw to the screen
def putPixel() :
for i in range(0, outH) :
for k in range(0, outW) :
data = outImage[i][k]
paper.put('#%02x%02x%02x' % (data, data, data), (k,i))
threading.Thread(target=putPixel).start()
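    # note: putPixel runs on a worker thread so the window stays responsive while
    # pixels are written; tkinter is not guaranteed thread-safe everywhere, so if
    # drawing misbehaves on a given platform, one could call putPixel() directly instead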
canvas.pack()
def equal() : # identity (copy) image algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # important! decide the output memory size
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH): # allocate the output memory (initialized to 0)
        tmpList = []
        for k in range(outW):
            tmpList.append(0)
        outImage.append(tmpList)
    #############################
    # implement the actual image processing algorithm
    #############################
for i in range(inH) :
for k in range(inW) :
outImage[i][k] = inImage[i][k]
display()
def addImage() : # brightening algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # important! decide the output memory size
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH): # allocate the output memory (initialized to 0)
        tmpList = []
        for k in range(outW):
            tmpList.append(0)
        outImage.append(tmpList)
    #############################
    # implement the actual image processing algorithm
    #############################
value = askinteger('밝게하기', '밝게할 값-->', minvalue=1, maxvalue=255)
for i in range(inH) :
for k in range(inW) :
if inImage[i][k] + value > 255 :
outImage[i][k] = 255
else :
outImage[i][k] = inImage[i][k] + value
display()
def a_average() : # average value of the input/output images
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
rawSum = 0
for i in range(inH) :
for k in range(inW) :
rawSum += inImage[i][k]
inRawAvg = int(rawSum / (inH*inW))
rawSum = 0
for i in range(outH) :
for k in range(outW) :
rawSum += outImage[i][k]
outRawAvg = int(rawSum / (outH*outW))
    subWindow = Toplevel(window) # sub-window owned by the parent (window)
subWindow.geometry('200x100')
label1 = Label(subWindow, text='입력영상 평균값 -->' + str(inRawAvg) ); label1.pack()
label2 = Label(subWindow, text='출력영상 평균값 -->' + str(outRawAvg)); label2.pack()
subWindow.mainloop()
def upDown() : # vertical flip algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # important! decide the output memory size
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH): # allocate the output memory (initialized to 0)
        tmpList = []
        for k in range(outW):
            tmpList.append(0)
        outImage.append(tmpList)
    #############################
    # implement the actual image processing algorithm
    #############################
for i in range(inH) :
for k in range(inW) :
outImage[outW-1-i][k] = inImage[i][k]
display()
def panImage() :
global panYN
panYN = True
def mouseClick(event) : # record the start point of a pan drag
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global sx, sy, ex, ey, panYN
if not panYN :
return
sx = event.x; sy = event.y;
def mouseDrop(event): # finish the pan drag and shift the image
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global sx, sy, ex, ey, panYN
if not panYN:
return
ex = event.x; ey = event.y;
my = sx - ex ; mx = sy - ey
    # important! decide the output memory size
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH): # allocate the output memory (initialized to 0)
        tmpList = []
        for k in range(outW):
            tmpList.append(0)
        outImage.append(tmpList)
    #############################
    # implement the actual image processing algorithm
    #############################
for i in range(inH) :
for k in range(inW) :
if 0<= i-mx <outH and 0<= k-my < outW :
outImage[i-mx][k-my] = inImage[i][k]
panYN = False
display()
def zoomOut() : # shrink (zoom out) algorithm
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # important! decide the output memory size
    scale = askinteger('축소하기', '축소할 배수-->', minvalue=2, maxvalue=32)
    outW = int(inW/scale); outH = int(inH/scale);
    outImage = []; tmpList = []
    for i in range(outH): # allocate the output memory (initialized to 0)
        tmpList = []
        for k in range(outW):
            tmpList.append(0)
        outImage.append(tmpList)
    #############################
    # implement the actual image processing algorithm
    #############################
for i in range(inH) :
for k in range(inW) :
outImage[int(i/scale)][int(k/scale)] = inImage[i][k]
display()
import struct
def saveFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
saveFp = asksaveasfile(parent=window, mode='wb',
defaultextension="*.raw", filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
for i in range(outW):
for k in range(outH):
saveFp.write( struct.pack('B',outImage[i][k]))
saveFp.close()
def exitFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
pass
import csv
def saveCSV() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.csv", filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
output_file = output_file.name
header = ['Column', 'Row', 'Value']
with open(output_file, 'w', newline='') as filewriter:
csvWriter = csv.writer(filewriter)
csvWriter.writerow(header)
for row in range(outW):
for col in range(outH):
data = outImage[row][col]
row_list = [row, col, data]
csvWriter.writerow(row_list)
print('OK!')
def saveShuffleCSV() :
pass
def loadCSV(fname) :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
fsize = -1
fp = open(fname, 'r')
for f in fp :
fsize += 1
fp.close()
    inH = inW = int(math.sqrt(fsize)) # decide the input memory size (important)
inImage = []; tmpList = []
    for i in range(inH) : # allocate the input memory (initialized to 0)
tmpList = []
for k in range(inW) :
tmpList.append(0)
inImage.append(tmpList)
    # load the data from the file into memory
    fp = open(fname, 'r') # open the file (text mode)
csvFP = csv.reader(fp)
next(csvFP)
for row_list in csvFP :
row= int(row_list[0]) ; col = int(row_list[1]) ; value=int(row_list[2])
inImage[row][col] = value
fp.close()
def openCSV() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
filename = askopenfilename(parent=window,
filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
    loadCSV(filename) # file --> input memory
    equal() # input memory --> output memory
import sqlite3
def saveSQLite() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
    con = sqlite3.connect('imageDB') # open (or connect to) the database
    cur = con.cursor() # create a cursor (the channel for sending queries)
    # build the column-name list
colList = []
fname = os.path.basename(filename).split(".")[0]
try:
sql = "CREATE TABLE imageTable( filename CHAR(20), resolution smallint" + \
", row smallint, col smallint, value smallint)"
cur.execute(sql)
except:
pass
for i in range(inW) :
for k in range(inH) :
sql = "INSERT INTO imageTable VALUES('" + fname + "'," + str(inW) + \
"," + str(i) + "," + str(k) + "," + str(inImage[i][k]) +")"
cur.execute(sql)
con.commit()
cur.close()
    con.close() # close the database connection
print('Ok!')
def openSQLite() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
    con = sqlite3.connect('imageDB') # open (or connect to) the database
    cur = con.cursor() # create a cursor (the channel for sending queries)
try :
sql = "SELECT DISTINCT filename, resolution FROM imageTable"
cur.execute(sql)
        tableNameList = [] # e.g. ['puppy:128', 'puppy:512', ...]
while True :
row = cur.fetchone()
if row == None :
break
tableNameList.append( row[0] + ':' + str(row[1]) )
        ######## inner function: a function defined inside another function (local scope) #######
def selectTable() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
selectedIndex = listbox.curselection()[0]
subWindow.destroy()
fname, res = tableNameList[selectedIndex].split(':')
filename = fname
sql = "SELECT row, col, value FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + res
print(sql)
cur.execute(sql)
inH = inW = int(res)
inImage = []; tmpList = []
            for i in range(inH): # allocate the input memory (initialized to 0)
tmpList = []
for k in range(inW):
tmpList.append(0)
inImage.append(tmpList)
while True :
row_tuple = cur.fetchone()
if row_tuple == None :
break
row, col, value = row_tuple
inImage[row][col] = value
cur.close()
con.close()
equal()
print("Ok! openSQLite")
################################################################
subWindow = Toplevel(window)
listbox = Listbox(subWindow)
button = Button(subWindow, text='선택', command=selectTable)
listbox.pack(); button.pack()
for sName in tableNameList :
listbox.insert(END, sName)
subWindow.lift()
except :
cur.close()
con.close()
print("Error! openSQLite")
import pymysql
def saveMySQL() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = pymysql.connect(host='192.168.174.129', user='root',
        password='1234', db='imageDB', charset='utf8') # open (or connect to) the database
    cur = con.cursor() # create a cursor (the channel for sending queries)
    # build the column-name list
colList = []
fname = os.path.basename(filename).split(".")[0]
try:
sql = "CREATE TABLE imageTable( filename CHAR(20), resolution smallint" + \
", row smallint, col smallint, value smallint)"
cur.execute(sql)
except:
pass
try:
sql = "DELETE FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + str(outW)
cur.execute(sql)
con.commit()
except:
pass
for i in range(inW) :
for k in range(inH) :
sql = "INSERT INTO imageTable VALUES('" + fname + "'," + str(outW) + \
"," + str(i) + "," + str(k) + "," + str(outImage[i][k]) +")"
cur.execute(sql)
con.commit()
cur.close()
    con.close() # close the database connection
print('Ok! saveMySQL')
def openMySQL() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = pymysql.connect(host='192.168.174.129', user='root',
        password='1234', db='imageDB', charset='utf8') # open (or connect to) the database
    cur = con.cursor() # create a cursor (the channel for sending queries)
try :
sql = "SELECT DISTINCT filename, resolution FROM imageTable"
cur.execute(sql)
        tableNameList = [] # e.g. ['puppy:128', 'puppy:512', ...]
while True :
row = cur.fetchone()
if row == None :
break
tableNameList.append( row[0] + ':' + str(row[1]) )
        ######## inner function: a function defined inside another function (local scope) #######
def selectTable() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
selectedIndex = listbox.curselection()[0]
subWindow.destroy()
fname, res = tableNameList[selectedIndex].split(':')
filename = fname
sql = "SELECT row, col, value FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + res
print(sql)
cur.execute(sql)
inH = inW = int(res)
inImage = []; tmpList = []
            for i in range(inH): # allocate the input memory (initialized to 0)
tmpList = []
for k in range(inW):
tmpList.append(0)
inImage.append(tmpList)
while True :
row_tuple = cur.fetchone()
if row_tuple == None :
break
row, col, value = row_tuple
inImage[row][col] = value
cur.close()
con.close()
equal()
print("Ok! openMySQL")
################################################################
subWindow = Toplevel(window)
listbox = Listbox(subWindow)
button = Button(subWindow, text='선택', command=selectTable)
listbox.pack(); button.pack()
for sName in tableNameList :
listbox.insert(END, sName)
subWindow.lift()
except :
cur.close()
con.close()
print("Error! openMySQL")
import xlwt
def saveExcel1() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.xls", filetypes=(("XLS파일", "*.xls"), ("모든파일", "*.*")))
output_file = output_file.name
sheetName = os.path.basename(output_file).split(".")[0]
wb = xlwt.Workbook()
ws = wb.add_sheet(sheetName)
for rowNum in range(outH):
for colNum in range(outW):
data = outImage[rowNum][colNum]
ws.write(rowNum, colNum, data)
wb.save(output_file)
print('OK! saveExcel1')
import xlsxwriter
def saveExcel2() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.xlsx", filetypes=(("XLSX파일", "*.xls"), ("모든파일", "*.*")))
output_file = output_file.name
sheetName = os.path.basename(output_file).split(".")[0]
wb = xlsxwriter.Workbook(output_file)
ws = wb.add_worksheet(sheetName)
    ws.set_column(0, outW, 1.0) # roughly 0.34 or so
for r in range(outH):
        ws.set_row(r, 9.5) # roughly 0.35 or so
for rowNum in range(outW) :
for colNum in range(outH) :
data = outImage[rowNum][colNum]
            # use the data value to set the cell background color: #000000~#FFFFFF
if data > 15 :
hexStr = '#' + (hex(data)[2:])*3
else :
hexStr = '#' + ('0' + hex(data)[2:]) * 3
            # prepare the cell format
cell_format = wb.add_format()
cell_format.set_bg_color(hexStr)
ws.write(rowNum, colNum, '', cell_format)
wb.close()
print('OK! saveExcel2')
## Global variables
window, canvas, paper, filename = [None] * 4
inImage, outImage = [], []; inW, inH, outW, outH = [0] * 4
panYN = False; sx, sy, ex, ey = [0] * 4
## Main code
window = Tk(); window.geometry('200x200');
window.title('영상 처리&데이터 분석 Ver 0.5')
window.bind("<Button-1>", mouseClick)
window.bind("<ButtonRelease-1>", mouseDrop)
mainMenu = Menu(window);window.config(menu=mainMenu)
fileMenu = Menu(mainMenu);mainMenu.add_cascade(label='파일', menu=fileMenu)
fileMenu.add_command(label='열기', command=openFile)
fileMenu.add_command(label='저장', command=saveFile)
fileMenu.add_separator()
fileMenu.add_command(label='종료', command=exitFile)
pixelMenu = Menu(mainMenu);mainMenu.add_cascade(label='화소점처리', menu=pixelMenu)
pixelMenu.add_command(label='동일영상', command=equal)
pixelMenu.add_command(label='밝게하기', command=addImage)
geoMenu = Menu(mainMenu);mainMenu.add_cascade(label='기하학 처리', menu=geoMenu)
geoMenu.add_command(label='상하반전', command=upDown)
geoMenu.add_command(label='화면이동', command=panImage)
geoMenu.add_command(label='화면축소', command=zoomOut)
analyzeMenu = Menu(mainMenu);mainMenu.add_cascade(label='데이터분석', menu=analyzeMenu)
analyzeMenu.add_command(label='평균값', command=a_average)
otherMenu = Menu(mainMenu);mainMenu.add_cascade(label='다른 포맷 처리', menu=otherMenu)
otherMenu.add_command(label='CSV로 내보내기', command=saveCSV)
otherMenu.add_command(label='CSV(셔플)로 내보내기', command=saveShuffleCSV)
otherMenu.add_command(label='CSV 불러오기', command=openCSV)
otherMenu.add_separator()
otherMenu.add_command(label='SQLite로 내보내기', command=saveSQLite)
otherMenu.add_command(label='SQLite에서 가져오기', command=openSQLite)
otherMenu.add_separator()
otherMenu.add_command(label='MySQL로 내보내기', command=saveMySQL)
otherMenu.add_command(label='MySQL에서 가져오기', command=openMySQL)
otherMenu.add_separator()
otherMenu.add_command(label='Excel로 내보내기(숫자)', command=saveExcel1)
otherMenu.add_command(label='Excel로 내보내기(음영)', command=saveExcel2)
window.mainloop()
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.wallet_db import WalletDB
from electrum.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum.wallet import update_password_for_directory
from electrum.plugin import run_hook
from electrum import util
from electrum.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
maybe_extract_bolt11_invoice, parse_max_spend)
from electrum.invoices import PR_PAID, PR_FAILED
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from electrum.logging import Logger
from electrum.bitcoin import COIN
from electrum.gui import messages
from .i18n import _
from .util import get_default_language
from . import KIVY_GUI_PATH
from kivy.app import App
from kivy.core.window import Window
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog, PasswordDialog
from .uix.dialogs.choice_dialog import ChoiceDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
# register the widget cache to keep memory usage down; the timeout is set to forever
# so the data stays cached
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register(
'Roboto',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
)
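# Note: Kivy's LabelBase.register takes (name, fn_regular, fn_italic, fn_bold, fn_bolditalic),
# so Roboto.ttf is reused for the italic slot and Roboto-Bold.ttf for the bold-italic slot.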
from electrum.util import (NoDynamicFeeEstimates, NotEnoughFunds,
BITCOIN_BIP21_URI_SCHEME, LIGHTNING_URI_SCHEME,
UserFacingException)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog, SwapDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
from electrum.paymentrequest import PaymentRequest
class ElectrumWindow(App, Logger):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
if not self._init_finished:
return
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def set_auto_connect(self, b: bool):
# This method makes sure we persist b into the config even if self.auto_connect == b.
# Note: on_auto_connect() only gets called if the value of the self.auto_connect property *changes*.
self.electrum_config.set_key('auto_connect', b)
self.auto_connect = b
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
if not self._init_finished:
return
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
protocol = PREFERRED_NETWORK_PROTOCOL
def cb2(server_str):
popup.ids.server_str.text = server_str
servers = self.network.get_servers()
server_choices = {}
for _host, d in sorted(servers.items()):
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
server_choices[server.net_addr_str()] = _host
ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(server_str)
if not server: raise Exception("failed to parse")
except Exception as e:
self.show_error(_("Invalid server details: {}").format(repr(e)))
return
net_params = net_params._replace(server=server)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_gossip = BooleanProperty(False)
def on_use_gossip(self, instance, x):
self.electrum_config.set_key('use_gossip', self.use_gossip, True)
if self.network:
if self.use_gossip:
self.network.start_gossip()
else:
self.network.run_from_another_thread(
self.network.stop_gossip())
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
use_recoverable_channels = BooleanProperty(True)
def on_use_recoverable_channels(self, instance, x):
self.electrum_config.set_key('use_recoverable_channels', self.use_recoverable_channels, True)
def switch_to_send_screen(func):
# try until send_screen is available
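# Clock.schedule_interval calls the lambda every 0.1 s; it keeps returning True (keep polling)
# until the wallet and send_screen exist, switching to the 'send' tab to instantiate the screen,
# and finally calls func and returns False, which cancels the scheduled interval.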
def wrapper(self, *args):
f = lambda dt: (bool(func(self, *args) and False) if self.send_screen else bool(self.switch_to('send') or True)) if self.wallet else True
Clock.schedule_interval(f, 0.1)
return wrapper
@switch_to_send_screen
def set_URI(self, uri):
self.send_screen.set_URI(uri)
@switch_to_send_screen
def set_ln_invoice(self, invoice):
self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
data = str(intent.getDataString())
scheme = str(intent.getScheme()).lower()
if scheme == BITCOIN_BIP21_URI_SCHEME or scheme == LIGHTNING_URI_SCHEME:
self.set_URI(data)
def on_language(self, instance, language):
self.logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
self.logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
self.logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, wallet, key, status):
req = self.wallet.receive_requests.get(key)
if req is None:
return
if self.receive_screen:
if status == PR_PAID:
self.receive_screen.update()
else:
self.receive_screen.update_item(key, req)
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, wallet, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = self.wallet.get_invoice_status(req)
if self.send_screen:
if status == PR_PAID:
self.send_screen.update()
else:
self.send_screen.update_item(key, req)
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
def on_payment_succeeded(self, event, wallet, key):
description = self.wallet.get_label(key)
self.show_info(_('Payment succeeded') + '\n\n' + description)
self._trigger_update_history()
def on_payment_failed(self, event, wallet, key, reason):
self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
return self.electrum_config.get_base_unit()
def _set_bu(self, value):
self.electrum_config.set_base_unit(value)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return self.electrum_config.get_decimal_point()
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / COIN
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = COIN * Decimal(fiat_amount) / Decimal(rate)
return format_satoshis_plain(satoshis, decimal_point=self.decimal_point())
def get_amount(self, amount_str: str) -> Optional[int]:
if not amount_str:
return None
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''The current orientation of the window.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
_init_finished = False
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
self._use_single_password = False
self.resume_dialog = None
App.__init__(self)#, **kwargs)
Logger.__init__(self)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', get_default_language())
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.server.host
self.server_port = str(net_params.server.port)
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_gossip = config.get('use_gossip', False)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updating a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._settings_dialog = None
self._channels_dialog = None
self._addresses_dialog = None
self.set_fee_status()
self.invoice_popup = None
self.request_popup = None
self._init_finished = True
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data: str):
from electrum.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.set_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
return
# try to decode transaction
from electrum.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for name in ['send', 'history', 'receive']:
self.update_tab(name)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, key):
from .uix.dialogs.request_dialog import RequestDialog
self.request_popup = RequestDialog('Request', key)
self.request_popup.open()
def show_invoice(self, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
data = invoice.lightning_invoice if invoice.is_lightning() else key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None, help_text=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(
title, data, show_text,
failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard,
help_text=help_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return self.scan_qr_non_android(on_complete)
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def scan_qr_non_android(self, on_complete):
from electrum import qrscanner
try:
video_dev = self.electrum_config.get_video_device()
data = qrscanner.scan_barcode(video_dev)
if data is not None:
on_complete(data)
except UserFacingException as e:
self.show_error(e)
except BaseException as e:
self.logger.exception('camera error')
self.show_error(repr(e))
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file(KIVY_GUI_PATH + '/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
self.logger.exception('crash on startup')
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the starting point of the kivy UI
'''
import time
self.logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
util.register_callback(self.on_network_event, interests)
util.register_callback(self.on_fee, ['fee'])
util.register_callback(self.on_fee_histogram, ['fee_histogram'])
util.register_callback(self.on_quotes, ['on_quotes'])
util.register_callback(self.on_history, ['on_history'])
util.register_callback(self.on_channels, ['channels_updated'])
util.register_callback(self.on_channel, ['channel'])
util.register_callback(self.on_invoice_status, ['invoice_status'])
util.register_callback(self.on_request_status, ['request_status'])
util.register_callback(self.on_payment_failed, ['payment_failed'])
util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
util.register_callback(self.on_channel_db, ['channel_db'])
util.register_callback(self.set_num_peers, ['gossip_peers'])
util.register_callback(self.set_unknown_channels, ['unknown_channels'])
if self.network and self.electrum_config.get('auto_connect') is None:
self.popup_dialog("first_screen")
# load_wallet_on_start will be called later, after initial network setup is completed
else:
# load wallet
self.load_wallet_on_start()
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
self.lightning_gossip_num_nodes = num_nodes
self.lightning_gossip_num_channels = num_channels
def set_num_peers(self, event, num_peers):
self.lightning_gossip_num_peers = num_peers
def set_unknown_channels(self, event, unknown):
self.lightning_gossip_num_queries = unknown
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_success(self, storage, db, password):
self.password = password
if self.electrum_config.get('single_password'):
self._use_single_password = update_password_for_directory(self.electrum_config, password, password)
self.logger.info(f'use single password: {self._use_single_password}')
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
def on_wizard_aborted(self):
# wizard did not return a wallet; and there is no wallet open atm
if not self.wallet:
self.stop()
def load_wallet_by_name(self, path):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
if self.password and self._use_single_password:
storage = WalletStorage(path)
# call check_password to decrypt
storage.check_password(self.password)
self.on_open_wallet(self.password, storage)
return
d = OpenWalletDialog(self, path, self.on_open_wallet)
d.open()
def load_wallet_on_start(self):
"""As part of app startup, try to load last wallet."""
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
def on_open_wallet(self, password, storage):
if not storage.file_exists():
wizard = InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.run('new')
else:
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
assert not db.requires_upgrade()
self.on_wizard_success(storage, db, password)
def on_stop(self):
self.logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
else:
self._settings_dialog.update()
self._settings_dialog.open()
def lightning_open_channel_dialog(self):
if not self.wallet.has_lightning():
self.show_error(_('Lightning is not enabled for this wallet'))
return
if not self.wallet.lnworker.channels and not self.wallet.lnworker.channel_backups:
warning = _(messages.MSG_LIGHTNING_WARNING)
d = Question(_('Do you want to create your first channel?') +
'\n\n' + warning, self.open_channel_dialog_with_warning)
d.open()
else:
d = LightningOpenChannelDialog(self)
d.open()
def swap_dialog(self):
d = SwapDialog(self, self.electrum_config)
d.open()
def open_channel_dialog_with_warning(self, b):
if b:
d = LightningOpenChannelDialog(self)
d.open()
def lightning_channels_dialog(self):
if self._channels_dialog is None:
self._channels_dialog = LightningChannelsDialog(self)
self._channels_dialog.open()
def on_channel(self, evt, wallet, chan):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def on_channels(self, evt, wallet):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def is_wallet_creation_disabled(self):
return bool(self.electrum_config.get('single_password')) and self.password is None
def wallets_dialog(self):
from .uix.dialogs.wallets import WalletDialog
dirname = os.path.dirname(self.electrum_config.get_wallet_path())
d = WalletDialog(dirname, self.load_wallet_by_name, self.is_wallet_creation_disabled())
d.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
self.wallets_dialog()
elif name == 'status':
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name == 'lightning_channels_dialog' and not self.wallet.can_have_lightning():
self.show_error(_("Not available for this wallet.") + "\n\n" +
_("Lightning is currently restricted to HD wallets with p2wpkh addresses."))
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of Electrum. This function performs the basic
tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.send_screen = None
self.receive_screen = None
self.icon = os.path.dirname(KIVY_GUI_PATH) + "/icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.server.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
self.logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
self.request_focus_for_main_view()
def request_focus_for_main_view(self):
if platform != 'android':
return
# The main view of the activity might not have focus,
# in which case e.g. the OS "back" button would not work.
# see #6276 (specifically "method 2" and "method 3")
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
PythonActivity.requestFocusForMainView()
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.is_up_to_date() or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0
balance_sat = c + u + x + l
text = self.format_amount(balance_sat)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(balance_sat) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.is_up_to_date():
self._trigger_update_status()
def get_max_amount(self):
from electrum.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
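# '!' is the "spend max" sentinel: the draft transaction drains all selected coins to addr,
# so the maximum sendable amount can be read back from its output value below.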
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, decimal_point=self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return self.electrum_config.format_amount(x, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x) -> str:
if x is None:
return 'none'
if parse_max_spend(x):
return f'max({x})'
# FIXME this is using format_satoshis_plain instead of config.format_amount
# as we sometimes convert the returned string back to numbers,
# via self.get_amount()... the need for converting back should be removed
return format_satoshis_plain(x, decimal_point=self.decimal_point()) + ' ' + self.base_unit
def format_amount_and_units_with_fiat(self, x) -> str:
text = self.format_amount_and_units(x)
fiat = self.fx.format_amount_and_units(x) if self.fx else None
if text and fiat:
text += f' ({fiat})'
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.is_up_to_date() or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
self.logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
if self.resume_dialog is not None:
return
now = time.time()
if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60:
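# re-lock behind the PIN dialog if the app was paused for more than 5 minutes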
def on_success(x):
self.resume_dialog = None
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=self.stop)
self.resume_dialog = d
d.open()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, *, show_text_with_qr: bool = True):
if not label.data:
return
self.qr_dialog(label.name, label.data, show_text_with_qr)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble(text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
text = str(text) # so that we also handle e.g. Exception
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def show_transaction(self, txid):
tx = self.wallet.db.get_transaction(txid)
if not tx and self.wallet.lnworker:
tx = self.wallet.lnworker.lnwatcher.db.get_transaction(txid)
if tx:
self.tx_dialog(tx)
else:
self.show_error(f'Transaction not found {txid}')
def lightning_tx_dialog(self, tx):
from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
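# status/msg protocol: on success msg carries the txid, on failure it carries the error
# text that broadcast()'s on_complete callback shows to the user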
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
if amount == '!':
screen.is_max = True
max_amt = self.get_max_amount()
screen.amount = (max_amt + ' ' + self.base_unit) if max_amt else ''
else:
screen.amount = amount
screen.is_max = False
popup = AmountDialog(show_max, amount, cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
else:
self._addresses_dialog.update()
self._addresses_dialog.open()
def fee_dialog(self):
from .uix.dialogs.fee_dialog import FeeDialog
fee_dialog = FeeDialog(self, self.electrum_config, self.set_fee_status)
fee_dialog.open()
def set_fee_status(self):
target, tooltip, dyn = self.electrum_config.get_fee_target()
self.fee_status = target
def on_fee(self, event, *arg):
self.set_fee_status()
def protected(self, msg, f, args):
if self.electrum_config.get('pin_code'):
msg += "\n" + _("Enter your PIN code to proceed")
on_success = lambda pw: f(*args, self.password)
d = PincodeDialog(
self,
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: None)
d.open()
else:
d = Question(
msg,
lambda b: f(*args, self.password) if b else None,
yes_str=_("OK"),
no_str=_("Cancel"),
title=_("Confirm action"))
d.open()
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Are you sure you want to delete wallet {}?").format(basename),
self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except InvalidPassword:
self.show_error("Invalid password")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Display your seed?"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def change_password(self, cb):
def on_success(old_password, new_password):
# called if old_password works on self.wallet
self.password = new_password
if self._use_single_password:
path = self.wallet.storage.path
self.stop_wallet()
update_password_for_directory(self.electrum_config, old_password, new_password)
self.load_wallet_by_name(path)
msg = _("Password updated successfully")
else:
self.wallet.update_password(old_password, new_password)
msg = _("Password updated for {}").format(os.path.basename(self.wallet.storage.path))
self.show_info(msg)
on_failure = lambda: self.show_error(_("Password not updated"))
d = ChangePasswordDialog(self, self.wallet, on_success, on_failure)
d.open()
def pin_code_dialog(self, cb):
if self._use_single_password and self.has_pin_code():
def on_choice(choice):
if choice == 0:
self.change_pin_code(cb)
else:
self.reset_pin_code(cb)
choices = {0:'Change PIN code', 1:'Reset PIN'}
dialog = ChoiceDialog(
_('PIN Code'), choices, 0,
on_choice,
keep_choice_order=True)
dialog.open()
else:
self.change_pin_code(cb)
def reset_pin_code(self, cb):
on_success = lambda x: self._set_new_pin_code(None, cb)
d = PasswordDialog(self,
basename = self.wallet.basename(),
check_password = self.wallet.check_password,
on_success=on_success,
on_failure=lambda: None,
is_change=False,
has_password=self.wallet.has_password())
d.open()
def _set_new_pin_code(self, new_pin, cb):
self.electrum_config.set_key('pin_code', new_pin)
cb()
self.show_info(_("PIN updated") if new_pin else _('PIN disabled'))
def change_pin_code(self, cb):
on_failure = lambda: self.show_error(_("PIN not updated"))
on_success = lambda old_pin, new_pin: self._set_new_pin_code(new_pin, cb)
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=on_failure,
is_change=True,
has_password = self.has_pin_code())
d.open()
def save_backup(self):
if platform != 'android':
backup_dir = self.electrum_config.get_backup_dir()
if backup_dir:
self._save_backup(backup_dir)
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
return
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
try:
backup_dir = util.android_backup_dir()
except OSError as e:
self.logger.exception("Cannot save backup")
self.show_error(f"Cannot save backup: {e!r}")
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
Clock.schedule_once(lambda dt: self._save_backup(backup_dir))
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self, backup_dir):
try:
new_path = self.wallet.save_backup(backup_dir)
except Exception as e:
self.logger.exception("Failed to save wallet backup")
self.show_error("Failed to save wallet backup" + '\n' + str(e))
return
self.show_info(_("Backup saved:") + f"\n{new_path}")
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
def import_channel_backup(self, encrypted):
d = Question(_('Import Channel Backup?'), lambda b: self._import_channel_backup(b, encrypted))
d.open()
def _import_channel_backup(self, b, encrypted):
if not b:
return
try:
self.wallet.lnworker.import_channel_backup(encrypted)
except Exception as e:
self.logger.exception("failed to import backup")
self.show_error("failed to import backup" + '\n' + str(e))
return
self.lightning_channels_dialog()
def lightning_status(self):
if self.wallet.has_lightning():
if self.wallet.lnworker.has_deterministic_node_id():
status = _('Enabled')
else:
status = _('Enabled, non-recoverable channels')
else:
if self.wallet.can_have_lightning():
status = _('Not enabled')
else:
status = _("Not available for this wallet.")
return status
def on_lightning_status(self, root):
if self.wallet.has_lightning():
if self.wallet.lnworker.has_deterministic_node_id():
pass
else:
if self.wallet.db.get('seed_type') == 'segwit':
msg = _("Your channels cannot be recovered from seed, because they were created with an old version of Electrum. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed")
else:
msg = _("Your channels cannot be recovered from seed. "
"This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
"If you want to have recoverable channels, you must create a new wallet with an Electrum seed")
self.show_info(msg)
elif self.wallet.can_have_lightning():
root.dismiss()
if self.wallet.can_have_deterministic_lightning():
msg = _(
"Lightning is not enabled because this wallet was created with an old version of Electrum. "
"Create lightning keys?")
else:
msg = _(
"Warning: this wallet type does not support channel recovery from seed. "
"You will need to backup your wallet everytime you create a new wallet. "
"Create lightning keys?")
d = Question(msg, self._enable_lightning, title=_('Enable Lightning?'))
d.open()
def _enable_lightning(self, b):
if not b:
return
self.wallet.init_lightning(password=self.password)
self.show_info(_('Lightning keys have been initialized.'))
|
test_qt_notifications.py
|
import sys
import threading
import time
import warnings
from unittest.mock import patch
import dask.array as da
import pytest
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QPushButton
from napari._qt.dialogs.qt_notification import NapariQtNotification
from napari.utils.notifications import (
ErrorNotification,
Notification,
NotificationSeverity,
notification_manager,
)
PY37_OR_LOWER = sys.version_info[:2] <= (3, 7)
def _threading_warn():
thr = threading.Thread(target=_warn)
thr.start()
def _warn():
time.sleep(0.1)
warnings.warn('warning!')
def _threading_raise():
thr = threading.Thread(target=_raise)
thr.start()
def _raise():
time.sleep(0.1)
raise ValueError("error!")
@pytest.fixture
def clean_current(monkeypatch, qtbot):
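# This fixture keeps notifications from attaching to a real main window:
# _QtMainWindow.current is patched to return None, and NapariQtNotification.show is wrapped
# so each widget is registered with qtbot and cleaned up after the test.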
from napari._qt.qt_main_window import _QtMainWindow
def none_return(*_, **__):
return None
base_show = NapariQtNotification.show
def store_widget(self, *args, **kwargs):
qtbot.addWidget(self)
base_show(self, *args, **kwargs)
# monkeypatch.setattr(qt_notification.QPropertyAnimation, "start", none_return)
monkeypatch.setattr(_QtMainWindow, "current", none_return)
monkeypatch.setattr(NapariQtNotification, "show", store_widget)
@pytest.mark.parametrize(
"raise_func,warn_func",
[(_raise, _warn), (_threading_raise, _threading_warn)],
)
@pytest.mark.order(11)
def test_notification_manager_via_gui(
qtbot, raise_func, warn_func, clean_current
):
"""
Test that the notification_manager intercepts `sys.excepthook` and
`threading.excepthook`.
"""
errButton = QPushButton()
warnButton = QPushButton()
errButton.clicked.connect(raise_func)
warnButton.clicked.connect(warn_func)
with notification_manager:
for btt, expected_message in [
(errButton, 'error!'),
(warnButton, 'warning!'),
]:
notification_manager.records = []
qtbot.mouseClick(btt, Qt.LeftButton)
qtbot.wait(500)
assert len(notification_manager.records) == 1
assert notification_manager.records[0].message == expected_message
notification_manager.records = []
@pytest.mark.parametrize('severity', NotificationSeverity.__members__)
@patch('napari._qt.dialogs.qt_notification.QDialog.show')
def test_notification_display(mock_show, severity, monkeypatch):
"""Test that NapariQtNotification can present a Notification event.
NOTE: in napari.utils._tests.test_notification_manager, we already test
that the notification manager successfully overrides sys.excepthook,
and warnings.showwarning... and that it emits an event which is an instance
of napari.utils.notifications.Notification.
in `get_app()`, we connect `notification_manager.notification_ready` to
`NapariQtNotification.show_notification`, so all we have to test here is
that show_notification is capable of receiving various event types.
(we don't need to test that wiring here.)
"""
from napari.utils.settings import get_settings
settings = get_settings()
monkeypatch.delenv('NAPARI_CATCH_ERRORS', raising=False)
monkeypatch.setattr(settings.application, 'gui_notification_level', 'info')
notif = Notification('hi', severity, actions=[('click', lambda x: None)])
NapariQtNotification.show_notification(notif)
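# with gui_notification_level set to 'info', only notifications at INFO severity or above
# should actually be shown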
if NotificationSeverity(severity) >= NotificationSeverity.INFO:
mock_show.assert_called_once()
else:
mock_show.assert_not_called()
dialog = NapariQtNotification.from_notification(notif)
assert not dialog.property('expanded')
dialog.toggle_expansion()
assert dialog.property('expanded')
dialog.toggle_expansion()
assert not dialog.property('expanded')
dialog.close()
@patch('napari._qt.dialogs.qt_notification.QDialog.show')
def test_notification_error(mock_show, monkeypatch, clean_current):
from napari.utils.settings import get_settings
settings = get_settings()
monkeypatch.delenv('NAPARI_CATCH_ERRORS', raising=False)
monkeypatch.setattr(settings.application, 'gui_notification_level', 'info')
try:
raise ValueError('error!')
except ValueError as e:
notif = ErrorNotification(e)
dialog = NapariQtNotification.from_notification(notif)
bttn = dialog.row2_widget.findChild(QPushButton)
assert bttn.text() == 'View Traceback'
mock_show.assert_not_called()
bttn.click()
mock_show.assert_called_once()
@pytest.mark.skipif(PY37_OR_LOWER, reason="Fails on py37")
def test_notifications_error_with_threading(make_napari_viewer):
"""Test notifications of `threading` threads, using a dask example."""
random_image = da.random.random(size=(50, 50))
with notification_manager:
viewer = make_napari_viewer()
viewer.add_image(random_image)
result = da.divide(random_image, da.zeros(50, 50))
viewer.add_image(result)
assert len(notification_manager.records) >= 1
notification_manager.records = []
|
shell_backend.py
|
from . import Backend, SeamlessTransformationError, JoblessRemoteError
import asyncio
import sys, os, tempfile, shutil
import psutil
import json
import subprocess, tarfile
from functools import partial
import numpy as np
from io import BytesIO
import multiprocessing as mp
import traceback
PROCESS = None
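# PROCESS holds the most recent shell subprocess (if any) so that kill_children() can clean up
# any child processes it spawned once the job finishes or is cancelled.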
def kill_children():
process = PROCESS
if process is None:
return
children = []
try:
children = psutil.Process(process.pid).children(recursive=True)
except:
pass
for child in children:
try:
child.kill()
except:
pass
class ShellBackend(Backend):
JOB_TEMPDIR = None
support_symlinks = True
def __init__(self, *args, executor, **kwargs):
self.executor = executor
self.coros = {}
super().__init__(*args, **kwargs)
def get_job_status(self, checksum, identifier):
return 2, None, None
async def run(self, checksum, transformation, prepared_transformation, tempdir, env):
"""Return awaitable. To be implemented by subclass"""
raise NotImplementedError
def _run(self, checksum, transformation, prepared_transformation):
from .file_transformer_plugin import write_files
global PROCESS
PROCESS = None
old_cwd = os.getcwd()
tempdir = tempfile.mkdtemp(prefix="jobless-", dir=self.JOB_TEMPDIR)
print("Running shell job in {}".format(tempdir), file=sys.stderr)
try:
os.chdir(tempdir)
env = {}
write_files(prepared_transformation, env, self.support_symlinks)
return self.run(checksum, transformation, prepared_transformation, tempdir, env)
finally:
kill_children()
os.chdir(old_cwd)
shutil.rmtree(tempdir, ignore_errors=True)
def launch_transformation(self, checksum, transformation, prepared_transformation):
prepared_transformation = prepared_transformation.copy()
for key in prepared_transformation:
if key in ("__checksum__", "__env__"):
continue
filename, value, env_value = prepared_transformation[key]
if filename is None:
continue
prepared_transformation[key] = os.path.abspath(os.path.expanduser(filename)), value, env_value
def func(queue):
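# run the job in a separate process; the outcome comes back through the manager queue as
# (has_error, payload), where payload is either the result or an (exception, formatted
# traceback) pair, so errors survive the process boundary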
try:
result = self._run(checksum, transformation, prepared_transformation)
queue.put((False, result))
except Exception as exc:
tb = traceback.format_exc()
queue.put((True, (exc, tb)))
def func2():
try:
with mp.Manager() as manager:
queue = manager.Queue()
p = mp.Process(target=func, args=(queue,))
p.start()
p.join()
has_error, payload = queue.get()
if has_error:
exc, tb = payload
if isinstance(exc, SeamlessTransformationError):
raise exc
else:
raise JoblessRemoteError(exc, tb)
else:
result = payload
return result
finally:
self.coros.pop(checksum, None)
coro = asyncio.get_event_loop().run_in_executor(self.executor, func2)
self.coros[checksum] = asyncio.ensure_future(coro)
return coro, None
def cancel_job(self, checksum, identifier):
if checksum in self.coros:
coro = self.coros.pop(checksum)
task = asyncio.ensure_future(coro)
task.cancel()
def get_docker_command_and_image(prepared_transformation):
if "bashcode" in prepared_transformation:
docker_command = prepared_transformation["bashcode"][1]
else:
docker_command = prepared_transformation["docker_command"][1]
if isinstance(docker_command, bytes):
docker_command = docker_command.decode()
if "docker_image_" in prepared_transformation:
docker_image = prepared_transformation["docker_image_"][1].decode()
docker_image = json.loads(docker_image)
if isinstance(docker_image, bytes):
docker_image = docker_image.decode()
else:
docker_image = prepared_transformation["__env__"]["docker"]["name"]
return docker_command, docker_image
def read_data(data):
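# Interpret raw result bytes by trying, in order: a NumPy .npy payload, then UTF-8 JSON;
# if the bytes cannot be decoded as text, fall back to a raw uint8 buffer, and if they
# decode but are not valid JSON, return the plain string.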
try:
npdata = BytesIO(data)
return np.load(npdata)
except (ValueError, OSError):
try:
try:
sdata = data.decode()
except Exception:
return np.frombuffer(data, dtype=np.uint8)
return json.loads(sdata)
except ValueError:
return sdata
def execute_local(bashcode, env, resultfile):
global PROCESS
try:
bash_header = """set -u -e
""" # don't add "trap 'jobs -p | xargs -r kill' EXIT" as it gives serious problems
bashcode2 = bash_header + bashcode
process = subprocess.run(
bashcode2, capture_output=True, shell=True, check=True,
executable='/bin/bash',
env=env
)
PROCESS = process
except subprocess.CalledProcessError as exc:
stdout = exc.stdout
try:
stdout = stdout.decode()
except:
pass
stderr = exc.stderr
try:
stderr = stderr.decode()
except:
pass
raise SeamlessTransformationError("""
Bash transformer exception
==========================
*************************************************
* Command
*************************************************
{}
*************************************************
*************************************************
* Standard output
*************************************************
{}
*************************************************
*************************************************
* Standard error
*************************************************
{}
*************************************************
""".format(bashcode, stdout, stderr)) from None
if not os.path.exists(resultfile):
msg = """
Bash transformer exception
==========================
Error: Result file {} does not exist
*************************************************
* Command
*************************************************
{}
*************************************************
""".format(resultfile, bashcode)
try:
stdout = process.stdout.decode()
if len(stdout):
msg += """*************************************************
* Standard output
*************************************************
{}
*************************************************
""".format(stdout)
stderr = process.stderr.decode()
if len(stderr):
msg += """*************************************************
* Standard error
*************************************************
{}
*************************************************
""".format(stderr)
except:
pass
raise SeamlessTransformationError(msg)
else:
stdout = process.stdout
try:
stdout = stdout.decode()
except Exception:
pass
stderr = process.stderr
try:
stderr = stderr.decode()
except Exception:
pass
return parse_resultfile(resultfile)
def execute_docker(docker_command, docker_image, tempdir, env, resultfile):
"""Ignore docker_options"""
from requests.exceptions import ConnectionError
from urllib3.exceptions import ProtocolError
import docker as docker_module
docker_client = docker_module.from_env()
volumes, options = {}, {}
volumes[tempdir] = {"bind": "/run", "mode": "rw"}
options["working_dir"] = "/run"
options["volumes"] = volumes
options["environment"] = env
with open("DOCKER-COMMAND","w") as f:
bash_header = """set -u -e
""" # don't add "trap 'jobs -p | xargs -r kill' EXIT" as it gives serious problems
f.write(bash_header)
f.write(docker_command)
full_docker_command = "bash DOCKER-COMMAND"
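# DOCKER-COMMAND is written into the job tempdir (the current working directory), which the
# volumes/working_dir options above bind-mount at /run inside the container, so
# "bash DOCKER-COMMAND" resolves relative to the container's working directory.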
try:
try:
_creating_container = True
container = docker_client.containers.create(
docker_image,
full_docker_command,
**options
)
finally:
_creating_container = False
try:
container.start()
exit_status = container.wait()['StatusCode']
stdout = container.logs(stdout=True, stderr=False)
try:
stdout = stdout.decode()
except Exception:
pass
stderr = container.logs(stdout=False, stderr=True)
try:
stderr = stderr.decode()
except Exception:
pass
if exit_status != 0:
raise SeamlessTransformationError("""
Docker transformer exception
============================
Exit code: {}
*************************************************
* Command
*************************************************
{}
*************************************************
*************************************************
* Standard output
*************************************************
{}
*************************************************
* Standard error
*************************************************
{}
*************************************************
""".format(exit_status, docker_command, stdout, stderr)) from None
except ConnectionError as exc:
msg = "Unknown connection error"
if len(exc.args) == 1:
exc2 = exc.args[0]
if isinstance(exc2, ProtocolError):
if len(exc2.args) == 2:
a, exc3 = exc2.args
msg = "Docker gave an error: {}: {}".format(a, exc3)
if a.startswith("Connection aborted"):
if isinstance(exc3, FileNotFoundError):
if len(exc3.args) == 2:
a1, a2 = exc3.args
if a1 == 2 or a2 == "No such file or directory":
msg = "Cannot connect to Docker; did you expose the Docker socket to Seamless?"
raise SeamlessTransformationError(msg) from None
if not os.path.exists(resultfile):
msg = """
Docker transformer exception
============================
Error: Result file {} does not exist
*************************************************
* Command
*************************************************
{}
*************************************************
""".format(docker_command)
try:
stdout = container.logs(stdout=True, stderr=False)
try:
stdout = stdout.decode()
except Exception:
pass
if len(stdout):
msg += """*************************************************
* Standard output
*************************************************
{}
*************************************************
""".format(stdout)
stderr = container.logs(stdout=False, stderr=True)
try:
stderr = stderr.decode()
except Exception:
pass
if len(stderr):
msg += """*************************************************
* Standard error
*************************************************
{}
*************************************************
""".format(stderr)
except Exception:
pass
raise SeamlessTransformationError(msg)
else:
if len(stdout):
print(stdout[:1000])
if len(stderr):
print(stderr[:1000], file=sys.stderr)
return parse_resultfile(resultfile)
finally:
try:
container.remove()
except Exception:
pass
def parse_resultfile(resultfile):
try:
tar = tarfile.open(resultfile)
result = {}
for member in tar.getnames():
data = tar.extractfile(member).read()
result[member] = read_data(data)
except (ValueError, tarfile.CompressionError, tarfile.ReadError):
with open(resultfile, "rb") as f:
resultdata = f.read()
result = read_data(resultdata)
return serialize(result)
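# Illustrative sketch (not part of the original module): the two RESULT shapes that
# parse_resultfile above accepts - a plain file holding a single value, or a tar
# archive whose members become the keys of a dict. File names below are examples.
def _example_result_file_shapes(tmpdir="/tmp"):
    import os as _os
    import tarfile as _tarfile
    plain = _os.path.join(tmpdir, "RESULT_plain")
    with open(plain, "wb") as f:
        f.write(b'{"answer": 42}')            # -> parsed as a single JSON value
    member = _os.path.join(tmpdir, "part.json")
    with open(member, "wb") as f:
        f.write(b"[1, 2, 3]")
    bundled = _os.path.join(tmpdir, "RESULT_tar")
    with _tarfile.open(bundled, "w") as tar:
        tar.add(member, arcname="part.json")  # -> parsed as {"part.json": [1, 2, 3]}
    return parse_resultfile(plain), parse_resultfile(bundled)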
####################################################################################
class ShellBashBackend(ShellBackend):
support_symlinks = True
def run(self, checksum, transformation, prepared_transformation, tempdir, env):
msg = "Submit shell bash job, checksum {}"
print(msg.format(prepared_transformation["__checksum__"]), file=sys.stderr)
bashcode = prepared_transformation["bashcode"][1]
resultfile = "RESULT"
try:
return execute_local(bashcode, env, resultfile)
except SeamlessTransformationError as exc:
raise exc from None
class ShellDockerBackend(ShellBackend):
support_symlinks = False
def __init__(self, *args, **kwargs):
# imported here only so that a missing docker SDK is detected when the backend is constructed
import docker as docker_module  # noqa: F401
from requests.exceptions import ConnectionError  # noqa: F401
super().__init__(*args, **kwargs)
def run(self, checksum, transformation, prepared_transformation, tempdir, env):
docker_command, docker_image = get_docker_command_and_image(
prepared_transformation
)
msg = "Submit shell docker job, checksum {}, image {}"
print(
msg.format(
prepared_transformation["__checksum__"],
docker_image
),
file=sys.stderr
)
resultfile = "RESULT"
try:
return execute_docker(docker_command, docker_image, tempdir, env, resultfile)
except SeamlessTransformationError as exc:
raise exc from None
from silk.mixed.io.serialization import serialize
|
growl.py
|
#__LICENSE_GOES_HERE__
from util.packable import Packable
from util.primitives.error_handling import traceguard
from util.primitives import Storage
from gui.toast import popup
from threading import Thread, currentThread
from peak.util.addons import AddOn
from logging import getLogger; log = getLogger('growl')
GROWL_UDP_PORT = 9887
GROWL_TYPE_REGISTRATION = 0 # The packet type of registration packets with MD5 authentication.
GROWL_PREF = 'growl.popups'
def on_packet(packet):
import wx
@wx.CallAfter
def guithread():
url = None
extra = getattr(packet, 'extra', None)
if extra:
url = extra.get('url', None)
popup(header=packet.title,
minor=packet.description,
onclick=url)
class GrowlAddon(AddOn):
_server_thread = None
def __init__(self, subject):
self.profile = subject
did_setup = False
def setup(self):
if self.did_setup:
return
self.did_setup = True
self.profile.prefs.add_observer(self.on_pref_change, GROWL_PREF)
self.on_pref_change()
def on_pref_change(self, *a):
enabled = self.profile.prefs.get(GROWL_PREF, False)
if enabled:
self.start_server_thread()
else:
self.stop_server_thread()
def _server_thread_running(self):
return self._server_thread is not None and self._server_thread.is_alive()
def start_server_thread(self):
if not self._server_thread_running():
self._server_thread = thread = Thread(target=_udp_server_loop)
thread.daemon = True
thread.start()
def stop_server_thread(self):
if self._server_thread_running():
self._server_thread.die_please = True
self._server_thread = None
class GrowlPacketHeader(Packable):
fmt = ('protocol_version', 'B',
'type_notification', 'B',
'flags', 'H',
'len_notification', 'H',
'len_title', 'H',
'len_description', 'H',
'len_appname', 'H')
def unpack_packet(data):
packet_header, data = GrowlPacketHeader.unpack(data)
packet = Storage(extra=Storage())
for attr in ('notification', 'title', 'description', 'appname'):
value, data = readstring(data, getattr(packet_header, 'len_' + attr))
packet[attr] = value
#md5, data = data[:16], data[16:]
if packet.description.find('\0') != -1:
packet.description, extra = packet.description.split('\0', 1)
import simplejson
packet.extra = simplejson.loads(extra)
return packet, data
def readstring(d, slen):
return d[:slen], d[slen:]
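# Illustrative sketch (not part of the original module): building a minimal packet
# that unpack_packet above can parse. It assumes the Packable header is laid out in
# network byte order ('!BBHHHHH'), as in the Growl UDP protocol; adjust the struct
# format if util.packable uses a different byte order. The MD5 trailer is omitted
# because unpack_packet ignores it.
def _example_build_growl_packet(title=b'Hi', description=b'Hello world',
                                notification=b'note', appname=b'demo'):
    import struct
    header = struct.pack('!BBHHHHH', 1, 1, 0,
                         len(notification), len(title),
                         len(description), len(appname))
    return header + notification + title + description + appname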
def _udp_server_loop():
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.bind(('', GROWL_UDP_PORT))
except socket.error:
log.error('cannot initialize growl server loop: could not bind to port %r', GROWL_UDP_PORT)
return
while not getattr(currentThread(), 'die_please', False):
with traceguard:
try:
data, addr = s.recvfrom(1024 * 10)
except socket.timeout:
continue
if data[0] != chr(1):
# print 'first byte was not 1: %r %s', (data[0], ord(data[0]))
continue
if data[1] == chr(GROWL_TYPE_REGISTRATION):
# print 'ignoring registration packet'
continue
packet, data = unpack_packet(data)
# assert not data
on_packet(packet)
_server_thread = None
|
lambda_executors.py
|
import os
import re
import sys
import glob
import json
import time
import logging
import threading
import traceback
import subprocess
import six
import base64
from multiprocessing import Process, Queue
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote # for Python 2.7
from localstack import config
from localstack.utils import bootstrap
from localstack.utils.aws import aws_stack
from localstack.utils.common import (
CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file, rm_rf, in_docker, last_index_of,
long_uid, now, to_str, to_bytes, run, cp_r, json_safe, get_free_tcp_port, rm_docker_container)
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils.aws.dead_letter_queue import lambda_error_to_dead_letter_queue
from localstack.utils.aws.dead_letter_queue import sqs_error_to_dead_letter_queue
from localstack.utils.aws.lambda_destinations import lambda_result_to_destination
from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs, cloudwatched
from localstack.services.awslambda.lambda_utils import (
LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11, LAMBDA_RUNTIME_PROVIDED)
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
LAMBDA_API_UNIQUE_PORTS = 500
LAMBDA_API_PORT_OFFSET = 9000
MAX_ENV_ARGS_LENGTH = 20000
INTERNAL_LOG_PREFIX = 'ls-daemon: '
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
# SQS event source name
EVENT_SOURCE_SQS = 'aws:sqs'
# IP address of main Docker container (lazily initialized)
DOCKER_MAIN_CONTAINER_IP = None
# maps lambda arns to concurrency locks
LAMBDA_CONCURRENCY_LOCK = {}
# CWD folder of handler code in Lambda containers
DOCKER_TASK_FOLDER = '/var/task'
class InvocationException(Exception):
def __init__(self, message, log_output, result=None):
super(InvocationException, self).__init__(message)
self.log_output = log_output
self.result = result
def get_from_event(event, key):
try:
return event['Records'][0][key]
except KeyError:
return None
def is_java_lambda(lambda_details):
runtime = getattr(lambda_details, 'runtime', lambda_details)
return runtime in [LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11]
def is_nodejs_runtime(lambda_details):
runtime = getattr(lambda_details, 'runtime', lambda_details) or ''
return runtime.startswith('nodejs')
def _store_logs(func_details, log_output, invocation_time=None, container_id=None):
log_group_name = '/aws/lambda/%s' % func_details.name()
container_id = container_id or short_uid()
invocation_time = invocation_time or int(time.time() * 1000)
invocation_time_secs = int(invocation_time / 1000)
time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time_secs))
log_stream_name = '%s/[LATEST]%s' % (time_str, container_id)
return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)
def get_main_endpoint_from_container():
global DOCKER_MAIN_CONTAINER_IP
if not config.HOSTNAME_FROM_LAMBDA and DOCKER_MAIN_CONTAINER_IP is None:
DOCKER_MAIN_CONTAINER_IP = False
try:
if in_docker():
DOCKER_MAIN_CONTAINER_IP = bootstrap.get_main_container_ip()
LOG.info('Determined main container target IP: %s' % DOCKER_MAIN_CONTAINER_IP)
except Exception as e:
container_name = bootstrap.get_main_container_name()
LOG.info('Unable to get IP address of main Docker container "%s": %s' %
(container_name, e))
# return (1) predefined endpoint host, or (2) main container IP, or (3) Docker host (e.g., bridge IP)
return config.HOSTNAME_FROM_LAMBDA or DOCKER_MAIN_CONTAINER_IP or config.DOCKER_HOST_FROM_CONTAINER
class InvocationResult(object):
def __init__(self, result, log_output=''):
if isinstance(result, InvocationResult):
raise Exception('Unexpected invocation result type: %s' % result)
self.result = result
self.log_output = log_output or ''
class LambdaExecutor(object):
""" Base class for Lambda executors. Subclasses must overwrite the _execute method """
def __init__(self):
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
def _prepare_environment(self, func_details):
# setup environment pre-defined variables for docker environment
result = func_details.envvars.copy()
# injecting aws credentials into docker environment if not provided
aws_stack.inject_test_credentials_into_env(result)
# injecting the region into the docker environment
aws_stack.inject_region_into_env(result, func_details.region())
return result
def execute(self, func_arn, func_details, event, context=None, version=None,
asynchronous=False, callback=None):
def do_execute(*args):
@cloudwatched('lambda')
def _run(func_arn=None):
# set the invocation time in milliseconds
invocation_time = int(time.time() * 1000)
# start the execution
raised_error = None
result = None
dlq_sent = None
try:
result = self._execute(func_arn, func_details, event, context, version)
except Exception as e:
raised_error = e
if asynchronous:
if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:
sqs_queue_arn = get_from_event(event, 'eventSourceARN')
if sqs_queue_arn:
# event source is SQS, send event back to dead letter queue
dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)
else:
# event source is not SQS, send back to lambda dead letter queue
lambda_error_to_dead_letter_queue(func_details, event, e)
raise e
finally:
self.function_invoke_times[func_arn] = invocation_time
callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)
lambda_result_to_destination(func_details, event, result, asynchronous, raised_error)
# return final result
return result
return _run(func_arn=func_arn)
# Inform users about asynchronous mode of the lambda execution.
if asynchronous:
LOG.debug('Lambda executed in Event (asynchronous) mode, no response will be returned to caller')
FuncThread(do_execute).start()
return InvocationResult(None, log_output='Lambda executed asynchronously.')
return do_execute()
def _execute(self, func_arn, func_details, event, context=None, version=None):
""" This method must be overwritten by subclasses. """
raise Exception('Not implemented.')
def startup(self):
pass
def cleanup(self, arn=None):
pass
def run_lambda_executor(self, cmd, event=None, func_details=None, env_vars=None):
env_vars = dict(env_vars or {})
runtime = func_details.runtime or ''
stdin_str = None
event_body = event if event is not None else env_vars.get('AWS_LAMBDA_EVENT_BODY')
event_body = json.dumps(event_body) if isinstance(event_body, dict) else event_body
event_body = event_body or ''
is_large_event = len(event_body) > MAX_ENV_ARGS_LENGTH
is_provided = runtime.startswith(LAMBDA_RUNTIME_PROVIDED)
if not is_large_event and func_details and is_provided and env_vars.get('DOCKER_LAMBDA_USE_STDIN') == '1':
# Note: certain "provided" runtimes (e.g., Rust programs) can block if we pass in
# the event payload via stdin, hence we rewrite the command to "echo ... | ..." below
env_updates = {
'PATH': env_vars.get('PATH') or os.environ.get('PATH', ''),
'AWS_LAMBDA_EVENT_BODY': to_str(event_body), # Note: seems to be needed for provided runtimes!
'DOCKER_LAMBDA_USE_STDIN': '1'
}
env_vars.update(env_updates)
# Note: $AWS_LAMBDA_COGNITO_IDENTITY='{}' causes Rust Lambdas to hang
env_vars.pop('AWS_LAMBDA_COGNITO_IDENTITY', None)
cmd = re.sub(r'(.*)(%s\s+(run|start|exec))' % self._docker_cmd(),
r'\1echo $AWS_LAMBDA_EVENT_BODY | \2', cmd)
if is_large_event:
# in case of very large event payloads, we need to pass them via stdin
LOG.debug('Received large Lambda event payload (length %s) - passing via stdin' % len(event_body))
env_vars['DOCKER_LAMBDA_USE_STDIN'] = '1'
def add_env_var(cmd, name, value=None):
value = value or '$%s' % name
return re.sub(r'(%s)\s+(run|exec)\s+' % config.DOCKER_CMD,
r'\1 \2 -e %s="%s" ' % (name, value), cmd)
def rm_env_var(cmd, name):
return re.sub(r'-e\s+%s="?[^"\s]+"?' % name, '', cmd)
if env_vars.get('DOCKER_LAMBDA_USE_STDIN') == '1':
stdin_str = event_body
if not is_provided:
env_vars.pop('AWS_LAMBDA_EVENT_BODY', None)
if 'DOCKER_LAMBDA_USE_STDIN' not in cmd:
cmd = add_env_var(cmd, 'DOCKER_LAMBDA_USE_STDIN', '1')
cmd = rm_env_var(cmd, 'AWS_LAMBDA_EVENT_BODY')
else:
if 'AWS_LAMBDA_EVENT_BODY' not in env_vars:
env_vars['AWS_LAMBDA_EVENT_BODY'] = to_str(event_body)
cmd = add_env_var(cmd, 'AWS_LAMBDA_EVENT_BODY')
cmd = rm_env_var(cmd, 'DOCKER_LAMBDA_USE_STDIN')
kwargs = {'stdin': True, 'inherit_env': True, 'asynchronous': True}
process = run(cmd, env_vars=env_vars, stderr=subprocess.PIPE, outfile=subprocess.PIPE, **kwargs)
event_stdin_bytes = stdin_str and to_bytes(stdin_str)
result, log_output = process.communicate(input=event_stdin_bytes)
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
return_code = process.returncode
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
if isinstance(result, six.string_types) and '\n' in result:
lines = result.split('\n')
idx = last_index_of(lines, lambda line: line and not line.startswith(INTERNAL_LOG_PREFIX))
if idx >= 0:
result = lines[idx]
additional_logs = '\n'.join(lines[:idx] + lines[idx + 1:])
log_output += '\n%s' % additional_logs
log_formatted = log_output.strip().replace('\n', '\n> ')
func_arn = func_details and func_details.arn()
LOG.debug('Lambda %s result / log output:\n%s\n> %s' % (func_arn, result.strip(), log_formatted))
# store log output - TODO get live logs from `process` above?
_store_logs(func_details, log_output)
if return_code != 0:
raise InvocationException('Lambda process returned error status code: %s. Result: %s. Output:\n%s' %
(return_code, result, log_output), log_output, result)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
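# Illustrative sketch (not part of localstack): how the "last line of stdout is the
# result" split in run_lambda_executor above works. _last_index_of below is a local
# stand-in for localstack's last_index_of helper and exists only for this example.
def _example_split_result_and_logs(raw_stdout):
    def _last_index_of(items, predicate):
        for i in range(len(items) - 1, -1, -1):
            if predicate(items[i]):
                return i
        return -1
    lines = raw_stdout.split('\n')
    idx = _last_index_of(lines, lambda line: line and not line.startswith(INTERNAL_LOG_PREFIX))
    if idx < 0:
        return raw_stdout, ''
    result = lines[idx]
    logs = '\n'.join(lines[:idx] + lines[idx + 1:])
    return result, logs
# e.g. _example_split_result_and_logs('ls-daemon: starting\n{"ok": true}')
# returns ('{"ok": true}', 'ls-daemon: starting')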
class ContainerInfo:
""" Contains basic information about a docker container. """
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
""" Abstract executor class for executing Lambda functions in Docker containers """
def prepare_execution(self, func_details, env_vars, command):
raise Exception('Not implemented')
def _docker_cmd(self):
""" Return the string to be used for running Docker commands. """
return config.DOCKER_CMD
def prepare_event(self, environment, event_body):
""" Return the event as a stdin string. """
# amend the environment variables for execution
environment['AWS_LAMBDA_EVENT_BODY'] = event_body
return event_body.encode()
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
runtime = func_details.runtime
handler = func_details.handler
environment = self._prepare_environment(func_details)
# configure USE_SSL in environment
if config.USE_SSL:
environment['USE_SSL'] = '1'
# prepare event body
if not event:
LOG.info('Empty event body specified for invocation of Lambda "%s"' % func_arn)
event = {}
event_body = json.dumps(json_safe(event))
stdin = self.prepare_event(environment, event_body)
main_endpoint = get_main_endpoint_from_container()
environment['LOCALSTACK_HOSTNAME'] = main_endpoint
environment['EDGE_PORT'] = str(config.EDGE_PORT)
environment['_HANDLER'] = handler
if os.environ.get('HTTP_PROXY'):
environment['HTTP_PROXY'] = os.environ['HTTP_PROXY']
if func_details.timeout:
environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)
if context:
environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
environment['AWS_LAMBDA_COGNITO_IDENTITY'] = json.dumps(context.cognito_identity or {})
if context.client_context is not None:
environment['AWS_LAMBDA_CLIENT_CONTEXT'] = json.dumps(to_str(
base64.b64decode(to_bytes(context.client_context))))
# custom command to execute in the container
command = ''
events_file_path = ''
if config.LAMBDA_JAVA_OPTS and is_java_lambda(runtime):
# if running a Java Lambda with our custom executor, set up classpath arguments
java_opts = Util.get_java_opts()
stdin = None
# copy executor jar into temp directory
target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
if not os.path.exists(target_file):
cp_r(LAMBDA_EXECUTOR_JAR, target_file)
# TODO cleanup once we have custom Java Docker image
events_file = '_lambda.events.%s.json' % short_uid()
events_file_path = os.path.join(lambda_cwd, events_file)
save_file(events_file_path, event_body)
# construct Java command
classpath = Util.get_java_classpath(target_file)
command = ("bash -c 'cd %s; java %s -cp \"%s\" \"%s\" \"%s\" \"%s\"'" %
(DOCKER_TASK_FOLDER, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, events_file))
# accept any self-signed certificates for outgoing calls from the Lambda
if is_nodejs_runtime(runtime):
environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'
# determine the command to be executed (implemented by subclasses)
cmd = self.prepare_execution(func_details, environment, command)
# copy events file into container, if necessary
if events_file_path:
container_name = self.get_container_name(func_details.arn())
self.copy_into_container(events_file_path, container_name, DOCKER_TASK_FOLDER)
# run Lambda executor and fetch invocation result
LOG.info('Running lambda cmd: %s' % cmd)
result = self.run_lambda_executor(cmd, event=stdin, env_vars=environment, func_details=func_details)
# clean up events file
events_file_path and os.path.exists(events_file_path) and rm_rf(events_file_path)
# TODO: delete events file from container!
return result
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
""" Executor class for executing Lambda functions in re-usable Docker containers """
def __init__(self):
super(LambdaExecutorReuseContainers, self).__init__()
# locking thread for creation/destruction of docker containers.
self.docker_container_lock = threading.RLock()
# On each invocation we try to construct a port unlikely to conflict
# with a previously invoked lambda function. This is a problem with at
# least the lambci/lambda:go1.x container, which execs a go program that
# attempts to bind to the same default port.
self.next_port = 0
self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
self.port_offset = LAMBDA_SERVER_PORT_OFFSET
def prepare_execution(self, func_details, env_vars, command):
func_arn = func_details.arn()
lambda_cwd = func_details.cwd
runtime = func_details.runtime
handler = func_details.handler
# check whether the Lambda has been invoked before
has_been_invoked_before = func_arn in self.function_invoke_times
# Choose a port for this invocation
with self.docker_container_lock:
env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)
self.next_port = (self.next_port + 1) % self.max_port
# create/verify the docker container is running.
LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
container_info = self.prime_docker_container(func_details, env_vars, lambda_cwd)
if not command:
command = '%s %s' % (container_info.entry_point, handler)
# create file with environment variables
env_vars_flag = Util.create_env_vars_file_flag(env_vars)
# determine files to be copied into the container
copy_command = ''
docker_cmd = self._docker_cmd()
if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
# if this is the first invocation: copy the entire folder into the container
copy_command = '%s cp "%s/." "%s:%s";' % (docker_cmd,
lambda_cwd, container_info.name, DOCKER_TASK_FOLDER)
cmd = (
'%s'
' %s exec -i'
' %s' # env variables file
' %s' # container name
' %s' # run cmd
) % (copy_command, docker_cmd, env_vars_flag, container_info.name, command)
LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)
return cmd
def _execute(self, func_arn, *args, **kwargs):
if not LAMBDA_CONCURRENCY_LOCK.get(func_arn):
concurrency_lock = threading.RLock()
LAMBDA_CONCURRENCY_LOCK[func_arn] = concurrency_lock
with LAMBDA_CONCURRENCY_LOCK[func_arn]:
return super(LambdaExecutorReuseContainers, self)._execute(func_arn, *args, **kwargs)
def startup(self):
self.cleanup()
# start a process to remove idle containers
if config.LAMBDA_REMOVE_CONTAINERS:
self.start_idle_container_destroyer_interval()
def cleanup(self, arn=None):
if arn:
self.function_invoke_times.pop(arn, None)
return self.destroy_docker_container(arn)
self.function_invoke_times = {}
return self.destroy_existing_docker_containers()
def prime_docker_container(self, func_details, env_vars, lambda_cwd):
"""
Prepares a persistent docker container for a specific function.
:param func_details: The lambda function details (ARN, runtime, handler, etc.).
:param env_vars: The environment variables for the lambda.
:param lambda_cwd: The local directory containing the code for the lambda function.
:return: ContainerInfo class containing the container name and default entry point.
"""
with self.docker_container_lock:
# Get the container name and id.
func_arn = func_details.arn()
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
status = self.get_docker_container_status(func_arn)
LOG.debug('Priming Docker container (status "%s"): %s' % (status, container_name))
docker_image = Util.docker_image_for_lambda(func_details)
# Container is not running or doesn't exist.
if status < 1:
# Make sure the container does not exist in any form/state.
self.destroy_docker_container(func_arn)
# get container startup command and run it
LOG.debug('Creating container: %s' % container_name)
cmd = self.get_container_startup_command(func_details, env_vars, lambda_cwd)
LOG.debug(cmd)
run(cmd, env_vars=env_vars)
if config.LAMBDA_REMOTE_DOCKER:
LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
self.copy_into_container('%s/.' % lambda_cwd, container_name, DOCKER_TASK_FOLDER)
LOG.debug('Starting container: %s' % container_name)
cmd = '%s start %s' % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd)
# give the container some time to start up
time.sleep(1)
container_network = self.get_docker_container_network(func_arn)
entry_point = self.get_container_entrypoint(docker_image)
LOG.debug('Using entrypoint "%s" for container "%s" on network "%s".'
% (entry_point, container_name, container_network))
return ContainerInfo(container_name, entry_point)
def get_container_startup_command(self, func_details, env_vars, lambda_cwd):
docker_image = Util.docker_image_for_lambda(func_details)
rm_flag = Util.get_docker_remove_flag()
docker_cmd = self._docker_cmd()
container_name = self.get_container_name(func_details.arn())
# make sure we set LOCALSTACK_HOSTNAME
if not env_vars.get('LOCALSTACK_HOSTNAME'):
main_endpoint = get_main_endpoint_from_container()
env_vars['LOCALSTACK_HOSTNAME'] = main_endpoint
# make sure AWS_LAMBDA_EVENT_BODY is not set (otherwise causes issues with "docker exec ..." above)
env_vars.pop('AWS_LAMBDA_EVENT_BODY', None)
# create environment variables flag (either passed directly, or as env var file)
env_vars_flags = Util.create_env_vars_file_flag(env_vars)
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
additional_flags = config.LAMBDA_DOCKER_FLAGS or ''
dns = config.LAMBDA_DOCKER_DNS
dns_str = '--dns="%s"' % dns if dns else ''
mount_volume = not config.LAMBDA_REMOTE_DOCKER
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
if (':' in lambda_cwd and '\\' in lambda_cwd):
lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)
mount_volume_str = '-v "%s":%s' % (lambda_cwd_on_host, DOCKER_TASK_FOLDER) if mount_volume else ''
# Create and start the container
cmd = (
'%s create'
' %s' # --rm flag
' --name "%s"'
' --entrypoint /bin/bash' # Load bash when it starts.
' %s'
' --interactive' # Keeps the container running bash.
' -e HOSTNAME="$HOSTNAME"'
' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
' -e EDGE_PORT="$EDGE_PORT"'
' %s' # env_vars
' %s' # network
' %s' # dns
' %s' # additional flags
' %s'
) % (docker_cmd, rm_flag, container_name, mount_volume_str,
env_vars_flags, network_str, dns_str, additional_flags, docker_image)
return cmd
def get_container_entrypoint(self, docker_image):
""" Get the entry point for the given image """
docker_cmd = self._docker_cmd()
LOG.debug('Getting the entrypoint for image: %s' % (docker_image))
cmd = (
'%s image inspect'
' --format="{{ .Config.Entrypoint }}"'
' %s'
) % (docker_cmd, docker_image)
LOG.debug(cmd)
run_result = run(cmd)
entry_point = run_result.strip('[]\n\r ')
return entry_point
def copy_into_container(self, local_path, container_name, container_path):
cmd = ('%s cp %s "%s:%s"') % (self._docker_cmd(), local_path, container_name, container_path)
LOG.debug(cmd)
run(cmd)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
:param func_arn: The ARN of the lambda function.
:return: None
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
docker_cmd = self._docker_cmd()
# Get the container name and id.
container_name = self.get_container_name(func_arn)
if status == 1:
LOG.debug('Stopping container: %s' % container_name)
cmd = '%s stop -t0 %s' % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
status = self.get_docker_container_status(func_arn)
if status == -1:
LOG.debug('Removing container: %s' % container_name)
rm_docker_container(container_name, safe=True)
def get_all_container_names(self):
"""
Returns a list of container names for lambda containers.
:return: A String[] localstack docker container names for each function.
"""
with self.docker_container_lock:
LOG.debug('Getting all lambda containers names.')
cmd = '%s ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"' % self._docker_cmd()
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()
if len(cmd_result) > 0:
container_names = cmd_result.split('\n')
else:
container_names = []
return container_names
def destroy_existing_docker_containers(self):
"""
Stops and/or removes all lambda docker containers for localstack.
:return: None
"""
with self.docker_container_lock:
container_names = self.get_all_container_names()
LOG.debug('Removing %d containers.' % len(container_names))
for container_name in container_names:
cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_docker_container_status(self, func_arn):
"""
Determine the status of a docker container.
:param func_arn: The ARN of the lambda function.
:return: 1 if the container is running,
-1 if the container exists but is not running,
0 if the container does not exist.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
# Check if the container is already running
# Note: filtering by *exact* name using regex filter '^...$' seems unstable on some
# systems. Therefore, we use a combination of filter and grep to get the results.
cmd = ("docker ps -a --filter name='%s' "
'--format "{{ .Status }} - {{ .Names }}" '
'| grep -w "%s" | cat') % (container_name, container_name)
LOG.debug('Getting status for container "%s": %s' % (container_name, cmd))
cmd_result = run(cmd)
# Parse the container status from the command output.
container_status = cmd_result.strip()
if len(container_status) == 0:
return 0
if container_status.lower().startswith('up '):
return 1
return -1
def get_docker_container_network(self, func_arn):
"""
Determine the network of a docker container.
:param func_arn: The ARN of the lambda function.
:return: name of the container network
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# container does not exist
if status == 0:
return ''
# Get the container name.
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
# Get the container network
LOG.debug('Getting container network: %s' % container_name)
cmd = (
'%s inspect %s'
' --format "{{ .HostConfig.NetworkMode }}"'
) % (docker_cmd, container_name)
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
container_network = cmd_result.strip()
return container_network
def idle_container_destroyer(self):
"""
Iterates through all the lambda containers and destroys any container that has
been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
:return: None
"""
LOG.debug('Checking if there are idle containers ...')
current_time = int(time.time() * 1000)
for func_arn, last_run_time in dict(self.function_invoke_times).items():
duration = current_time - last_run_time
# not enough idle time has passed
if duration < MAX_CONTAINER_IDLE_TIME_MS:
continue
# container has been idle, destroy it.
self.destroy_docker_container(func_arn)
def start_idle_container_destroyer_interval(self):
"""
Runs idle_container_destroyer() and then re-schedules itself every 60 seconds,
so that idle containers are periodically checked for and destroyed.
:return: None
"""
self.idle_container_destroyer()
threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
def get_container_name(self, func_arn):
"""
Given a function ARN, returns a valid docker container name.
:param func_arn: The ARN of the lambda function.
:return: A docker compatible name for the arn.
"""
return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
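# Illustrative sketch (not part of localstack): how get_container_name above maps a
# function ARN onto a docker-safe container name. The ARN below is an example value.
def _example_container_name():
    arn = 'arn:aws:lambda:us-east-1:000000000000:function:my-func'
    name = LambdaExecutorReuseContainers().get_container_name(arn)
    # -> 'localstack_lambda_arn_aws_lambda_us-east-1_000000000000_function_my-func'
    return name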
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
def __init__(self):
super(LambdaExecutorSeparateContainers, self).__init__()
self.max_port = LAMBDA_API_UNIQUE_PORTS
self.port_offset = LAMBDA_API_PORT_OFFSET
def prepare_event(self, environment, event_body):
# Tell Lambci to use STDIN for the event
environment['DOCKER_LAMBDA_USE_STDIN'] = '1'
return event_body.encode()
def prepare_execution(self, func_details, env_vars, command):
lambda_cwd = func_details.cwd
handler = func_details.handler
entrypoint = ''
if command:
entrypoint = ' --entrypoint ""'
elif handler:
command = '"%s"' % handler
else:
command = ''
# add Docker Lambda env vars
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
if network == 'host':
port = get_free_tcp_port()
env_vars['DOCKER_LAMBDA_API_PORT'] = str(port)
env_vars['DOCKER_LAMBDA_RUNTIME_PORT'] = str(port)
additional_flags = config.LAMBDA_DOCKER_FLAGS or ''
dns = config.LAMBDA_DOCKER_DNS
dns_str = '--dns="%s"' % dns if dns else ''
env_vars_flag = Util.create_env_vars_file_flag(env_vars)
debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''
docker_cmd = self._docker_cmd()
docker_image = Util.docker_image_for_lambda(func_details)
rm_flag = Util.get_docker_remove_flag()
# construct common flags for commands below
common_flags = ' '.join([env_vars_flag, network_str, dns_str, additional_flags, rm_flag])
if config.LAMBDA_REMOTE_DOCKER:
cp_cmd = ('%s cp "%s/." "$CONTAINER_ID:%s";' % (
docker_cmd, lambda_cwd, DOCKER_TASK_FOLDER)) if lambda_cwd else ''
cmd = (
'CONTAINER_ID="$(%s create -i'
' %s' # entrypoint
' %s' # debug_docker_java_port
' %s' # common flags
' %s %s' # image and command
')";'
'%s '
'%s start -ai "$CONTAINER_ID";'
) % (docker_cmd, entrypoint, debug_docker_java_port,
common_flags, docker_image, command,
cp_cmd, docker_cmd)
else:
mount_flag = ''
if lambda_cwd:
mount_flag = '-v "%s":%s' % (Util.get_host_path_for_path_in_docker(lambda_cwd), DOCKER_TASK_FOLDER)
cmd = (
'%s run -i'
' %s'
' %s' # code mount
' %s' # common flags
' %s %s'
) % (docker_cmd, entrypoint, mount_flag, common_flags, docker_image, command)
return cmd
class LambdaExecutorLocal(LambdaExecutor):
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
environment = self._prepare_environment(func_details)
# execute the Lambda function via a multiprocessing.Process wrapper; the result is passed back via a queue
# (note: process.run() below invokes the target synchronously in the current process, not in a forked child)
queue = Queue()
lambda_function = func_details.function(version)
def do_execute():
# the target function; since it runs via process.run(), changes to CWD and ENV happen in the current process
result = None
try:
if lambda_cwd:
os.chdir(lambda_cwd)
sys.path.insert(0, '')
if environment:
os.environ.update(environment)
result = lambda_function(event, context)
except Exception as e:
result = str(e)
sys.stderr.write('%s %s' % (e, traceback.format_exc()))
raise
finally:
queue.put(result)
process = Process(target=do_execute)
start_time = now(millis=True)
error = None
with CaptureOutput() as c:
try:
process.run()
except Exception as e:
error = e
result = queue.get()
end_time = now(millis=True)
# Make sure to keep the log line below, to ensure the log stream gets created
request_id = long_uid()
log_output = 'START %s: Lambda %s started via "local" executor ...' % (request_id, func_arn)
# TODO: Interweaving stdout/stderr currently not supported
for stream in (c.stdout(), c.stderr()):
if stream:
log_output += ('\n' if log_output else '') + stream
log_output += '\nEND RequestId: %s' % request_id
log_output += '\nREPORT RequestId: %s Duration: %s ms' % (request_id, int((end_time - start_time) * 1000))
# store logs to CloudWatch
_store_logs(func_details, log_output)
result = result.result if isinstance(result, InvocationResult) else result
if error:
LOG.info('Error executing Lambda "%s": %s %s' % (func_arn, error,
''.join(traceback.format_tb(error.__traceback__))))
raise InvocationException(result, log_output)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
def execute_java_lambda(self, event, context, main_file, func_details=None):
handler = func_details.handler
opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''
event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
save_file(event_file, json.dumps(json_safe(event)))
TMP_FILES.append(event_file)
class_name = handler.split('::')[0]
classpath = '%s:%s:%s' % (main_file, Util.get_java_classpath(main_file), LAMBDA_EXECUTOR_JAR)
cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
LOG.warning(cmd)
result = self.run_lambda_executor(cmd, func_details=func_details)
return result
class Util:
debug_java_port = False
@classmethod
def get_java_opts(cls):
opts = config.LAMBDA_JAVA_OPTS or ''
# Replace _debug_port_ with a random free port
if '_debug_port_' in opts:
if not cls.debug_java_port:
cls.debug_java_port = get_free_tcp_port()
opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))
else:
# Parse the debug port from opts
m = re.match('.*address=(.+:)?(\\d+).*', opts)
if m is not None:
cls.debug_java_port = m.groups()[1]
return opts
@classmethod
def get_host_path_for_path_in_docker(cls, path):
return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
r'%s/\1' % config.HOST_TMP_FOLDER, path)
@classmethod
def format_windows_path(cls, path):
temp = path.replace(':', '').replace('\\', '/')
if len(temp) >= 1 and temp[:1] != '/':
temp = '/' + temp
temp = '%s%s' % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)
return temp
@classmethod
def docker_image_for_lambda(cls, func_details):
runtime = func_details.runtime or ''
docker_tag = runtime
docker_image = config.LAMBDA_CONTAINER_REGISTRY
# TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas
# See https://github.com/lambci/docker-lambda/pull/218
lambdas_to_add_prefix = ['dotnetcore2.0', 'dotnetcore2.1', 'python3.6', 'python3.7']
if docker_image == 'lambci/lambda' and any(img in docker_tag for img in lambdas_to_add_prefix):
docker_tag = '20191117-%s' % docker_tag
if runtime == 'nodejs14.x':
# TODO temporary fix until lambci image for nodejs14.x becomes available
docker_image = 'localstack/lambda-js'
return '"%s:%s"' % (docker_image, docker_tag)
@staticmethod
def get_docker_remove_flag():
return '--rm' if config.LAMBDA_REMOVE_CONTAINERS else ''
@classmethod
def get_java_classpath(cls, archive):
"""
Return the Java classpath, using the parent folder of the
given archive as the base folder.
The result contains any *.jar files in the base folder, as
well as any JAR files in the "lib/*" subfolder living
alongside the supplied java archive (.jar or .zip).
:param archive: an absolute path to a .jar or .zip Java archive
:return: the Java classpath, relative to the base dir of "archive"
"""
entries = ['.']
base_dir = os.path.dirname(archive)
for pattern in ['%s/*.jar', '%s/lib/*.jar', '%s/java/lib/*.jar', '%s/*.zip']:
for entry in glob.glob(pattern % base_dir):
if os.path.realpath(archive) != os.path.realpath(entry):
entries.append(os.path.relpath(entry, base_dir))
# make sure to append the localstack-utils.jar at the end of the classpath
# https://github.com/localstack/localstack/issues/1160
entries.append(os.path.relpath(archive, base_dir))
entries.append('*.jar')
entries.append('java/lib/*.jar')
result = ':'.join(entries)
return result
@classmethod
def create_env_vars_file_flag(cls, env_vars, use_env_variable_names=True):
if not env_vars:
return ''
result = ''
env_vars = dict(env_vars)
if len(str(env_vars)) > MAX_ENV_ARGS_LENGTH:
# default ARG_MAX=131072 in Docker - let's create an env var file if the string becomes too long...
env_file = cls.mountable_tmp_file()
env_content = ''
for name, value in dict(env_vars).items():
if len(value) > MAX_ENV_ARGS_LENGTH:
# each line in the env file has a max size as well (error "bufio.Scanner: token too long")
continue
env_vars.pop(name)
value = value.replace('\n', '\\')
env_content += '%s=%s\n' % (name, value)
save_file(env_file, env_content)
result += '--env-file %s ' % env_file
if use_env_variable_names:
env_vars_str = ' '.join(['-e {k}="${k}"'.format(k=k) for k in env_vars.keys()])
else:
# TODO: we should remove this mid-term - shouldn't be using cmd_quote directly
env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for k, v in env_vars.items()])
result += env_vars_str
return result
@staticmethod
def rm_env_vars_file(env_vars_file_flag):
if not env_vars_file_flag or '--env-file' not in env_vars_file_flag:
return
env_vars_file = env_vars_file_flag.replace('--env-file', '').strip()
return rm_rf(env_vars_file)
@staticmethod
def mountable_tmp_file():
f = os.path.join(config.TMP_FOLDER, short_uid())
TMP_FILES.append(f)
return f
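# Illustrative sketch (not part of localstack): how Util.create_env_vars_file_flag
# above renders environment variables for the docker command line. Small
# environments become inline '-e NAME="$NAME"' flags; once the serialized dict
# exceeds MAX_ENV_ARGS_LENGTH, values that fit are written to a temporary env file
# ('--env-file ...'), while any single value longer than the limit stays inline.
def _example_env_vars_flag():
    small = Util.create_env_vars_file_flag({'FOO': 'bar'})
    # e.g. '-e FOO="$FOO"' - the value is resolved from the caller's environment
    huge = Util.create_env_vars_file_flag(
        {'FOO': 'bar', 'BLOB': 'x' * (MAX_ENV_ARGS_LENGTH + 1)})
    # e.g. '--env-file /tmp/<uid> -e BLOB="$BLOB"' - FOO goes into the env file,
    # while BLOB exceeds the per-line limit and remains an inline flag
    return small, huge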
# --------------
# GLOBAL STATE
# --------------
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
'local': EXECUTOR_LOCAL,
'docker': EXECUTOR_CONTAINERS_SEPARATE,
'docker-reuse': EXECUTOR_CONTAINERS_REUSE
}
|
email.py
|
from flask import Flask
from threading import Thread
from flask_mail import Message
from app import app, mail
def send(recipient, subject, body):
'''
Send a mail to a recipient. The body is usually a rendered HTML template.
The sender's credentials have been configured in the config.py file.
'''
sender = app.config['ADMINS'][0]
message = Message(subject, sender=sender, recipients=[recipient])
message.html = body
# Create a new thread
thr = Thread(target=send_async, args=[app, message])
thr.start()
def send_async(app, message):
''' Send the mail asynchronously. '''
with app.app_context():
mail.send(message)
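# Illustrative usage sketch (not part of the original module): sending a rendered
# template through send() above. The 'welcome.html' template and the recipient are
# assumptions of this sketch, not part of this file.
def _example_send_welcome(recipient):
    from flask import render_template
    with app.app_context():
        body = render_template('welcome.html', email=recipient)
    send(recipient, 'Welcome!', body)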
|
build.py
|
## @file
# build a platform or a module
#
# Copyright (c) 2014, Hewlett-Packard Development Company, L.P.<BR>
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import re
import StringIO
import sys
import glob
import time
import platform
import traceback
import encodings.ascii
import itertools
import multiprocessing
from struct import *
from threading import *
from optparse import OptionParser
from subprocess import *
from Common import Misc as Utils
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.LongFilePathSupport import LongFilePath
from Common.TargetTxtClassObject import *
from Common.ToolDefClassObject import *
from Common.DataType import *
from Common.BuildVersion import gBUILD_VERSION
from AutoGen.AutoGen import *
from Common.BuildToolError import *
from Workspace.WorkspaceDatabase import *
from Common.MultipleWorkspace import MultipleWorkspace as mws
from BuildReport import BuildReport
from GenPatchPcdTable.GenPatchPcdTable import *
from PatchPcdValue.PatchPcdValue import *
import Common.EdkLogger
import Common.GlobalData as GlobalData
from GenFds.GenFds import GenFds
from collections import OrderedDict,defaultdict
# Version and Copyright
VersionNumber = "0.60" + ' ' + gBUILD_VERSION
__version__ = "%prog Version " + VersionNumber
__copyright__ = "Copyright (c) 2007 - 2017, Intel Corporation All rights reserved."
## standard targets of build command
gSupportedTarget = ['all', 'genc', 'genmake', 'modules', 'libraries', 'fds', 'clean', 'cleanall', 'cleanlib', 'run']
## build configuration file
gBuildConfiguration = "target.txt"
gToolsDefinition = "tools_def.txt"
TemporaryTablePattern = re.compile(r'^_\d+_\d+_[a-fA-F0-9]+$')
TmpTableDict = {}
## Check environment PATH variable to make sure the specified tool is found
#
# If the tool is found in the PATH, then True is returned
# Otherwise, False is returned
#
def IsToolInPath(tool):
if 'PATHEXT' in os.environ:
extns = os.environ['PATHEXT'].split(os.path.pathsep)
else:
extns = ('',)
for pathDir in os.environ['PATH'].split(os.path.pathsep):
for ext in extns:
if os.path.exists(os.path.join(pathDir, tool + ext)):
return True
return False
## Check environment variables
#
# Check environment variables that must be set for build. Currently they are
#
# WORKSPACE The directory all packages/platforms start from
# EDK_TOOLS_PATH The directory that contains all tools needed by the build
# PATH $(EDK_TOOLS_PATH)/Bin/<sys> must be set in PATH
#
# If any of the above environment variables is not set or has an error, the
# build will be broken.
#
def CheckEnvVariable():
# check WORKSPACE
if "WORKSPACE" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="WORKSPACE")
WorkspaceDir = os.path.normcase(os.path.normpath(os.environ["WORKSPACE"]))
if not os.path.exists(WorkspaceDir):
EdkLogger.error("build", FILE_NOT_FOUND, "WORKSPACE doesn't exist", ExtraData="%s" % WorkspaceDir)
elif ' ' in WorkspaceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in WORKSPACE path",
ExtraData=WorkspaceDir)
os.environ["WORKSPACE"] = WorkspaceDir
# set multiple workspace
PackagesPath = os.getenv("PACKAGES_PATH")
mws.setWs(WorkspaceDir, PackagesPath)
if mws.PACKAGES_PATH:
for Path in mws.PACKAGES_PATH:
if not os.path.exists(Path):
EdkLogger.error("build", FILE_NOT_FOUND, "One Path in PACKAGES_PATH doesn't exist", ExtraData="%s" % Path)
elif ' ' in Path:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in PACKAGES_PATH", ExtraData=Path)
#
# Check EFI_SOURCE (Edk build convention). EDK_SOURCE will always point to ECP
#
if "ECP_SOURCE" not in os.environ:
os.environ["ECP_SOURCE"] = mws.join(WorkspaceDir, GlobalData.gEdkCompatibilityPkg)
if "EFI_SOURCE" not in os.environ:
os.environ["EFI_SOURCE"] = os.environ["ECP_SOURCE"]
if "EDK_SOURCE" not in os.environ:
os.environ["EDK_SOURCE"] = os.environ["ECP_SOURCE"]
#
# Unify case of characters on case-insensitive systems
#
EfiSourceDir = os.path.normcase(os.path.normpath(os.environ["EFI_SOURCE"]))
EdkSourceDir = os.path.normcase(os.path.normpath(os.environ["EDK_SOURCE"]))
EcpSourceDir = os.path.normcase(os.path.normpath(os.environ["ECP_SOURCE"]))
os.environ["EFI_SOURCE"] = EfiSourceDir
os.environ["EDK_SOURCE"] = EdkSourceDir
os.environ["ECP_SOURCE"] = EcpSourceDir
os.environ["EDK_TOOLS_PATH"] = os.path.normcase(os.environ["EDK_TOOLS_PATH"])
if not os.path.exists(EcpSourceDir):
EdkLogger.verbose("ECP_SOURCE = %s doesn't exist. Edk modules could not be built." % EcpSourceDir)
elif ' ' in EcpSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in ECP_SOURCE path",
ExtraData=EcpSourceDir)
if not os.path.exists(EdkSourceDir):
if EdkSourceDir == EcpSourceDir:
EdkLogger.verbose("EDK_SOURCE = %s doesn't exist. Edk modules could not be built." % EdkSourceDir)
else:
EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE does not exist",
ExtraData=EdkSourceDir)
elif ' ' in EdkSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EDK_SOURCE path",
ExtraData=EdkSourceDir)
if not os.path.exists(EfiSourceDir):
if EfiSourceDir == EcpSourceDir:
EdkLogger.verbose("EFI_SOURCE = %s doesn't exist. Edk modules could not be built." % EfiSourceDir)
else:
EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE does not exist",
ExtraData=EfiSourceDir)
elif ' ' in EfiSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EFI_SOURCE path",
ExtraData=EfiSourceDir)
# check those variables on single workspace case
if not PackagesPath:
# change absolute path to relative path to WORKSPACE
if EfiSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n EFI_SOURCE = %s" % (WorkspaceDir, EfiSourceDir))
if EdkSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n EDK_SOURCE = %s" % (WorkspaceDir, EdkSourceDir))
if EcpSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "ECP_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n ECP_SOURCE = %s" % (WorkspaceDir, EcpSourceDir))
# check EDK_TOOLS_PATH
if "EDK_TOOLS_PATH" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="EDK_TOOLS_PATH")
# check PATH
if "PATH" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="PATH")
GlobalData.gWorkspace = WorkspaceDir
GlobalData.gEfiSource = EfiSourceDir
GlobalData.gEdkSource = EdkSourceDir
GlobalData.gEcpSource = EcpSourceDir
GlobalData.gGlobalDefines["WORKSPACE"] = WorkspaceDir
GlobalData.gGlobalDefines["EFI_SOURCE"] = EfiSourceDir
GlobalData.gGlobalDefines["EDK_SOURCE"] = EdkSourceDir
GlobalData.gGlobalDefines["ECP_SOURCE"] = EcpSourceDir
GlobalData.gGlobalDefines["EDK_TOOLS_PATH"] = os.environ["EDK_TOOLS_PATH"]
## Get normalized file path
#
# Convert the path to be local format, and remove the WORKSPACE path at the
# beginning if the file path is given in full path.
#
# @param FilePath File path to be normalized
# @param Workspace Workspace path which the FilePath will be checked against
#
# @retval string The normalized file path
#
def NormFile(FilePath, Workspace):
# check if the path is absolute or relative
if os.path.isabs(FilePath):
FileFullPath = os.path.normpath(FilePath)
else:
FileFullPath = os.path.normpath(mws.join(Workspace, FilePath))
Workspace = mws.getWs(Workspace, FilePath)
# check if the file path exists or not
if not os.path.isfile(FileFullPath):
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData="\t%s (Please give file in absolute path or relative to WORKSPACE)" % FileFullPath)
# remove workspace directory from the beginning part of the file path
if Workspace[-1] in ["\\", "/"]:
return FileFullPath[len(Workspace):]
else:
return FileFullPath[(len(Workspace) + 1):]
## Get the output of an external program
#
# This is the entrance method of thread reading output of an external program and
# putting them in STDOUT/STDERR of current program.
#
# @param From The stream message read from
# @param To The stream message put on
# @param ExitFlag The flag used to indicate stopping reading
#
def ReadMessage(From, To, ExitFlag):
while True:
# read one line a time
Line = From.readline()
# empty string means "end"
if Line is not None and Line != "":
To(Line.rstrip())
else:
break
if ExitFlag.isSet():
break
## Launch an external program
#
# This method will call subprocess.Popen to execute an external program with
# given options in specified directory. Because of the dead-lock issue during
# redirecting output of the external program, threads are used to do the
# redirection work.
#
# @param Command A list or string containing the call of the program
# @param WorkingDir The directory in which the program will be running
#
def LaunchCommand(Command, WorkingDir):
BeginTime = time.time()
# if working directory doesn't exist, Popen() will raise an exception
if not os.path.isdir(WorkingDir):
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=WorkingDir)
# Command is used as the first argument of the Popen() call below. It can be a
# string or a sequence; we found that passing it straight through can fail on
# Ubuntu with a "command not found" error, so normalize it here into a single
# space-separated command string (split and re-join for plain strings).
if platform.system() != 'Windows':
if not isinstance(Command, list):
Command = Command.split()
Command = ' '.join(Command)
Proc = None
EndOfProcedure = None
try:
# launch the command
Proc = Popen(Command, stdout=PIPE, stderr=PIPE, env=os.environ, cwd=WorkingDir, bufsize=-1, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Proc.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Proc.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Proc.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Proc.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Proc.wait()
except: # in case of aborting
# terminate the threads redirecting the program output
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
if EndOfProcedure is not None:
EndOfProcedure.set()
if Proc is None:
if type(Command) != type(""):
Command = " ".join(Command)
EdkLogger.error("build", COMMAND_FAILURE, "Failed to start command", ExtraData="%s [%s]" % (Command, WorkingDir))
if Proc.stdout:
StdOutThread.join()
if Proc.stderr:
StdErrThread.join()
# check the return code of the program
if Proc.returncode != 0:
if type(Command) != type(""):
Command = " ".join(Command)
# print out the Response file and its content when make failure
RespFile = os.path.join(WorkingDir, 'OUTPUT', 'respfilelist.txt')
if os.path.isfile(RespFile):
f = open(RespFile)
RespContent = f.read()
f.close()
EdkLogger.info(RespContent)
EdkLogger.error("build", COMMAND_FAILURE, ExtraData="%s [%s]" % (Command, WorkingDir))
return "%dms" % (int(round((time.time() - BeginTime) * 1000)))
## The smallest unit that can be built in multi-thread build mode
#
# This is the base class of build unit. The "Obj" parameter must provide
# __str__(), __eq__() and __hash__() methods. Otherwise some build units may be
# missed during the build.
#
# Currently the "Obj" should be only ModuleAutoGen or PlatformAutoGen objects.
#
class BuildUnit:
## The constructor
#
# @param self The object pointer
# @param Obj The object the build is working on
# @param Target The build target name, one of gSupportedTarget
# @param Dependency The BuildUnit(s) which must be completed in advance
# @param WorkingDir The directory build command starts in
#
def __init__(self, Obj, BuildCommand, Target, Dependency, WorkingDir="."):
self.BuildObject = Obj
self.Dependency = Dependency
self.WorkingDir = WorkingDir
self.Target = Target
self.BuildCommand = BuildCommand
if not BuildCommand:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(Obj.BuildTarget, Obj.ToolChain, Obj.Arch),
ExtraData=str(Obj))
## str() method
#
# It just returns the string representation of self.BuildObject
#
# @param self The object pointer
#
def __str__(self):
return str(self.BuildObject)
## "==" operator method
#
# It just compares self.BuildObject with "Other". So self.BuildObject must
# provide its own __eq__() method.
#
# @param self The object pointer
# @param Other The other BuildUnit object compared to
#
def __eq__(self, Other):
return Other is not None and self.BuildObject == Other.BuildObject \
and self.BuildObject.Arch == Other.BuildObject.Arch
## hash() method
#
# It just returns the hash value of self.BuildObject which must be hashable.
#
# @param self The object pointer
#
def __hash__(self):
return hash(self.BuildObject) + hash(self.BuildObject.Arch)
def __repr__(self):
return repr(self.BuildObject)
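## Illustrative sketch (not part of BaseTools)
#
#   BuildUnit above relies on the wrapped object's __str__/__eq__/__hash__ so that
#   duplicate units collapse in the task queues. _FakeAutoGen below is a stand-in
#   for ModuleAutoGen/PlatformAutoGen and exists only for this example.
#
class _FakeAutoGen(object):
    def __init__(self, Name, Arch):
        self.Name, self.Arch = Name, Arch
        self.BuildTarget, self.ToolChain = "DEBUG", "GCC5"
    def __str__(self):
        return "%s[%s]" % (self.Name, self.Arch)
    def __eq__(self, Other):
        return str(self) == str(Other)
    def __hash__(self):
        return hash(str(self))

def _ExampleDeduplicateBuildUnits():
    UnitA = BuildUnit(_FakeAutoGen("MdeModulePkg", "X64"), ["nmake"], "all", [], ".")
    UnitB = BuildUnit(_FakeAutoGen("MdeModulePkg", "X64"), ["nmake"], "all", [], ".")
    return len(set([UnitA, UnitB]))     # -> 1, the duplicate unit is collapsed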
## The smallest module unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for module build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise some make
# units could be missed during the build.
#
# Currently the "Obj" should be only ModuleAutoGen object.
#
class ModuleMakeUnit(BuildUnit):
## The constructor
#
# @param self The object pointer
# @param Obj The ModuleAutoGen object the build is working on
# @param Target The build target name, one of gSupportedTarget
#
def __init__(self, Obj, Target):
Dependency = [ModuleMakeUnit(La, Target) for La in Obj.LibraryAutoGenList]
BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
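        # With no explicit target (None, "" or "all"), fall back to the per-module "tbuild" make target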
if Target in [None, "", "all"]:
self.Target = "tbuild"
## The smallest platform unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for platform build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise some make
# units could be missed during the build.
#
# Currently the "Obj" should be only PlatformAutoGen object.
#
class PlatformMakeUnit(BuildUnit):
## The constructor
#
# @param self The object pointer
# @param Obj The PlatformAutoGen object the build is working on
# @param Target The build target name, one of gSupportedTarget
#
def __init__(self, Obj, Target):
        Dependency = [ModuleMakeUnit(Lib, Target) for Lib in Obj.LibraryAutoGenList]
        Dependency.extend([ModuleMakeUnit(Mod, Target) for Mod in Obj.ModuleAutoGenList])
BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
## The class representing the task of a module build or platform build
#
# This class manages the build tasks in multi-thread build mode. Its jobs include
# scheduling threads, catching thread errors, monitoring thread status, etc.
#
class BuildTask:
# queue for tasks waiting for schedule
_PendingQueue = OrderedDict()
_PendingQueueLock = threading.Lock()
# queue for tasks ready for running
_ReadyQueue = OrderedDict()
_ReadyQueueLock = threading.Lock()
# queue for run tasks
_RunningQueue = OrderedDict()
_RunningQueueLock = threading.Lock()
    # queue containing all build tasks, to avoid duplicate builds
_TaskQueue = OrderedDict()
# flag indicating error occurs in a running thread
_ErrorFlag = threading.Event()
_ErrorFlag.clear()
_ErrorMessage = ""
# BoundedSemaphore object used to control the number of running threads
_Thread = None
# flag indicating if the scheduler is started or not
_SchedulerStopped = threading.Event()
_SchedulerStopped.set()
## Start the task scheduler thread
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def StartScheduler(MaxThreadNumber, ExitFlag):
SchedulerThread = Thread(target=BuildTask.Scheduler, args=(MaxThreadNumber, ExitFlag))
SchedulerThread.setName("Build-Task-Scheduler")
SchedulerThread.setDaemon(False)
SchedulerThread.start()
# wait for the scheduler to be started, especially useful in Linux
while not BuildTask.IsOnGoing():
time.sleep(0.01)
## Scheduler method
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def Scheduler(MaxThreadNumber, ExitFlag):
BuildTask._SchedulerStopped.clear()
try:
# use BoundedSemaphore to control the maximum running threads
BuildTask._Thread = BoundedSemaphore(MaxThreadNumber)
#
            # scheduling loop, which exits when there are no pending/ready tasks and it has
            # been told to do so, or when an error occurs in a running thread
#
while (len(BuildTask._PendingQueue) > 0 or len(BuildTask._ReadyQueue) > 0 \
or not ExitFlag.isSet()) and not BuildTask._ErrorFlag.isSet():
EdkLogger.debug(EdkLogger.DEBUG_8, "Pending Queue (%d), Ready Queue (%d)"
% (len(BuildTask._PendingQueue), len(BuildTask._ReadyQueue)))
# get all pending tasks
BuildTask._PendingQueueLock.acquire()
BuildObjectList = BuildTask._PendingQueue.keys()
#
# check if their dependency is resolved, and if true, move them
# into ready queue
#
for BuildObject in BuildObjectList:
Bt = BuildTask._PendingQueue[BuildObject]
if Bt.IsReady():
BuildTask._ReadyQueue[BuildObject] = BuildTask._PendingQueue.pop(BuildObject)
BuildTask._PendingQueueLock.release()
# launch build thread until the maximum number of threads is reached
while not BuildTask._ErrorFlag.isSet():
# empty ready queue, do nothing further
if len(BuildTask._ReadyQueue) == 0:
break
# wait for active thread(s) exit
BuildTask._Thread.acquire(True)
# start a new build thread
Bo,Bt = BuildTask._ReadyQueue.popitem()
# move into running queue
BuildTask._RunningQueueLock.acquire()
BuildTask._RunningQueue[Bo] = Bt
BuildTask._RunningQueueLock.release()
Bt.Start()
# avoid tense loop
time.sleep(0.01)
# avoid tense loop
time.sleep(0.01)
            # wait for all running threads to exit
if BuildTask._ErrorFlag.isSet():
EdkLogger.quiet("\nWaiting for all build threads exit...")
# while not BuildTask._ErrorFlag.isSet() and \
while len(BuildTask._RunningQueue) > 0:
EdkLogger.verbose("Waiting for thread ending...(%d)" % len(BuildTask._RunningQueue))
EdkLogger.debug(EdkLogger.DEBUG_8, "Threads [%s]" % ", ".join([Th.getName() for Th in threading.enumerate()]))
# avoid tense loop
time.sleep(0.1)
except BaseException, X:
#
            # TRICK: hide the output of threads left running, so that the user can
# catch the error message easily
#
EdkLogger.SetLevel(EdkLogger.ERROR)
BuildTask._ErrorFlag.set()
BuildTask._ErrorMessage = "build thread scheduler error\n\t%s" % str(X)
BuildTask._PendingQueue.clear()
BuildTask._ReadyQueue.clear()
BuildTask._RunningQueue.clear()
BuildTask._TaskQueue.clear()
BuildTask._SchedulerStopped.set()
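    # Illustrative driver sequence for BuildTask (a sketch of how the callers further
    # below use this class; names such as BuildModules/ThreadNumber stand for the
    # caller's own variables):
    #
    #     ExitFlag = threading.Event()
    #     for Ma in BuildModules:
    #         BuildTask.New(ModuleMakeUnit(Ma, Target))        # queue the work
    #     BuildTask.StartScheduler(ThreadNumber, ExitFlag)     # start scheduling worker threads
    #     ExitFlag.set()                                       # no more tasks will be added
    #     BuildTask.WaitForComplete()                          # block until the scheduler stops
    #     if BuildTask.HasError():
    #         EdkLogger.error("build", BUILD_ERROR, BuildTask.GetErrorMessage())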
    ## Wait for the scheduler and all running build tasks to complete
#
@staticmethod
def WaitForComplete():
BuildTask._SchedulerStopped.wait()
## Check if the scheduler is running or not
#
@staticmethod
def IsOnGoing():
return not BuildTask._SchedulerStopped.isSet()
## Abort the build
@staticmethod
def Abort():
if BuildTask.IsOnGoing():
BuildTask._ErrorFlag.set()
BuildTask.WaitForComplete()
## Check if there's error in running thread
#
    # Since the main thread cannot catch exceptions raised in other threads, we have to
    # use threading.Event to communicate this information to the main thread.
#
@staticmethod
def HasError():
return BuildTask._ErrorFlag.isSet()
## Get error message in running thread
#
    # Since the main thread cannot catch exceptions raised in other threads, we have to
    # use a static variable to communicate this message to the main thread.
#
@staticmethod
def GetErrorMessage():
return BuildTask._ErrorMessage
## Factory method to create a BuildTask object
#
    # This method checks whether a module is being built or has already been built.
    # If so, it just returns the associated BuildTask object from _TaskQueue. If not,
    # it creates and returns a new BuildTask object, which is also appended to
    # _PendingQueue for scheduling later.
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build object of BuildItem
#
@staticmethod
def New(BuildItem, Dependency=None):
if BuildItem in BuildTask._TaskQueue:
Bt = BuildTask._TaskQueue[BuildItem]
return Bt
Bt = BuildTask()
Bt._Init(BuildItem, Dependency)
BuildTask._TaskQueue[BuildItem] = Bt
BuildTask._PendingQueueLock.acquire()
BuildTask._PendingQueue[BuildItem] = Bt
BuildTask._PendingQueueLock.release()
return Bt
## The real constructor of BuildTask
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build object of BuildItem
#
def _Init(self, BuildItem, Dependency=None):
self.BuildItem = BuildItem
self.DependencyList = []
if Dependency is None:
Dependency = BuildItem.Dependency
else:
Dependency.extend(BuildItem.Dependency)
self.AddDependency(Dependency)
# flag indicating build completes, used to avoid unnecessary re-build
self.CompleteFlag = False
## Check if all dependent build tasks are completed or not
#
def IsReady(self):
ReadyFlag = True
for Dep in self.DependencyList:
if Dep.CompleteFlag == True:
continue
ReadyFlag = False
break
return ReadyFlag
## Add dependent build task
#
# @param Dependency The list of dependent build objects
#
def AddDependency(self, Dependency):
for Dep in Dependency:
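            # binary-only modules have nothing to make, so they are not tracked as dependencies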
if not Dep.BuildObject.IsBinaryModule:
self.DependencyList.append(BuildTask.New(Dep)) # BuildTask list
## The thread wrapper of LaunchCommand function
#
    # @param Command A list or string containing the command to be executed
# @param WorkingDir The directory in which the program will be running
#
def _CommandThread(self, Command, WorkingDir):
try:
self.BuildItem.BuildObject.BuildTime = LaunchCommand(Command, WorkingDir)
self.CompleteFlag = True
except:
#
            # TRICK: hide the output of threads left running, so that the user can
# catch the error message easily
#
if not BuildTask._ErrorFlag.isSet():
GlobalData.gBuildingModule = "%s [%s, %s, %s]" % (str(self.BuildItem.BuildObject),
self.BuildItem.BuildObject.Arch,
self.BuildItem.BuildObject.ToolChain,
self.BuildItem.BuildObject.BuildTarget
)
EdkLogger.SetLevel(EdkLogger.ERROR)
BuildTask._ErrorFlag.set()
BuildTask._ErrorMessage = "%s broken\n %s [%s]" % \
(threading.currentThread().getName(), Command, WorkingDir)
        # indicate that a thread is now available for another build task
BuildTask._RunningQueueLock.acquire()
BuildTask._RunningQueue.pop(self.BuildItem)
BuildTask._RunningQueueLock.release()
BuildTask._Thread.release()
## Start build task thread
#
def Start(self):
EdkLogger.quiet("Building ... %s" % repr(self.BuildItem))
Command = self.BuildItem.BuildCommand + [self.BuildItem.Target]
        self.BuildThread = Thread(target=self._CommandThread, args=(Command, self.BuildItem.WorkingDir))
        self.BuildThread.setName("build thread")
        self.BuildThread.setDaemon(False)
        self.BuildThread.start()
## The class contains the information related to EFI image
#
class PeImageInfo():
## Constructor
#
# Constructor will load all required image information.
#
    # @param BaseName The base name of the image.
# @param Guid The GUID for image.
# @param Arch Arch of this image.
# @param OutputDir The output directory for image.
# @param DebugDir The debug directory for image.
# @param ImageClass PeImage Information
#
def __init__(self, BaseName, Guid, Arch, OutputDir, DebugDir, ImageClass):
self.BaseName = BaseName
self.Guid = Guid
self.Arch = Arch
self.OutputDir = OutputDir
self.DebugDir = DebugDir
self.Image = ImageClass
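        # Round the image size up to whole 4 KB (0x1000) pages; with integer division this
        # adds a full page even when the size is already page-aligned.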
self.Image.Size = (self.Image.Size / 0x1000 + 1) * 0x1000
## The class implementing the EDK2 build process
#
# The build process includes:
# 1. Load configuration from target.txt and tools_def.txt in $(WORKSPACE)/Conf
# 2. Parse DSC file of active platform
# 3. Parse FDF file if any
# 4. Establish build database, including parse all other files (module, package)
# 5. Create AutoGen files (C code file, depex file, makefile) if necessary
# 6. Call build command
#
class Build():
## Constructor
#
    # Constructor will load all necessary configurations, parse the platform, modules
    # and packages, and then establish a database for AutoGen.
#
# @param Target The build command target, one of gSupportedTarget
# @param WorkspaceDir The directory of workspace
# @param BuildOptions Build options passed from command line
#
def __init__(self, Target, WorkspaceDir, BuildOptions):
self.WorkspaceDir = WorkspaceDir
self.Target = Target
self.PlatformFile = BuildOptions.PlatformFile
self.ModuleFile = BuildOptions.ModuleFile
self.ArchList = BuildOptions.TargetArch
self.ToolChainList = BuildOptions.ToolChain
self.BuildTargetList= BuildOptions.BuildTarget
self.Fdf = BuildOptions.FdfFile
self.FdList = BuildOptions.RomImage
self.FvList = BuildOptions.FvImage
self.CapList = BuildOptions.CapName
self.SilentMode = BuildOptions.SilentMode
self.ThreadNumber = BuildOptions.ThreadNumber
self.SkipAutoGen = BuildOptions.SkipAutoGen
self.Reparse = BuildOptions.Reparse
self.SkuId = BuildOptions.SkuId
if self.SkuId:
GlobalData.gSKUID_CMD = self.SkuId
self.ConfDirectory = BuildOptions.ConfDirectory
self.SpawnMode = True
self.BuildReport = BuildReport(BuildOptions.ReportFile, BuildOptions.ReportType)
self.TargetTxt = TargetTxtClassObject()
self.ToolDef = ToolDefClassObject()
self.AutoGenTime = 0
self.MakeTime = 0
self.GenFdsTime = 0
GlobalData.BuildOptionPcd = BuildOptions.OptionPcd if BuildOptions.OptionPcd else []
#Set global flag for build mode
GlobalData.gIgnoreSource = BuildOptions.IgnoreSources
GlobalData.gUseHashCache = BuildOptions.UseHashCache
GlobalData.gBinCacheDest = BuildOptions.BinCacheDest
GlobalData.gBinCacheSource = BuildOptions.BinCacheSource
GlobalData.gEnableGenfdsMultiThread = BuildOptions.GenfdsMultiThread
if GlobalData.gBinCacheDest and not GlobalData.gUseHashCache:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination must be used together with --hash.")
if GlobalData.gBinCacheSource and not GlobalData.gUseHashCache:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-source must be used together with --hash.")
if GlobalData.gBinCacheDest and GlobalData.gBinCacheSource:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination can not be used together with --binary-source.")
if GlobalData.gBinCacheSource:
BinCacheSource = os.path.normpath(GlobalData.gBinCacheSource)
if not os.path.isabs(BinCacheSource):
BinCacheSource = mws.join(self.WorkspaceDir, BinCacheSource)
GlobalData.gBinCacheSource = BinCacheSource
else:
if GlobalData.gBinCacheSource is not None:
EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-source.")
if GlobalData.gBinCacheDest:
BinCacheDest = os.path.normpath(GlobalData.gBinCacheDest)
if not os.path.isabs(BinCacheDest):
BinCacheDest = mws.join(self.WorkspaceDir, BinCacheDest)
GlobalData.gBinCacheDest = BinCacheDest
else:
if GlobalData.gBinCacheDest is not None:
EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-destination.")
if self.ConfDirectory:
# Get alternate Conf location, if it is absolute, then just use the absolute directory name
ConfDirectoryPath = os.path.normpath(self.ConfDirectory)
if not os.path.isabs(ConfDirectoryPath):
# Since alternate directory name is not absolute, the alternate directory is located within the WORKSPACE
# This also handles someone specifying the Conf directory in the workspace. Using --conf=Conf
ConfDirectoryPath = mws.join(self.WorkspaceDir, ConfDirectoryPath)
else:
if "CONF_PATH" in os.environ:
ConfDirectoryPath = os.path.normcase(os.path.normpath(os.environ["CONF_PATH"]))
else:
                # Use the standard WORKSPACE/Conf directory, i.e. the absolute path to WORKSPACE/Conf
ConfDirectoryPath = mws.join(self.WorkspaceDir, 'Conf')
GlobalData.gConfDirectory = ConfDirectoryPath
GlobalData.gDatabasePath = os.path.normpath(os.path.join(ConfDirectoryPath, GlobalData.gDatabasePath))
if BuildOptions.DisableCache:
self.Db = WorkspaceDatabase(":memory:")
else:
self.Db = WorkspaceDatabase(GlobalData.gDatabasePath, self.Reparse)
self.BuildDatabase = self.Db.BuildObject
self.Platform = None
self.ToolChainFamily = None
self.LoadFixAddress = 0
self.UniFlag = BuildOptions.Flag
self.BuildModules = []
self.HashSkipModules = []
self.Db_Flag = False
self.LaunchPrebuildFlag = False
self.PlatformBuildPath = os.path.join(GlobalData.gConfDirectory,'.cache', '.PlatformBuild')
if BuildOptions.CommandLength:
GlobalData.gCommandMaxLength = BuildOptions.CommandLength
        # print a dot character while doing some time-consuming work
self.Progress = Utils.Progressor()
# print current build environment and configuration
EdkLogger.quiet("%-16s = %s" % ("WORKSPACE", os.environ["WORKSPACE"]))
if "PACKAGES_PATH" in os.environ:
# WORKSPACE env has been converted before. Print the same path style with WORKSPACE env.
EdkLogger.quiet("%-16s = %s" % ("PACKAGES_PATH", os.path.normcase(os.path.normpath(os.environ["PACKAGES_PATH"]))))
EdkLogger.quiet("%-16s = %s" % ("ECP_SOURCE", os.environ["ECP_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EDK_SOURCE", os.environ["EDK_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EFI_SOURCE", os.environ["EFI_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_PATH", os.environ["EDK_TOOLS_PATH"]))
if "EDK_TOOLS_BIN" in os.environ:
# Print the same path style with WORKSPACE env.
EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_BIN", os.path.normcase(os.path.normpath(os.environ["EDK_TOOLS_BIN"]))))
EdkLogger.quiet("%-16s = %s" % ("CONF_PATH", GlobalData.gConfDirectory))
self.InitPreBuild()
self.InitPostBuild()
if self.Prebuild:
EdkLogger.quiet("%-16s = %s" % ("PREBUILD", self.Prebuild))
if self.Postbuild:
EdkLogger.quiet("%-16s = %s" % ("POSTBUILD", self.Postbuild))
if self.Prebuild:
self.LaunchPrebuild()
self.TargetTxt = TargetTxtClassObject()
self.ToolDef = ToolDefClassObject()
if not (self.LaunchPrebuildFlag and os.path.exists(self.PlatformBuildPath)):
self.InitBuild()
EdkLogger.info("")
os.chdir(self.WorkspaceDir)
## Load configuration
#
# This method will parse target.txt and get the build configurations.
#
def LoadConfiguration(self):
#
# Check target.txt and tools_def.txt and Init them
#
BuildConfigurationFile = os.path.normpath(os.path.join(GlobalData.gConfDirectory, gBuildConfiguration))
if os.path.isfile(BuildConfigurationFile) == True:
StatusCode = self.TargetTxt.LoadTargetTxtFile(BuildConfigurationFile)
ToolDefinitionFile = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF]
if ToolDefinitionFile == '':
ToolDefinitionFile = gToolsDefinition
ToolDefinitionFile = os.path.normpath(mws.join(self.WorkspaceDir, 'Conf', ToolDefinitionFile))
if os.path.isfile(ToolDefinitionFile) == True:
StatusCode = self.ToolDef.LoadToolDefFile(ToolDefinitionFile)
else:
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=ToolDefinitionFile)
else:
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=BuildConfigurationFile)
# if no ARCH given in command line, get it from target.txt
if not self.ArchList:
self.ArchList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET_ARCH]
self.ArchList = tuple(self.ArchList)
# if no build target given in command line, get it from target.txt
if not self.BuildTargetList:
self.BuildTargetList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET]
# if no tool chain given in command line, get it from target.txt
if not self.ToolChainList:
self.ToolChainList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_TAG]
if self.ToolChainList is None or len(self.ToolChainList) == 0:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE, ExtraData="No toolchain given. Don't know how to build.\n")
# check if the tool chains are defined or not
NewToolChainList = []
for ToolChain in self.ToolChainList:
if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]:
EdkLogger.warn("build", "Tool chain [%s] is not defined" % ToolChain)
else:
NewToolChainList.append(ToolChain)
# if no tool chain available, break the build
if len(NewToolChainList) == 0:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
ExtraData="[%s] not defined. No toolchain available for build!\n" % ", ".join(self.ToolChainList))
else:
self.ToolChainList = NewToolChainList
ToolChainFamily = []
ToolDefinition = self.ToolDef.ToolsDefTxtDatabase
for Tool in self.ToolChainList:
if TAB_TOD_DEFINES_FAMILY not in ToolDefinition or Tool not in ToolDefinition[TAB_TOD_DEFINES_FAMILY] \
or not ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool]:
EdkLogger.warn("build", "No tool chain family found in configuration for %s. Default to MSFT." % Tool)
ToolChainFamily.append("MSFT")
else:
ToolChainFamily.append(ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool])
self.ToolChainFamily = ToolChainFamily
if self.ThreadNumber is None:
self.ThreadNumber = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER]
if self.ThreadNumber == '':
self.ThreadNumber = 0
else:
self.ThreadNumber = int(self.ThreadNumber, 0)
if self.ThreadNumber == 0:
try:
self.ThreadNumber = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
self.ThreadNumber = 1
if not self.PlatformFile:
PlatformFile = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_ACTIVE_PLATFORM]
if not PlatformFile:
# Try to find one in current directory
WorkingDirectory = os.getcwd()
FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.dsc')))
FileNum = len(FileList)
if FileNum >= 2:
EdkLogger.error("build", OPTION_MISSING,
ExtraData="There are %d DSC files in %s. Use '-p' to specify one.\n" % (FileNum, WorkingDirectory))
elif FileNum == 1:
PlatformFile = FileList[0]
else:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
ExtraData="No active platform specified in target.txt or command line! Nothing can be built.\n")
self.PlatformFile = PathClass(NormFile(PlatformFile, self.WorkspaceDir), self.WorkspaceDir)
## Initialize build configuration
#
# This method will parse DSC file and merge the configurations from
# command line and target.txt, then get the final build configurations.
#
def InitBuild(self):
# parse target.txt, tools_def.txt, and platform file
self.LoadConfiguration()
# Allow case-insensitive for those from command line or configuration file
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
# create metafile database
if not self.Db_Flag:
self.Db.InitDatabase()
def InitPreBuild(self):
self.LoadConfiguration()
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = self.BuildTargetList[0]
if self.ArchList:
GlobalData.gGlobalDefines['ARCH'] = self.ArchList[0]
if self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = self.ToolChainList[0]
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = self.ToolChainList[0]
if self.ToolChainFamily:
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[0]
if 'PREBUILD' in GlobalData.gCommandLineDefines:
self.Prebuild = GlobalData.gCommandLineDefines.get('PREBUILD')
else:
self.Db.InitDatabase()
self.Db_Flag = True
Platform = self.Db._MapPlatform(str(self.PlatformFile))
self.Prebuild = str(Platform.Prebuild)
if self.Prebuild:
PrebuildList = []
#
# Evaluate all arguments and convert arguments that are WORKSPACE
# relative paths to absolute paths. Filter arguments that look like
# flags or do not follow the file/dir naming rules to avoid false
# positives on this conversion.
#
for Arg in self.Prebuild.split():
#
# Do not modify Arg if it looks like a flag or an absolute file path
#
if Arg.startswith('-') or os.path.isabs(Arg):
PrebuildList.append(Arg)
continue
#
# Do not modify Arg if it does not look like a Workspace relative
# path that starts with a valid package directory name
#
if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
PrebuildList.append(Arg)
continue
#
# If Arg looks like a WORKSPACE relative path, then convert to an
# absolute path and check to see if the file exists.
#
Temp = mws.join(self.WorkspaceDir, Arg)
if os.path.isfile(Temp):
Arg = Temp
PrebuildList.append(Arg)
self.Prebuild = ' '.join(PrebuildList)
self.Prebuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def InitPostBuild(self):
if 'POSTBUILD' in GlobalData.gCommandLineDefines:
self.Postbuild = GlobalData.gCommandLineDefines.get('POSTBUILD')
else:
Platform = self.Db._MapPlatform(str(self.PlatformFile))
self.Postbuild = str(Platform.Postbuild)
if self.Postbuild:
PostbuildList = []
#
# Evaluate all arguments and convert arguments that are WORKSPACE
# relative paths to absolute paths. Filter arguments that look like
# flags or do not follow the file/dir naming rules to avoid false
# positives on this conversion.
#
for Arg in self.Postbuild.split():
#
# Do not modify Arg if it looks like a flag or an absolute file path
#
if Arg.startswith('-') or os.path.isabs(Arg):
PostbuildList.append(Arg)
continue
#
# Do not modify Arg if it does not look like a Workspace relative
# path that starts with a valid package directory name
#
if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
PostbuildList.append(Arg)
continue
#
# If Arg looks like a WORKSPACE relative path, then convert to an
# absolute path and check to see if the file exists.
#
Temp = mws.join(self.WorkspaceDir, Arg)
if os.path.isfile(Temp):
Arg = Temp
PostbuildList.append(Arg)
self.Postbuild = ' '.join(PostbuildList)
self.Postbuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def PassCommandOption(self, BuildTarget, TargetArch, ToolChain, PlatformFile, Target):
BuildStr = ''
if GlobalData.gCommand and isinstance(GlobalData.gCommand, list):
BuildStr += ' ' + ' '.join(GlobalData.gCommand)
TargetFlag = False
ArchFlag = False
ToolChainFlag = False
PlatformFileFlag = False
if GlobalData.gOptions and not GlobalData.gOptions.BuildTarget:
TargetFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.TargetArch:
ArchFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.ToolChain:
ToolChainFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.PlatformFile:
PlatformFileFlag = True
if TargetFlag and BuildTarget:
if isinstance(BuildTarget, list) or isinstance(BuildTarget, tuple):
BuildStr += ' -b ' + ' -b '.join(BuildTarget)
elif isinstance(BuildTarget, str):
BuildStr += ' -b ' + BuildTarget
if ArchFlag and TargetArch:
if isinstance(TargetArch, list) or isinstance(TargetArch, tuple):
BuildStr += ' -a ' + ' -a '.join(TargetArch)
elif isinstance(TargetArch, str):
BuildStr += ' -a ' + TargetArch
if ToolChainFlag and ToolChain:
if isinstance(ToolChain, list) or isinstance(ToolChain, tuple):
BuildStr += ' -t ' + ' -t '.join(ToolChain)
elif isinstance(ToolChain, str):
BuildStr += ' -t ' + ToolChain
if PlatformFileFlag and PlatformFile:
if isinstance(PlatformFile, list) or isinstance(PlatformFile, tuple):
BuildStr += ' -p ' + ' -p '.join(PlatformFile)
elif isinstance(PlatformFile, str):
BuildStr += ' -p' + PlatformFile
BuildStr += ' --conf=' + GlobalData.gConfDirectory
if Target:
BuildStr += ' ' + Target
return BuildStr
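    # Illustrative result of PassCommandOption() (example values only): with a build
    # target, arch and toolchain taken from target.txt rather than the command line,
    # the returned string looks like
    #
    #     ' -b DEBUG -a X64 -t GCC5 --conf=/path/to/Conf all'
    #
    # Only options that were not already passed on the original command line are appended,
    # so the prebuild/postbuild command line reflects the effective configuration.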
def LaunchPrebuild(self):
if self.Prebuild:
EdkLogger.info("\n- Prebuild Start -\n")
self.LaunchPrebuildFlag = True
#
            # The purpose of the .PrebuildEnv file is to capture the environment variable settings set by the
            # prebuild script and preserve them for the rest of the main build step; because the child process
            # environment evaporates as soon as it exits, it cannot be read directly during the build step.
#
PrebuildEnvFile = os.path.join(GlobalData.gConfDirectory,'.cache','.PrebuildEnv')
if os.path.isfile(PrebuildEnvFile):
os.remove(PrebuildEnvFile)
if os.path.isfile(self.PlatformBuildPath):
os.remove(self.PlatformBuildPath)
if sys.platform == "win32":
args = ' && '.join((self.Prebuild, 'set > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
else:
args = ' && '.join((self.Prebuild, 'env > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
            if Process.returncode != 0:
                EdkLogger.error("Prebuild", PREBUILD_ERROR, 'Prebuild process was not successful!')
if os.path.exists(PrebuildEnvFile):
f = open(PrebuildEnvFile)
envs = f.readlines()
f.close()
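                # Each captured line has the form NAME=VALUE: split on the first '=', keep only
                # well-formed pairs, strip whitespace, then merge the result into os.environ below.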
envs = itertools.imap(lambda l: l.split('=',1), envs)
envs = itertools.ifilter(lambda l: len(l) == 2, envs)
envs = itertools.imap(lambda l: [i.strip() for i in l], envs)
os.environ.update(dict(envs))
EdkLogger.info("\n- Prebuild Done -\n")
def LaunchPostbuild(self):
if self.Postbuild:
EdkLogger.info("\n- Postbuild Start -\n")
if sys.platform == "win32":
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
else:
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
            if Process.returncode != 0:
                EdkLogger.error("Postbuild", POSTBUILD_ERROR, 'Postbuild process was not successful!')
EdkLogger.info("\n- Postbuild Done -\n")
## Build a module or platform
#
    # Create autogen code and makefile for a module or platform, and then launch the
    # "make" command to build it
#
# @param Target The target of build command
# @param Platform The platform file
# @param Module The module file
# @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
# @param ToolChain The name of toolchain to build
# @param Arch The arch of the module/platform
# @param CreateDepModuleCodeFile Flag used to indicate creating code
# for dependent modules/Libraries
# @param CreateDepModuleMakeFile Flag used to indicate creating makefile
# for dependent modules/Libraries
#
def _BuildPa(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False, FfsCommand={}):
if AutoGenObject is None:
return False
# skip file generation for cleanxxx targets, run and fds target
if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or Target == 'genc':
self.Progress.Start("Generating code")
AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
self.Progress.Stop("done!")
if Target == "genc":
return True
if not self.SkipAutoGen or Target == 'genmake':
self.Progress.Start("Generating makefile")
AutoGenObject.CreateMakeFile(CreateDepsMakeFile, FfsCommand)
self.Progress.Stop("done!")
if Target == "genmake":
return True
else:
# always recreate top/platform makefile when clean, just in case of inconsistency
AutoGenObject.CreateCodeFile(False)
AutoGenObject.CreateMakeFile(False)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand is None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
makefile = GenMake.BuildFile(AutoGenObject)._FILE_NAME_[GenMake.gMakeType]
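        # makefile is the file name of the generated makefile for the active make type,
        # looked up from GenMake.BuildFile (typically 'Makefile' for nmake or 'GNUmakefile' for gmake)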
# run
if Target == 'run':
RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
Command = '.\SecMain'
os.chdir(RunDir)
LaunchCommand(Command, RunDir)
return True
# build modules
if BuildModule:
BuildCommand = BuildCommand + [Target]
LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# build library
if Target == 'libraries':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# build module
if Target == 'modules':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Mod in AutoGenObject.ModuleBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Mod, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# cleanlib
if Target == 'cleanlib':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# clean
if Target == 'clean':
for Mod in AutoGenObject.ModuleBuildDirectoryList:
ModMakefile = os.path.normpath(os.path.join(Mod, makefile))
if os.path.exists(ModMakefile):
NewBuildCommand = BuildCommand + ['-f', ModMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# cleanall
if Target == 'cleanall':
try:
#os.rmdir(AutoGenObject.BuildDir)
RemoveDirectory(AutoGenObject.BuildDir, True)
except WindowsError, X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
## Build a module or platform
#
    # Create autogen code and makefile for a module or platform, and then launch the
    # "make" command to build it
#
# @param Target The target of build command
# @param Platform The platform file
# @param Module The module file
# @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
# @param ToolChain The name of toolchain to build
# @param Arch The arch of the module/platform
# @param CreateDepModuleCodeFile Flag used to indicate creating code
# for dependent modules/Libraries
# @param CreateDepModuleMakeFile Flag used to indicate creating makefile
# for dependent modules/Libraries
#
def _Build(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False):
if AutoGenObject is None:
return False
# skip file generation for cleanxxx targets, run and fds target
if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or Target == 'genc':
self.Progress.Start("Generating code")
AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
self.Progress.Stop("done!")
if Target == "genc":
return True
if not self.SkipAutoGen or Target == 'genmake':
self.Progress.Start("Generating makefile")
AutoGenObject.CreateMakeFile(CreateDepsMakeFile)
#AutoGenObject.CreateAsBuiltInf()
self.Progress.Stop("done!")
if Target == "genmake":
return True
else:
# always recreate top/platform makefile when clean, just in case of inconsistency
AutoGenObject.CreateCodeFile(False)
AutoGenObject.CreateMakeFile(False)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand is None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
# build modules
if BuildModule:
if Target != 'fds':
BuildCommand = BuildCommand + [Target]
AutoGenObject.BuildTime = LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# genfds
if Target == 'fds':
LaunchCommand(AutoGenObject.GenFdsCommand, AutoGenObject.MakeFileDir)
return True
# run
if Target == 'run':
RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
Command = '.\SecMain'
os.chdir(RunDir)
LaunchCommand(Command, RunDir)
return True
# build library
if Target == 'libraries':
pass
# not build modules
# cleanall
if Target == 'cleanall':
try:
#os.rmdir(AutoGenObject.BuildDir)
RemoveDirectory(AutoGenObject.BuildDir, True)
except WindowsError, X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
## Rebase module image and Get function address for the input module list.
#
def _RebaseModule (self, MapBuffer, BaseAddress, ModuleList, AddrIsOffset = True, ModeIsSmm = False):
if ModeIsSmm:
AddrIsOffset = False
for InfFile in ModuleList:
sys.stdout.write (".")
sys.stdout.flush()
ModuleInfo = ModuleList[InfFile]
ModuleName = ModuleInfo.BaseName
ModuleOutputImage = ModuleInfo.Image.FileName
ModuleDebugImage = os.path.join(ModuleInfo.DebugDir, ModuleInfo.BaseName + '.efi')
## for SMM module in SMRAM, the SMRAM will be allocated from base to top.
if not ModeIsSmm:
BaseAddress = BaseAddress - ModuleInfo.Image.Size
#
# Update Image to new BaseAddress by GenFw tool
#
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
else:
#
# Set new address to the section header only for SMM driver.
#
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
#
            # Collect function addresses from the map file
#
ImageMapTable = ModuleOutputImage.replace('.efi', '.map')
FunctionList = []
if os.path.exists(ImageMapTable):
OrigImageBaseAddress = 0
ImageMap = open(ImageMapTable, 'r')
for LinStr in ImageMap:
if len (LinStr.strip()) == 0:
continue
#
# Get the preferred address set on link time.
#
if LinStr.find ('Preferred load address is') != -1:
StrList = LinStr.split()
OrigImageBaseAddress = int (StrList[len(StrList) - 1], 16)
StrList = LinStr.split()
if len (StrList) > 4:
if StrList[3] == 'f' or StrList[3] == 'F':
Name = StrList[1]
RelativeAddress = int (StrList[2], 16) - OrigImageBaseAddress
FunctionList.append ((Name, RelativeAddress))
if ModuleInfo.Arch == 'IPF' and Name.endswith('_ModuleEntryPoint'):
#
# Get the real entry point address for IPF image.
#
ModuleInfo.Image.EntryPoint = RelativeAddress
ImageMap.close()
#
# Add general information.
#
if ModeIsSmm:
MapBuffer.write('\n\n%s (Fixed SMRAM Offset, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
elif AddrIsOffset:
MapBuffer.write('\n\n%s (Fixed Memory Offset, BaseAddress=-0x%010X, EntryPoint=-0x%010X)\n' % (ModuleName, 0 - BaseAddress, 0 - (BaseAddress + ModuleInfo.Image.EntryPoint)))
else:
MapBuffer.write('\n\n%s (Fixed Memory Address, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
#
            # Add GUID and general section information.
#
TextSectionAddress = 0
DataSectionAddress = 0
for SectionHeader in ModuleInfo.Image.SectionHeaderList:
if SectionHeader[0] == '.text':
TextSectionAddress = SectionHeader[1]
elif SectionHeader[0] in ['.data', '.sdata']:
DataSectionAddress = SectionHeader[1]
if AddrIsOffset:
MapBuffer.write('(GUID=%s, .textbaseaddress=-0x%010X, .databaseaddress=-0x%010X)\n' % (ModuleInfo.Guid, 0 - (BaseAddress + TextSectionAddress), 0 - (BaseAddress + DataSectionAddress)))
else:
MapBuffer.write('(GUID=%s, .textbaseaddress=0x%010X, .databaseaddress=0x%010X)\n' % (ModuleInfo.Guid, BaseAddress + TextSectionAddress, BaseAddress + DataSectionAddress))
#
# Add debug image full path.
#
MapBuffer.write('(IMAGE=%s)\n\n' % (ModuleDebugImage))
#
            # Add function addresses
#
for Function in FunctionList:
if AddrIsOffset:
MapBuffer.write(' -0x%010X %s\n' % (0 - (BaseAddress + Function[1]), Function[0]))
else:
MapBuffer.write(' 0x%010X %s\n' % (BaseAddress + Function[1], Function[0]))
ImageMap.close()
#
# for SMM module in SMRAM, the SMRAM will be allocated from base to top.
#
if ModeIsSmm:
BaseAddress = BaseAddress + ModuleInfo.Image.Size
## Collect MAP information of all FVs
#
def _CollectFvMapBuffer (self, MapBuffer, Wa, ModuleList):
if self.Fdf:
# First get the XIP base address for FV map file.
GuidPattern = re.compile("[-a-fA-F0-9]+")
GuidName = re.compile("\(GUID=[-a-fA-F0-9]+")
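            # GuidPattern matches a bare GUID at the start of a map line (replaced below with the
            # module name); GuidName matches the '(GUID=...' annotation used to emit the debug image path.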
for FvName in Wa.FdfProfile.FvDict:
FvMapBuffer = os.path.join(Wa.FvDir, FvName + '.Fv.map')
if not os.path.exists(FvMapBuffer):
continue
FvMap = open(FvMapBuffer, 'r')
#skip FV size information
FvMap.readline()
FvMap.readline()
FvMap.readline()
FvMap.readline()
for Line in FvMap:
MatchGuid = GuidPattern.match(Line)
if MatchGuid is not None:
#
# Replace GUID with module name
#
GuidString = MatchGuid.group()
if GuidString.upper() in ModuleList:
Line = Line.replace(GuidString, ModuleList[GuidString.upper()].Name)
MapBuffer.write('%s' % (Line))
#
# Add the debug image full path.
#
MatchGuid = GuidName.match(Line)
if MatchGuid is not None:
GuidString = MatchGuid.group().split("=")[1]
if GuidString.upper() in ModuleList:
MapBuffer.write('(IMAGE=%s)\n' % (os.path.join(ModuleList[GuidString.upper()].DebugDir, ModuleList[GuidString.upper()].Name + '.efi')))
FvMap.close()
## Collect MAP information of all modules
#
def _CollectModuleMapBuffer (self, MapBuffer, ModuleList):
sys.stdout.write ("Generate Load Module At Fix Address Map")
sys.stdout.flush()
PatchEfiImageList = []
PeiModuleList = {}
BtModuleList = {}
RtModuleList = {}
SmmModuleList = {}
PeiSize = 0
BtSize = 0
RtSize = 0
        # reserve 4K in SMRAM so that SMM module addresses do not start from 0.
SmmSize = 0x1000
IsIpfPlatform = False
if 'IPF' in self.ArchList:
IsIpfPlatform = True
for ModuleGuid in ModuleList:
Module = ModuleList[ModuleGuid]
GlobalData.gProcessingFile = "%s [%s, %s, %s]" % (Module.MetaFile, Module.Arch, Module.ToolChain, Module.BuildTarget)
OutputImageFile = ''
for ResultFile in Module.CodaTargetList:
if str(ResultFile.Target).endswith('.efi'):
#
# module list for PEI, DXE, RUNTIME and SMM
#
OutputImageFile = os.path.join(Module.OutputDir, Module.Name + '.efi')
ImageClass = PeImageClass (OutputImageFile)
if not ImageClass.IsValid:
EdkLogger.error("build", FILE_PARSE_FAILURE, ExtraData=ImageClass.ErrorInfo)
ImageInfo = PeImageInfo(Module.Name, Module.Guid, Module.Arch, Module.OutputDir, Module.DebugDir, ImageClass)
if Module.ModuleType in ['PEI_CORE', 'PEIM', 'COMBINED_PEIM_DRIVER', 'PIC_PEIM', 'RELOCATABLE_PEIM', 'DXE_CORE']:
PeiModuleList[Module.MetaFile] = ImageInfo
PeiSize += ImageInfo.Image.Size
elif Module.ModuleType in ['BS_DRIVER', 'DXE_DRIVER', 'UEFI_DRIVER']:
BtModuleList[Module.MetaFile] = ImageInfo
BtSize += ImageInfo.Image.Size
elif Module.ModuleType in ['DXE_RUNTIME_DRIVER', 'RT_DRIVER', 'DXE_SAL_DRIVER', 'SAL_RT_DRIVER']:
RtModuleList[Module.MetaFile] = ImageInfo
#IPF runtime driver needs to be at 2 page alignment.
if IsIpfPlatform and ImageInfo.Image.Size % 0x2000 != 0:
ImageInfo.Image.Size = (ImageInfo.Image.Size / 0x2000 + 1) * 0x2000
RtSize += ImageInfo.Image.Size
elif Module.ModuleType in ['SMM_CORE', 'DXE_SMM_DRIVER', 'MM_STANDALONE', 'MM_CORE_STANDALONE']:
SmmModuleList[Module.MetaFile] = ImageInfo
SmmSize += ImageInfo.Image.Size
if Module.ModuleType == 'DXE_SMM_DRIVER':
PiSpecVersion = Module.Module.Specification.get('PI_SPECIFICATION_VERSION', '0x00000000')
# for PI specification < PI1.1, DXE_SMM_DRIVER also runs as BOOT time driver.
if int(PiSpecVersion, 16) < 0x0001000A:
BtModuleList[Module.MetaFile] = ImageInfo
BtSize += ImageInfo.Image.Size
break
#
# EFI image is final target.
# Check EFI image contains patchable FixAddress related PCDs.
#
if OutputImageFile != '':
ModuleIsPatch = False
for Pcd in Module.ModulePcdList:
if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_LIST:
ModuleIsPatch = True
break
if not ModuleIsPatch:
for Pcd in Module.LibraryPcdList:
if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_LIST:
ModuleIsPatch = True
break
if not ModuleIsPatch:
continue
#
# Module includes the patchable load fix address PCDs.
# It will be fixed up later.
#
PatchEfiImageList.append (OutputImageFile)
#
# Get Top Memory address
#
ReservedRuntimeMemorySize = 0
TopMemoryAddress = 0
if self.LoadFixAddress == 0xFFFFFFFFFFFFFFFF:
TopMemoryAddress = 0
else:
TopMemoryAddress = self.LoadFixAddress
if TopMemoryAddress < RtSize + BtSize + PeiSize:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is too low to load driver")
# Make IPF runtime driver at 2 page alignment.
if IsIpfPlatform:
ReservedRuntimeMemorySize = TopMemoryAddress % 0x2000
RtSize = RtSize + ReservedRuntimeMemorySize
#
# Patch FixAddress related PCDs into EFI image
#
for EfiImage in PatchEfiImageList:
EfiImageMap = EfiImage.replace('.efi', '.map')
if not os.path.exists(EfiImageMap):
continue
#
# Get PCD offset in EFI image by GenPatchPcdTable function
#
PcdTable = parsePcdInfoFromMapFile(EfiImageMap, EfiImage)
#
# Patch real PCD value by PatchPcdValue tool
#
for PcdInfo in PcdTable:
ReturnValue = 0
if PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE, str (PeiSize / 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE, str (BtSize / 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE, str (RtSize / 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE and len (SmmModuleList) > 0:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE, str (SmmSize / 0x1000))
if ReturnValue != 0:
EdkLogger.error("build", PARAMETER_INVALID, "Patch PCD value failed", ExtraData=ErrorInfo)
MapBuffer.write('PEI_CODE_PAGE_NUMBER = 0x%x\n' % (PeiSize / 0x1000))
MapBuffer.write('BOOT_CODE_PAGE_NUMBER = 0x%x\n' % (BtSize / 0x1000))
MapBuffer.write('RUNTIME_CODE_PAGE_NUMBER = 0x%x\n' % (RtSize / 0x1000))
if len (SmmModuleList) > 0:
MapBuffer.write('SMM_CODE_PAGE_NUMBER = 0x%x\n' % (SmmSize / 0x1000))
PeiBaseAddr = TopMemoryAddress - RtSize - BtSize
BtBaseAddr = TopMemoryAddress - RtSize
RtBaseAddr = TopMemoryAddress - ReservedRuntimeMemorySize
self._RebaseModule (MapBuffer, PeiBaseAddr, PeiModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, BtBaseAddr, BtModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, RtBaseAddr, RtModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, 0x1000, SmmModuleList, AddrIsOffset=False, ModeIsSmm=True)
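        # Resulting layout below TopMemoryAddress, growing downward: runtime drivers at the top,
        # then boot-time drivers, then PEI modules; SMM modules are laid out upward from offset
        # 0x1000 in SMRAM.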
MapBuffer.write('\n\n')
sys.stdout.write ("\n")
sys.stdout.flush()
## Save platform Map file
#
def _SaveMapFile (self, MapBuffer, Wa):
#
        # Compose the map file path.
#
MapFilePath = os.path.join(Wa.BuildDir, Wa.Name + '.map')
#
# Save address map into MAP file.
#
SaveFileOnChange(MapFilePath, MapBuffer.getvalue(), False)
MapBuffer.close()
if self.LoadFixAddress != 0:
sys.stdout.write ("\nLoad Module At Fix Address Map file can be found at %s\n" % (MapFilePath))
sys.stdout.flush()
## Build active platform for different build targets and different tool chains
#
def _BuildPlatform(self):
SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
self.BuildReport.AddPlatformReport(Wa)
self.Progress.Stop("done!")
# Add ffs build to makefile
CmdListDict = {}
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd()
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
for Module in Pa.Platform.Modules:
# Get ModuleAutoGen object to generate C code file and makefile
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma is None:
continue
self.BuildModules.append(Ma)
self._BuildPa(self.Target, Pa, FfsCommand=CmdListDict)
# Create MAP file when Load Fix Address is enabled.
if self.Target in ["", "all", "fds"]:
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma is None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
MapBuffer = StringIO('')
if self.LoadFixAddress != 0:
#
# Rebase module to the preferred memory address before GenFds
#
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
if self.Fdf:
#
# create FDS again for the updated EFI image
#
self._Build("fds", Wa)
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile (MapBuffer, Wa)
## Build active module for different build targets, different tool chains and different archs
#
def _BuildModule(self):
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
WorkspaceAutoGenTime = time.time()
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
#
# module build needs platform build information, so get platform
# AutoGen first
#
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress,
self.ModuleFile
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
Wa.CreateMakeFile(False)
# Add ffs build to makefile
CmdListDict = None
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd()
self.Progress.Stop("done!")
MaList = []
ExitFlag = threading.Event()
ExitFlag.clear()
self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
for Arch in Wa.ArchList:
AutoGenStart = time.time()
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
for Module in Pa.Platform.Modules:
if self.ModuleFile.Dir == Module.Dir and self.ModuleFile.Name == Module.Name:
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma is None: continue
MaList.append(Ma)
if Ma.CanSkipbyHash():
self.HashSkipModules.append(Ma)
continue
# Not to auto-gen for targets 'clean', 'cleanlib', 'cleanall', 'run', 'fds'
if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or self.Target == 'genc':
self.Progress.Start("Generating code")
Ma.CreateCodeFile(True)
self.Progress.Stop("done!")
if self.Target == "genc":
return True
if not self.SkipAutoGen or self.Target == 'genmake':
self.Progress.Start("Generating makefile")
if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
del CmdListDict[Module.File, Arch]
else:
Ma.CreateMakeFile(True)
self.Progress.Stop("done!")
if self.Target == "genmake":
return True
self.BuildModules.append(Ma)
self.AutoGenTime += int(round((time.time() - AutoGenStart)))
MakeStart = time.time()
for Ma in self.BuildModules:
if not Ma.IsBinaryModule:
Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
# Break build if any build thread has error
if BuildTask.HasError():
# we need a full version of makefile for platform
ExitFlag.set()
BuildTask.WaitForComplete()
Pa.CreateMakeFile(False)
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Start task scheduler
if not BuildTask.IsOnGoing():
BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
# in case there's an interruption. we need a full version of makefile for platform
Pa.CreateMakeFile(False)
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.MakeTime += int(round((time.time() - MakeStart)))
                MakeContinue = time.time()
ExitFlag.set()
BuildTask.WaitForComplete()
self.CreateAsBuiltInf()
                self.MakeTime += int(round((time.time() - MakeContinue)))
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.BuildReport.AddPlatformReport(Wa, MaList)
if MaList == []:
EdkLogger.error(
'build',
BUILD_ERROR,
"Module for [%s] is not a component of active platform."\
" Please make sure that the ARCH and inf file path are"\
" given in the same as in [%s]" % \
(', '.join(Wa.ArchList), self.PlatformFile),
ExtraData=self.ModuleFile
)
# Create MAP file when Load Fix Address is enabled.
if self.Target == "fds" and self.Fdf:
for Arch in Wa.ArchList:
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma is None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
MapBuffer = StringIO('')
if self.LoadFixAddress != 0:
#
# Rebase module to the preferred memory address before GenFds
#
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
#
# create FDS again for the updated EFI image
#
GenFdsStart = time.time()
self._Build("fds", Wa)
self.GenFdsTime += int(round((time.time() - GenFdsStart)))
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile (MapBuffer, Wa)
def _GenFfsCmd(self):
        # convert a dictionary of Cmd:(Inf, Arch) pairs
        # into a new dictionary of (Inf, Arch):{Cmd, Cmd, ...}
CmdSetDict = defaultdict(set)
GenFfsDict = GenFds.GenFfsMakefile('', GlobalData.gFdfParser, self, self.ArchList, GlobalData)
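        # GenFfsDict maps each generated Ffs command to the (Inf, Arch) pair it belongs to; invert
        # it so that each (Inf, Arch) owns the set of its commands, e.g. (illustrative shape only):
        #     {(SomeModule.inf, 'X64'): {Cmd1, Cmd2, ...}}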
for Cmd in GenFfsDict:
tmpInf, tmpArch = GenFfsDict[Cmd]
CmdSetDict[tmpInf, tmpArch].add(Cmd)
return CmdSetDict
## Build a platform in multi-thread mode
#
def _MultiThreadBuildPlatform(self):
SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
WorkspaceAutoGenTime = time.time()
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
self.BuildReport.AddPlatformReport(Wa)
Wa.CreateMakeFile(False)
# Add ffs build to makefile
CmdListDict = None
if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
CmdListDict = self._GenFfsCmd()
# multi-thread exit flag
ExitFlag = threading.Event()
ExitFlag.clear()
self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
for Arch in Wa.ArchList:
AutoGenStart = time.time()
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
if Pa is None:
continue
ModuleList = []
for Inf in Pa.Platform.Modules:
ModuleList.append(Inf)
# Add the INF only list in FDF
if GlobalData.gFdfParser is not None:
for InfName in GlobalData.gFdfParser.Profile.InfList:
Inf = PathClass(NormPath(InfName), self.WorkspaceDir, Arch)
if Inf in Pa.Platform.Modules:
continue
ModuleList.append(Inf)
for Module in ModuleList:
# Get ModuleAutoGen object to generate C code file and makefile
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma is None:
continue
if Ma.CanSkipbyHash():
self.HashSkipModules.append(Ma)
continue
# Not to auto-gen for targets 'clean', 'cleanlib', 'cleanall', 'run', 'fds'
if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or self.Target == 'genc':
Ma.CreateCodeFile(True)
if self.Target == "genc":
continue
if not self.SkipAutoGen or self.Target == 'genmake':
if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
del CmdListDict[Module.File, Arch]
else:
Ma.CreateMakeFile(True)
if self.Target == "genmake":
continue
self.BuildModules.append(Ma)
self.Progress.Stop("done!")
self.AutoGenTime += int(round((time.time() - AutoGenStart)))
MakeStart = time.time()
for Ma in self.BuildModules:
# Generate build task for the module
if not Ma.IsBinaryModule:
Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
# Break build if any build thread has error
if BuildTask.HasError():
# we need a full version of makefile for platform
ExitFlag.set()
BuildTask.WaitForComplete()
Pa.CreateMakeFile(False)
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Start task scheduler
if not BuildTask.IsOnGoing():
BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
# in case there's an interruption. we need a full version of makefile for platform
Pa.CreateMakeFile(False)
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
self.MakeTime += int(round((time.time() - MakeStart)))
MakeContinue = time.time()
#
# Save temp tables to a TmpTableDict.
#
for Key in Wa.BuildDatabase._CACHE_:
if Wa.BuildDatabase._CACHE_[Key]._RawData and Wa.BuildDatabase._CACHE_[Key]._RawData._Table and Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table:
if TemporaryTablePattern.match(Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table):
TmpTableDict[Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table] = Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Cur
#
#
# All modules have been put in build tasks queue. Tell task scheduler
# to exit if all tasks are completed
#
ExitFlag.set()
BuildTask.WaitForComplete()
self.CreateAsBuiltInf()
self.MakeTime += int(round((time.time() - MakeContinue)))
#
# Check for build error, and raise exception if one
# has been signaled.
#
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Create MAP file when Load Fix Address is enabled.
if self.Target in ["", "all", "fds"]:
for Arch in Wa.ArchList:
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma is None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
#
# Rebase module to the preferred memory address before GenFds
#
MapBuffer = StringIO('')
if self.LoadFixAddress != 0:
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
if self.Fdf:
#
# Generate FD image if there's a FDF file found
#
GenFdsStart = time.time()
LaunchCommand(Wa.GenFdsCommand, os.getcwd())
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
self.GenFdsTime += int(round((time.time() - GenFdsStart)))
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile(MapBuffer, Wa)
## Generate GuidedSectionTools.txt in the FV directories.
#
def CreateGuidedSectionToolsFile(self):
for BuildTarget in self.BuildTargetList:
for ToolChain in self.ToolChainList:
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag
)
FvDir = Wa.FvDir
if not os.path.exists(FvDir):
continue
for Arch in self.ArchList:
# Build up the list of supported architectures for this build
prefix = '%s_%s_%s_' % (BuildTarget, ToolChain, Arch)
# Look through the tool definitions for GUIDed tools
guidAttribs = []
for (attrib, value) in self.ToolDef.ToolsDefTxtDictionary.iteritems():
if attrib.upper().endswith('_GUID'):
split = attrib.split('_')
thisPrefix = '_'.join(split[0:3]) + '_'
if thisPrefix == prefix:
guid = self.ToolDef.ToolsDefTxtDictionary[attrib]
guid = guid.lower()
toolName = split[3]
path = '_'.join(split[0:4]) + '_PATH'
path = self.ToolDef.ToolsDefTxtDictionary[path]
path = self.GetFullPathOfTool(path)
guidAttribs.append((guid, toolName, path))
# Write out GuidedSecTools.txt
toolsFile = os.path.join(FvDir, 'GuidedSectionTools.txt')
toolsFile = open(toolsFile, 'wt')
for guidedSectionTool in guidAttribs:
print >> toolsFile, ' '.join(guidedSectionTool)
toolsFile.close()
## Returns the full path of the tool.
#
def GetFullPathOfTool (self, tool):
if os.path.exists(tool):
return os.path.realpath(tool)
else:
# We need to search for the tool using the
# PATH environment variable.
for dirInPath in os.environ['PATH'].split(os.pathsep):
foundPath = os.path.join(dirInPath, tool)
if os.path.exists(foundPath):
return os.path.realpath(foundPath)
# If the tool was not found in the path then we just return
# the input tool.
return tool
## Launch the module or platform build
#
def Launch(self):
if not self.ModuleFile:
if not self.SpawnMode or self.Target not in ["", "all"]:
self.SpawnMode = False
self._BuildPlatform()
else:
self._MultiThreadBuildPlatform()
self.CreateGuidedSectionToolsFile()
else:
self.SpawnMode = False
self._BuildModule()
if self.Target == 'cleanall':
self.Db.Close()
RemoveDirectory(os.path.dirname(GlobalData.gDatabasePath), True)
def CreateAsBuiltInf(self):
for Module in self.BuildModules:
Module.CreateAsBuiltInf()
for Module in self.HashSkipModules:
Module.CreateAsBuiltInf(True)
self.BuildModules = []
self.HashSkipModules = []
## Do some clean-up works when error occurred
def Relinquish(self):
OldLogLevel = EdkLogger.GetLevel()
EdkLogger.SetLevel(EdkLogger.ERROR)
#self.DumpBuildData()
Utils.Progressor.Abort()
if self.SpawnMode == True:
BuildTask.Abort()
EdkLogger.SetLevel(OldLogLevel)
def DumpBuildData(self):
CacheDirectory = os.path.dirname(GlobalData.gDatabasePath)
Utils.CreateDirectory(CacheDirectory)
Utils.DataDump(Utils.gFileTimeStampCache, os.path.join(CacheDirectory, "gFileTimeStampCache"))
Utils.DataDump(Utils.gDependencyDatabase, os.path.join(CacheDirectory, "gDependencyDatabase"))
def RestoreBuildData(self):
FilePath = os.path.join(os.path.dirname(GlobalData.gDatabasePath), "gFileTimeStampCache")
if Utils.gFileTimeStampCache == {} and os.path.isfile(FilePath):
Utils.gFileTimeStampCache = Utils.DataRestore(FilePath)
if Utils.gFileTimeStampCache is None:
Utils.gFileTimeStampCache = {}
FilePath = os.path.join(os.path.dirname(GlobalData.gDatabasePath), "gDependencyDatabase")
if Utils.gDependencyDatabase == {} and os.path.isfile(FilePath):
Utils.gDependencyDatabase = Utils.DataRestore(FilePath)
if Utils.gDependencyDatabase is None:
Utils.gDependencyDatabase = {}
def ParseDefines(DefineList=[]):
DefineDict = {}
if DefineList is not None:
for Define in DefineList:
DefineTokenList = Define.split("=", 1)
if not GlobalData.gMacroNamePattern.match(DefineTokenList[0]):
EdkLogger.error('build', FORMAT_INVALID,
"The macro name must be in the pattern [A-Z][A-Z0-9_]*",
ExtraData=DefineTokenList[0])
if len(DefineTokenList) == 1:
DefineDict[DefineTokenList[0]] = "TRUE"
else:
DefineDict[DefineTokenList[0]] = DefineTokenList[1].strip()
return DefineDict
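# Illustrative example only (the macro names below are placeholders): ParseDefines turns
# the "-D Name[=Value]" items into a dictionary, defaulting value-less macros to "TRUE".
#
#   ParseDefines(["SECURE_BOOT_ENABLE=TRUE", "DEBUG_ON_SERIAL_PORT"])
#   # -> {'SECURE_BOOT_ENABLE': 'TRUE', 'DEBUG_ON_SERIAL_PORT': 'TRUE'}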
gParamCheck = []
def SingleCheckCallback(option, opt_str, value, parser):
if option not in gParamCheck:
setattr(parser.values, option.dest, value)
gParamCheck.append(option)
else:
parser.error("Option %s only allows one instance in command line!" % option)
def LogBuildTime(Time):
if Time:
TimeDurStr = ''
TimeDur = time.gmtime(Time)
if TimeDur.tm_yday > 1:
TimeDurStr = time.strftime("%H:%M:%S", TimeDur) + ", %d day(s)" % (TimeDur.tm_yday - 1)
else:
TimeDurStr = time.strftime("%H:%M:%S", TimeDur)
return TimeDurStr
else:
return None
## Parse command line options
#
# Using standard Python module optparse to parse command line option of this tool.
#
# @retval Opt A optparse.Values object containing the parsed options
# @retval Args Target of build command
#
def MyOptionParser():
Parser = OptionParser(description=__copyright__, version=__version__, prog="build.exe", usage="%prog [options] [all|fds|genc|genmake|clean|cleanall|cleanlib|modules|libraries|run]")
Parser.add_option("-a", "--arch", action="append", type="choice", choices=['IA32', 'X64', 'IPF', 'EBC', 'ARM', 'AARCH64'], dest="TargetArch",
help="ARCHS is one of list: IA32, X64, IPF, ARM, AARCH64 or EBC, which overrides target.txt's TARGET_ARCH definition. To specify more archs, please repeat this option.")
Parser.add_option("-p", "--platform", action="callback", type="string", dest="PlatformFile", callback=SingleCheckCallback,
help="Build the platform specified by the DSC file name argument, overriding target.txt's ACTIVE_PLATFORM definition.")
Parser.add_option("-m", "--module", action="callback", type="string", dest="ModuleFile", callback=SingleCheckCallback,
help="Build the module specified by the INF file name argument.")
Parser.add_option("-b", "--buildtarget", type="string", dest="BuildTarget", help="Using the TARGET to build the platform, overriding target.txt's TARGET definition.",
action="append")
Parser.add_option("-t", "--tagname", action="append", type="string", dest="ToolChain",
help="Using the Tool Chain Tagname to build the platform, overriding target.txt's TOOL_CHAIN_TAG definition.")
Parser.add_option("-x", "--sku-id", action="callback", type="string", dest="SkuId", callback=SingleCheckCallback,
help="Using this name of SKU ID to build the platform, overriding SKUID_IDENTIFIER in DSC file.")
Parser.add_option("-n", action="callback", type="int", dest="ThreadNumber", callback=SingleCheckCallback,
help="Build the platform using multi-threaded compiler. The value overrides target.txt's MAX_CONCURRENT_THREAD_NUMBER. When value is set to 0, tool automatically detect number of "\
"processor threads, set value to 1 means disable multi-thread build, and set value to more than 1 means user specify the threads number to build.")
Parser.add_option("-f", "--fdf", action="callback", type="string", dest="FdfFile", callback=SingleCheckCallback,
help="The name of the FDF file to use, which overrides the setting in the DSC file.")
Parser.add_option("-r", "--rom-image", action="append", type="string", dest="RomImage", default=[],
help="The name of FD to be generated. The name must be from [FD] section in FDF file.")
Parser.add_option("-i", "--fv-image", action="append", type="string", dest="FvImage", default=[],
help="The name of FV to be generated. The name must be from [FV] section in FDF file.")
Parser.add_option("-C", "--capsule-image", action="append", type="string", dest="CapName", default=[],
help="The name of Capsule to be generated. The name must be from [Capsule] section in FDF file.")
Parser.add_option("-u", "--skip-autogen", action="store_true", dest="SkipAutoGen", help="Skip AutoGen step.")
Parser.add_option("-e", "--re-parse", action="store_true", dest="Reparse", help="Re-parse all meta-data files.")
Parser.add_option("-c", "--case-insensitive", action="store_true", dest="CaseInsensitive", default=False, help="Don't check case of file name.")
Parser.add_option("-w", "--warning-as-error", action="store_true", dest="WarningAsError", help="Treat warning in tools as error.")
Parser.add_option("-j", "--log", action="store", dest="LogFile", help="Put log in specified file as well as on console.")
Parser.add_option("-s", "--silent", action="store_true", type=None, dest="SilentMode",
help="Make use of silent mode of (n)make.")
Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed, "\
"including library instances selected, final dependency expression, "\
"and warning messages, etc.")
Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
Parser.add_option("-D", "--define", action="append", type="string", dest="Macros", help="Macro: \"Name [= Value]\".")
Parser.add_option("-y", "--report-file", action="store", dest="ReportFile", help="Create/overwrite the report to the specified filename.")
Parser.add_option("-Y", "--report-type", action="append", type="choice", choices=['PCD','LIBRARY','FLASH','DEPEX','BUILD_FLAGS','FIXED_ADDRESS','HASH','EXECUTION_ORDER'], dest="ReportType", default=[],
help="Flags that control the type of build report to generate. Must be one of: [PCD, LIBRARY, FLASH, DEPEX, BUILD_FLAGS, FIXED_ADDRESS, HASH, EXECUTION_ORDER]. "\
"To specify more than one flag, repeat this option on the command line and the default flag set is [PCD, LIBRARY, FLASH, DEPEX, HASH, BUILD_FLAGS, FIXED_ADDRESS]")
Parser.add_option("-F", "--flag", action="store", type="string", dest="Flag",
help="Specify the specific option to parse EDK UNI file. Must be one of: [-c, -s]. -c is for EDK framework UNI file, and -s is for EDK UEFI UNI file. "\
"This option can also be specified by setting *_*_*_BUILD_FLAGS in [BuildOptions] section of platform DSC. If they are both specified, this value "\
"will override the setting in [BuildOptions] section of platform DSC.")
Parser.add_option("-N", "--no-cache", action="store_true", dest="DisableCache", default=False, help="Disable build cache mechanism")
Parser.add_option("--conf", action="store", type="string", dest="ConfDirectory", help="Specify the customized Conf directory.")
Parser.add_option("--check-usage", action="store_true", dest="CheckUsage", default=False, help="Check usage content of entries listed in INF file.")
Parser.add_option("--ignore-sources", action="store_true", dest="IgnoreSources", default=False, help="Focus to a binary build and ignore all source files")
Parser.add_option("--pcd", action="append", dest="OptionPcd", help="Set PCD value by command line. Format: \"PcdName=Value\" ")
Parser.add_option("-l", "--cmd-len", action="store", type="int", dest="CommandLength", help="Specify the maximum line length of build command. Default is 4096.")
Parser.add_option("--hash", action="store_true", dest="UseHashCache", default=False, help="Enable hash-based caching during build process.")
Parser.add_option("--binary-destination", action="store", type="string", dest="BinCacheDest", help="Generate a cache of binary files in the specified directory.")
Parser.add_option("--binary-source", action="store", type="string", dest="BinCacheSource", help="Consume a cache of binary files from the specified directory.")
Parser.add_option("--genfds-multi-thread", action="store_true", dest="GenfdsMultiThread", default=False, help="Enable GenFds multi thread to generate ffs file.")
(Opt, Args) = Parser.parse_args()
return (Opt, Args)
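# Example invocation (illustrative; the platform DSC, tool chain tag and values are
# placeholders, the option letters come from the parser defined above):
#
#   build -p MdeModulePkg/MdeModulePkg.dsc -a X64 -t GCC5 -b DEBUG -n 4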
## Tool entrance method
#
# This method mainly dispatch specific methods per the command line options.
# If no error found, return zero value so the caller of this tool can know
# if it's executed successfully or not.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
def Main():
StartTime = time.time()
# Initialize log system
EdkLogger.Initialize()
GlobalData.gCommand = sys.argv[1:]
#
# Parse the options and args
#
(Option, Target) = MyOptionParser()
GlobalData.gOptions = Option
GlobalData.gCaseInsensitive = Option.CaseInsensitive
# Set log level
if Option.verbose is not None:
EdkLogger.SetLevel(EdkLogger.VERBOSE)
elif Option.quiet is not None:
EdkLogger.SetLevel(EdkLogger.QUIET)
elif Option.debug is not None:
EdkLogger.SetLevel(Option.debug + 1)
else:
EdkLogger.SetLevel(EdkLogger.INFO)
if Option.LogFile is not None:
EdkLogger.SetLogFile(Option.LogFile)
if Option.WarningAsError == True:
EdkLogger.SetWarningAsError()
if platform.platform().find("Windows") >= 0:
GlobalData.gIsWindows = True
else:
GlobalData.gIsWindows = False
EdkLogger.quiet("Build environment: %s" % platform.platform())
EdkLogger.quiet(time.strftime("Build start time: %H:%M:%S, %b.%d %Y\n", time.localtime()));
ReturnCode = 0
MyBuild = None
BuildError = True
try:
if len(Target) == 0:
Target = "all"
elif len(Target) >= 2:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "More than one targets are not supported.",
ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
else:
Target = Target[0].lower()
if Target not in gSupportedTarget:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "Not supported target [%s]." % Target,
ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
#
# Check environment variable: EDK_TOOLS_PATH, WORKSPACE, PATH
#
CheckEnvVariable()
GlobalData.gCommandLineDefines.update(ParseDefines(Option.Macros))
Workspace = os.getenv("WORKSPACE")
#
# Get files real name in workspace dir
#
GlobalData.gAllFiles = Utils.DirCache(Workspace)
WorkingDirectory = os.getcwd()
if not Option.ModuleFile:
FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.inf')))
FileNum = len(FileList)
if FileNum >= 2:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "There are %d INF files in %s." % (FileNum, WorkingDirectory),
ExtraData="Please use '-m <INF_FILE_PATH>' switch to choose one.")
elif FileNum == 1:
Option.ModuleFile = NormFile(FileList[0], Workspace)
if Option.ModuleFile:
if os.path.isabs (Option.ModuleFile):
if os.path.normcase (os.path.normpath(Option.ModuleFile)).find (Workspace) == 0:
Option.ModuleFile = NormFile(os.path.normpath(Option.ModuleFile), Workspace)
Option.ModuleFile = PathClass(Option.ModuleFile, Workspace)
ErrorCode, ErrorInfo = Option.ModuleFile.Validate(".inf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.PlatformFile is not None:
if os.path.isabs (Option.PlatformFile):
if os.path.normcase (os.path.normpath(Option.PlatformFile)).find (Workspace) == 0:
Option.PlatformFile = NormFile(os.path.normpath(Option.PlatformFile), Workspace)
Option.PlatformFile = PathClass(Option.PlatformFile, Workspace)
if Option.FdfFile is not None:
if os.path.isabs (Option.FdfFile):
if os.path.normcase (os.path.normpath(Option.FdfFile)).find (Workspace) == 0:
Option.FdfFile = NormFile(os.path.normpath(Option.FdfFile), Workspace)
Option.FdfFile = PathClass(Option.FdfFile, Workspace)
ErrorCode, ErrorInfo = Option.FdfFile.Validate(".fdf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.Flag is not None and Option.Flag not in ['-c', '-s']:
EdkLogger.error("build", OPTION_VALUE_INVALID, "UNI flag must be one of -c or -s")
MyBuild = Build(Target, Workspace, Option)
GlobalData.gCommandLineDefines['ARCH'] = ' '.join(MyBuild.ArchList)
if not (MyBuild.LaunchPrebuildFlag and os.path.exists(MyBuild.PlatformBuildPath)):
MyBuild.Launch()
# Drop temp tables to avoid database locked.
for TmpTableName in TmpTableDict:
SqlCommand = """drop table IF EXISTS %s""" % TmpTableName
TmpTableDict[TmpTableName].execute(SqlCommand)
#MyBuild.DumpBuildData()
#
# All job done, no error found and no exception raised
#
BuildError = False
except FatalError, X:
if MyBuild is not None:
# for multi-thread build exits safely
MyBuild.Relinquish()
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = X.args[0]
except Warning, X:
# error from Fdf parser
if MyBuild is not None:
# for multi-thread build exits safely
MyBuild.Relinquish()
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
else:
EdkLogger.error(X.ToolName, FORMAT_INVALID, File=X.FileName, Line=X.LineNumber, ExtraData=X.Message, RaiseError=False)
ReturnCode = FORMAT_INVALID
except KeyboardInterrupt:
ReturnCode = ABORT_ERROR
if Option is not None and Option.debug is not None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
except:
if MyBuild is not None:
# for multi-thread build exits safely
MyBuild.Relinquish()
# try to get the meta-file from the object causing exception
Tb = sys.exc_info()[-1]
MetaFile = GlobalData.gProcessingFile
while Tb is not None:
if 'self' in Tb.tb_frame.f_locals and hasattr(Tb.tb_frame.f_locals['self'], 'MetaFile'):
MetaFile = Tb.tb_frame.f_locals['self'].MetaFile
Tb = Tb.tb_next
EdkLogger.error(
"\nbuild",
CODE_ERROR,
"Unknown fatal error when processing [%s]" % MetaFile,
ExtraData="\n(Please send email to edk2-devel@lists.01.org for help, attaching following call stack trace!)\n",
RaiseError=False
)
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = CODE_ERROR
finally:
Utils.Progressor.Abort()
Utils.ClearDuplicatedInf()
if ReturnCode == 0:
try:
MyBuild.LaunchPostbuild()
Conclusion = "Done"
except:
Conclusion = "Failed"
elif ReturnCode == ABORT_ERROR:
Conclusion = "Aborted"
else:
Conclusion = "Failed"
FinishTime = time.time()
BuildDuration = time.gmtime(int(round(FinishTime - StartTime)))
BuildDurationStr = ""
if BuildDuration.tm_yday > 1:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration) + ", %d day(s)" % (BuildDuration.tm_yday - 1)
else:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration)
if MyBuild is not None:
if not BuildError:
MyBuild.BuildReport.GenerateReport(BuildDurationStr, LogBuildTime(MyBuild.AutoGenTime), LogBuildTime(MyBuild.MakeTime), LogBuildTime(MyBuild.GenFdsTime))
MyBuild.Db.Close()
EdkLogger.SetLevel(EdkLogger.QUIET)
EdkLogger.quiet("\n- %s -" % Conclusion)
EdkLogger.quiet(time.strftime("Build end time: %H:%M:%S, %b.%d %Y", time.localtime()))
EdkLogger.quiet("Build total time: %s\n" % BuildDurationStr)
return ReturnCode
if __name__ == '__main__':
r = Main()
## 0-127 is a safe return range, and 1 is a standard default error
if r < 0 or r > 127: r = 1
sys.exit(r)
|
input.py
|
from displayarray.subscriber_window import window_commands
import threading
import time
from typing import Callable
class MouseEvent(object):
"""Holds all the OpenCV mouse event information."""
def __init__(self, event, x, y, flags, param):
self.event = event
self.x = x
self.y = y
self.flags = flags
self.param = param
def __repr__(self):
return self.__str__()
def __str__(self):
return "event:{}\nx,y:{},{}\nflags:{}\nparam:{}\n".format(
self.event, self.x, self.y, self.flags, self.param
)
class _mouse_thread(object): # NOSONAR
"""Run a function on mouse information that is received by the window."""
def __init__(self, f):
self.f = f
self.sub_mouse = window_commands.mouse_pub.make_sub()
def __call__(self, *args, **kwargs):
"""Call the function this was set up with."""
self.f(self.sub_mouse, *args, **kwargs)
class _mouse_loop_thread(object): # NOSONAR
"""Run a function on mouse information that is received by the window, in the main loop."""
def __init__(self, f, run_when_no_events=False, fps=60):
self.f = f
self.sub_mouse = window_commands.mouse_pub.make_sub()
self.sub_cmd = window_commands.win_cmd_pub.make_sub()
self.sub_cmd.return_on_no_data = ""
self.run_when_no_events = run_when_no_events
self.fps = fps
def __call__(self, *args, **kwargs):
"""Run the function this was set up with in a loop."""
msg_cmd = ""
while msg_cmd != "quit":
mouse_xyzclick = self.sub_mouse.get(blocking=True) # type: MouseEvent
if mouse_xyzclick is not self.sub_mouse.return_on_no_data:
self.f(mouse_xyzclick, *args, **kwargs)
elif self.run_when_no_events:
self.f(None, *args, **kwargs)
msg_cmd = self.sub_cmd.get()
time.sleep(1.0 / self.fps)
window_commands.quit(force_all_read=False)
class mouse_loop(object): # NOSONAR
"""Run a function on mouse information that is received by the window, continuously in a new thread."""
def __init__(self, f, run_when_no_events=False):
self.t = threading.Thread(target=_mouse_loop_thread(f, run_when_no_events))
self.t.start()
def __call__(self, *args, **kwargs):
"""Return the thread that was started with the function passed in."""
return self.t
class _key_thread(object): # NOSONAR
"""Run a function on mouse information that is received by the window."""
def __init__(self, f):
self.f = f
self.sub_key = window_commands.key_pub.make_sub()
def __call__(self, *args, **kwargs):
"""Call the function this was set up with."""
self.f(self.sub_key, *args, **kwargs)
class _key_loop_thread(object): # NOSONAR
"""Run a function on mouse information that is received by the window, in the main loop."""
def __init__(self, f, run_when_no_events=False, fps=60):
self.f = f
self.sub_key = window_commands.key_pub.make_sub()
self.sub_cmd = window_commands.win_cmd_pub.make_sub()
self.sub_cmd.return_on_no_data = ""
self.run_when_no_events = run_when_no_events
self.fps = fps
def __call__(self, *args, **kwargs):
"""Run the function this was set up with in a loop."""
msg_cmd = ""
while msg_cmd != "quit":
key_chr = self.sub_key.get()  # type: str
if key_chr is not self.sub_key.return_on_no_data:
self.f(key_chr, *args, **kwargs)
elif self.run_when_no_events:
self.f(None, *args, **kwargs)
msg_cmd = self.sub_cmd.get()
time.sleep(1.0 / self.fps)
window_commands.quit(force_all_read=False)
class key_loop(object): # NOSONAR
"""Run a function on mouse information that is received by the window, continuously in a new thread."""
def __init__(self, f: Callable[[str], None], run_when_no_events=False):
self.t = threading.Thread(target=_key_loop_thread(f, run_when_no_events))
self.t.start()
def __call__(self, *args, **kwargs):
"""Return the thread that was started with the function passed in."""
return self.t
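# Usage sketch (assumption: this module is importable as displayarray.input and a
# display window is already running; mouse_loop/key_loop are the decorators above):
#
#   from displayarray.input import mouse_loop, key_loop
#
#   @mouse_loop
#   def on_mouse(event):   # receives a MouseEvent (or None when run_when_no_events=True)
#       print(event)
#
#   @key_loop
#   def on_key(key):       # receives the pressed key as a one-character string
#       print("pressed:", key)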
|
query_alarm_state.py
|
import multiprocessing
import sys
import time
import argparse
import MySQLdb
query = ("select alarm.alarm_definition_id as definition_id, alarm_definition.name as definition_name, "
"count(distinct alarm.id) as num_alarms from alarm join alarm_definition on alarm_definition.id = "
"alarm.alarm_definition_id where alarm.state = '{0}' group by alarm.alarm_definition_id;")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--wait_time", help="Number of time between mysql queries (in seconds)", type=int,
required=False, default=120)
parser.add_argument("--mysql_password", help="Password for monapi user", required=False, default='password')
parser.add_argument("--mysql_host", help="Host running mysql we will connect to", required=False,
default='localhost')
parser.add_argument("--output_directory",
help="Output directory to place result files. Defaults to current directory", required=False)
parser.add_argument("--run_time",
help="How long, in mins, collection will run. Defaults to run indefinitely until the user hits"
" control c", required=False, type=int, default=None)
return parser.parse_args()
def query_alarm_state(args, state):
try:
conn = MySQLdb.connect(
host=args.mysql_host,
user='monapi',
passwd=args.mysql_password,
db='mon')
except MySQLdb.OperationalError as e:
print(' MySQL connection failed: {0}'.format(e))
return
output_file_name = state.lower() + "_alarm_states"
if args.output_directory:
output_file_name = args.output_directory + output_file_name
output_file = open(output_file_name, 'w')
try:
while True:
conn.query(query.format(state))
r = conn.store_result()
data = r.fetch_row(maxrows=0)
output_file.write(time.strftime("%c") + '\n')
if not data:
output_file.write("No current alarms for the state " + state + "\n")
else:
output_file.write('{:>50} {:>5}\n'.format("Alarm Definition", "Number of Alarms"))
for row in data:
output_file.write('{:>50} {:>5}\n'.format(row[1], row[2]))
output_file.flush()
time.sleep(args.wait_time)
except KeyboardInterrupt:
output_file.close()
return
def query_alarms_test():
args = parse_args()
process_list = []
p_undetermined = multiprocessing.Process(target=query_alarm_state, args=(args, 'UNDETERMINED'))
p_ok = multiprocessing.Process(target=query_alarm_state, args=(args, 'OK'))
p_alarm = multiprocessing.Process(target=query_alarm_state, args=(args, 'ALARM'))
process_list.append(p_undetermined)
process_list.append(p_ok)
process_list.append(p_alarm)
for p in process_list:
p.start()
if args.run_time is not None:
time.sleep(args.run_time * 60)
for p in process_list:
p.terminate()
else:
try:
for p in process_list:
try:
p.join()
except Exception:
pass
except KeyboardInterrupt:
pass
if __name__ == "__main__":
sys.exit(query_alarms_test())
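# Example invocation (illustrative; host, password and timings are placeholders for
# the arguments defined in parse_args above). One process is spawned per alarm state
# (UNDETERMINED, OK, ALARM) and each writes a <state>_alarm_states file:
#
#   python query_alarm_state.py --mysql_host db.example.com --mysql_password secret \
#       --wait_time 60 --run_time 30 --output_directory /tmp/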
|
queue_sample.py
|
#!/usr/bin/env python3.6
# coding=utf8
import six
import time
import threading
if six.PY2:
import Queue # PY2
share_q = Queue.Queue(maxsize=3)
elif six.PY3:
from queue import Queue
share_q = Queue(maxsize=3)
def randomSleep(n, m):
    '''n, m indicate the sleep range: the process sleeps between n and m seconds.'''
from random import randint,random
a = round(randint(n,m-1)+random(),1)
print("Sleep {0} seconds".format(a))
time.sleep(a)
sleep = lambda: randomSleep(1, 3)
obj = id("product")
def productor():
while True:
try:
if share_q.full():
print("爆咯爆咯")
break
share_q.put(obj)
print("Produce an obj")
sleep()
print("queue size now :{}".format(share_q.qsize()))
except Exception as excp:
print(excp)
break
if __name__ == '__main__':
t = threading.Thread(target=productor)
t.start()
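# Minimal consumer sketch (assumption: not part of the original sample, which defines
# only a producer; shown to illustrate draining the shared queue):
#
#   def consumer():
#       while True:
#           item = share_q.get()   # blocks until the producer has put an item
#           print("Consumed an obj")
#
#   c = threading.Thread(target=consumer)
#   c.daemon = True
#   c.start()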
|
actor_definition.py
|
import contextlib
import logging
import os
import pkgutil
import sys
from io import UnsupportedOperation
from multiprocessing import Process, Queue
import leapp.libraries.actor
from leapp.actors import get_actors, get_actor_metadata
from leapp.exceptions import ActorInspectionFailedError, MultipleActorsError, UnsupportedDefinitionKindError, \
LeappRuntimeError
from leapp.repository import DefinitionKind
from leapp.repository.loader import library_loader
def inspect_actor(definition, result_queue):
"""
Retrieves the actor information in a child process and returns the results back through `result_queue`.
:param definition: the actor definition to load
:type definition: :py:class:`ActorDefinition`
:param result_queue: queue to pass results back to the calling process
:type result_queue: :py:class:`multiprocessing.Queue`
"""
definition.load()
result = [get_actor_metadata(actor) for actor in get_actors()]
result = [entry for entry in result if entry['path'] in definition.full_path]
result_queue.put(result)
class ActorCallContext(object):
"""
Wraps the actor execution into child process.
"""
def __init__(self, definition, logger, messaging):
"""
:param definition: Actor definition
:type definition: :py:class:`leapp.repository.actor_definition.ActorDefinition`
:param logger: Logger
:type logger: :py:class:`logging.Logger`
:param messaging: Leapp Messaging
:type messaging: :py:class:`leapp.messaging.BaseMessaging`
"""
self.definition = definition
self.logger = logger
self.messaging = messaging
@staticmethod
def _do_run(stdin, logger, messaging, definition, args, kwargs):
if stdin is not None:
try:
sys.stdin = os.fdopen(stdin)
except OSError:
pass
definition.load()
with definition.injected_context():
target_actor = [actor for actor in get_actors() if actor.name == definition.name][0]
target_actor(logger=logger, messaging=messaging).run(*args, **kwargs)
def run(self, *args, **kwargs):
"""
Performs the actor execution in the child process.
"""
try:
stdin = sys.stdin.fileno()
except UnsupportedOperation:
stdin = None
p = Process(target=self._do_run, args=(stdin, self.logger, self.messaging, self.definition, args, kwargs))
p.start()
p.join()
if p.exitcode != 0:
raise LeappRuntimeError(
'Actor {actorname} unexpectedly terminated with exit code: {exitcode}'
.format(actorname=self.definition.name, exitcode=p.exitcode))
class ActorDefinition(object):
"""
Defines actor resources.
"""
def __init__(self, directory, repo_dir, log=None):
"""
:param log: Logger
:type log: :py:class:`logging.Logger`
:param directory: Actor directory
:type directory: str
:param repo_dir: Repository directory
:type repo_dir: str
"""
self.log = log or logging.getLogger('leapp.actor')
self._directory = directory
self._repo_dir = repo_dir
self._definitions = {}
self._module = None
self._discovery = None
@property
def full_path(self):
return os.path.realpath(os.path.join(self._repo_dir, self._directory))
def add(self, kind, path):
"""
Adds any kind of actor resource to the Definition
:param kind: kind of resource added
:type kind: str
:param path: path to the added resource
:type path: str
"""
if kind not in DefinitionKind.ACTOR_WHITELIST:
self.log.error("Attempt to add item type %s to actor that is not supported", kind.name)
raise UnsupportedDefinitionKindError('Actors do not support {kind}.'.format(kind=kind.name))
self._definitions.setdefault(kind, []).append(path)
def dump(self):
"""
:return: dump of actor resources (path, name, tools, files, libraries, tests)
"""
return {
'path': self.directory,
'name': self.name,
'tools': self.tools,
'files': self.files,
'libraries': self.libraries,
'tests': self.tests
}
def load(self):
"""
Loads the actor module to be introspectable.
"""
if not self._module:
with self.injected_context():
path = os.path.abspath(os.path.join(self._repo_dir, self.directory))
for importer, name, is_pkg in pkgutil.iter_modules((path,)):
if not is_pkg:
self._module = importer.find_module(name).load_module(name)
break
def discover(self):
"""
Performs introspection through a subprocess.
:return: Dictionary with discovered items.
"""
if not self._discovery:
self.log.debug("Starting actor discovery in %s", self.directory)
q = Queue(1)
p = Process(target=inspect_actor, args=(self, q))
p.start()
p.join()
if p.exitcode != 0:
self.log.error("Process inspecting actor in %s failed with %d", self.directory, p.exitcode)
raise ActorInspectionFailedError('Inspection of actor in {path} failed'.format(path=self.directory))
result = q.get()
if not result:
self.log.error("Process inspecting actor in %s returned no result", self.directory)
raise ActorInspectionFailedError(
'Inspection of actor in {path} produced no results'.format(path=self.directory))
if len(result) > 1:
self.log.error("Actor in %s returned multiple actors", self.directory)
raise MultipleActorsError(self.directory)
self._discovery = result[0]
for tag in self._discovery['tags']:
if self not in tag.actors:
tag.actors += (self,)
return self._discovery
def __call__(self, messaging=None, logger=None):
return ActorCallContext(definition=self, messaging=messaging, logger=logger)
@property
def dialogs(self):
"""
:return: Tuple of defined dialogs
"""
return self.discover()['dialogs']
@property
def consumes(self):
"""
:return: Tuple of consumed models
"""
return self.discover()['consumes']
@property
def produces(self):
"""
:return: Tuple of produced models
"""
return self.discover()['produces']
@property
def tags(self):
"""
:return: Tuple of tags assigned to the actor
"""
return self.discover()['tags']
@property
def class_name(self):
"""
:return: Actor class name
"""
return self.discover()['class_name']
@property
def name(self):
"""
:return: Actor internal name
"""
return self.discover()['name']
@property
def description(self):
"""
:return: Actor description
"""
return self.discover()['description']
@contextlib.contextmanager
def injected_context(self):
"""
Prepares the actor environment for running the actor.
This includes injecting actor private libraries into :py:mod:`leapp.libraries.actor`
and setting environment variables for private tools and files.
:note: Use with caution.
"""
# Backup of the path variable
path_backup = os.environ.get('PATH', '')
os.environ['PATH'] = ':'.join(path_backup.split(':') + list(
os.path.join(self._repo_dir, self._directory, path) for path in self.tools))
files_backup = os.environ.get('LEAPP_FILES', None)
if self.files:
os.environ['LEAPP_FILES'] = os.path.join(self._repo_dir, self._directory, self.files[0])
tools_backup = os.environ.get('LEAPP_TOOLS', None)
if self.tools:
os.environ['LEAPP_TOOLS'] = os.path.join(self._repo_dir, self._directory, self.tools[0])
# We make a snapshot of the symbols in the module
before = leapp.libraries.actor.__dict__.keys()
# Now we are loading all modules and packages and injecting them at the same time into the modules at hand
to_add = library_loader(leapp.libraries.actor, 'leapp.libraries.actor',
map(lambda x: os.path.join(self._repo_dir, self.directory, x), self.libraries))
backup = {}
# Now we are injecting them into the global sys.modules dictionary and keep a backup of existing ones
# The backup shouldn't be necessary, but just in case
for name, mod in to_add:
if name in sys.modules:
backup[name] = sys.modules[name]
sys.modules[name] = mod
previous_path = os.getcwd()
os.chdir(os.path.join(self._repo_dir, self._directory))
try:
yield
finally:
os.chdir(previous_path)
# Restoration of the PATH environment variable
os.environ['PATH'] = path_backup
# Restoration of the LEAPP_FILES environment variable
if files_backup is not None:
os.environ['LEAPP_FILES'] = files_backup
else:
os.environ.pop('LEAPP_FILES', None)
if tools_backup is not None:
os.environ['LEAPP_TOOLS'] = tools_backup
else:
os.environ.pop('LEAPP_TOOLS', None)
# Remove all symbols that were added to the actor library during the execution
current = leapp.libraries.actor.__dict__.keys()
added = set(current).difference(before)
for symbol in added:
leapp.libraries.actor.__dict__.pop(symbol)
# Remove all modules from the sys.modules dict or restore from backup if it was there
for name, unused in to_add:
if name in backup:
sys.modules[name] = backup[name]
else:
sys.modules.pop(name)
@property
def directory(self):
"""
:return: The folder path of the actor
"""
return self._directory
@property
def tools(self):
"""
:return: Tuple with path to the tools folder of the actor, empty tuple if none
"""
return tuple(self._definitions.get(DefinitionKind.TOOLS, ()))
@property
def libraries(self):
"""
:return: Tuple with path to the libraries folder of the actor, empty tuple if none
"""
return tuple(self._definitions.get(DefinitionKind.LIBRARIES, ()))
@property
def files(self):
"""
:return: Tuple with path to the files folder of the actor, empty tuple if none
"""
return tuple(self._definitions.get(DefinitionKind.FILES, ()))
@property
def tests(self):
"""
:return: Tuple with path to the tests folder of the actor, empty tuple if none
"""
return tuple(self._definitions.get(DefinitionKind.TESTS, ()))
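# Usage sketch (assumption: illustrative only; the paths and the logger/messaging
# objects are placeholders, the calls come from ActorDefinition above):
#
#   definition = ActorDefinition('actors/example', '/path/to/repository')
#   definition.add(DefinitionKind.TOOLS, 'tools')              # kind must be in ACTOR_WHITELIST
#   print(definition.discover()['name'])                       # introspects the actor in a subprocess
#   definition(messaging=messaging, logger=logger).run()       # executes the actor in a child process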
|
views.py
|
import json
import asyncio, threading
import requests
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404
from django.conf import settings
from django.contrib.auth.decorators import login_required
from .models import CustomAction, AuthorizationMethod, ThingAuthorization
from .thing import Thing, new_things
from .forms import ThingActionForm, ThingSaveActionForm, ThingSettingsForm, ThingEventForm, ThingPropertyForm, ThingObservePropertyForm
def _subscribe(func, id, callback):
"""Spawns a subscription in a new thread to prevent locking up HTTP request
Takes 3 arguments:
func - Function to spawn in new thread, must take id and callback as arguments
id - ID to pass to func
callback - To pass to func
"""
def subscription():
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
event_loop.create_task(func(id, callback))
asyncio.get_event_loop().run_forever()
threading.Thread(target=subscription).start()
def _get_custom_or_action(thing, action_name):
"""Looks up action or custom action with name, returning its definition and data (if available)
Takes 2 arguments:
thing - Thing description object
action_name - Action to look up
"""
try:
custom_action = CustomAction.objects.get(name=action_name, thing_uuid=thing.thing_id)
except CustomAction.DoesNotExist:
if thing.has_action(action_name):
action_id = action_name
data = ''
else:
action_id = None
data = None
else:
action_id = custom_action.action_id
data = custom_action.data
return (action_id, data)
@login_required
def thing_list(request):
"""View to show all things in the directory
If POST, attempts to register a new thing first
"""
if request.method == 'POST':
new_things(request.POST.get('url', ''))
response = requests.get('{}/things'.format(settings.THING_DIRECTORY_HOST), headers={
'Authorization': settings.THING_DIRECTORY_KEY,
})
response.raise_for_status()
context = {
'things': response.json(),
}
return render(request, 'things/list.html', context)
@login_required
def thing_single_properties(request, thing_id):
"""Endpoints relating to properties on a thing
If GET will display all the properties to read
If POST will attempt to read the property with given name.
When observable provided in the request the property is observed
"""
thing = Thing(thing_id)
properties = thing.schema.get('properties', dict())
err = None
success = None
if request.method == 'POST':
# Sets up different validation rules depending on whether reading/observing
if 'observe' in request.POST:
form = ThingObservePropertyForm(request.POST)
else:
form = ThingPropertyForm(request.POST)
if form.is_valid():
if form.cleaned_data['observe'] == True:
# Observation logic
callback_thing = Thing(form.cleaned_data['thing_uuid'])
action_id, data = _get_custom_or_action(callback_thing, form.cleaned_data['custom_action_name'])
def callback(response):
"""Callback to run when property changed. Will only perform action if value matches"""
if form.cleaned_data['condition'] == response.payload.decode():
try:
callback_thing.perform_action(action_id, data)
except:
pass
if action_id is not None:
_subscribe(thing.observe_property, form.cleaned_data['property_id'], callback)
success = 'Property subscribed to'
else:
err = 'Action does not exist'
else:
# Read logic
filtered = {k: v for k, v in properties.items() if k == form.cleaned_data['property_id'] or form.cleaned_data['property_id'] == 'all'}
for k, v in filtered.items():
try:
v['value'] = thing.read_property(k)
except:
v['value'] = ['???']
err = 'One or more properties could not be read'
properties[k] = v
else:
err = 'Invalid data supplied'
context = {
'tab': 'properties',
'uuid': thing_id,
'thing': thing.schema,
'properties': properties,
'err': err,
'success': success,
}
return render(request, 'things/properties.html', context)
def _schema_to_list(schema, prefix=''):
"""Utility function to convert JSON input schema to flat list
Can then be iterated over to create an HTML form
"""
output = list()
if schema['type'] == 'string':
output.append({
'name': prefix + '.value',
'type': 'text',
'label': prefix[1:],
})
elif schema['type'] == 'number':
output.append({
'name': prefix + '.value',
'type': 'number',
'label': prefix[1:],
})
elif schema['type'] == 'object':
# If this is an object, generate the names for each property and append
for k, v in schema['properties'].items():
output = output + _schema_to_list(v, prefix+'.'+k)
return output
def _list_to_data(data):
"""Reverse of schema_to_list, constructs a JSON object from a flat list"""
# If not a form (i.e. no input) just return
if 'value' in data:
return ''
# Remove unneeded fields from POST request
data = {k: v for k, v in data.items() if k[0] == '.'}
output = dict()
for k, v in data.items():
keys = k.split('.')[1:-1] # Work out the path in the JSON tree to this leaf
final_key = keys.pop() # Find the value of the leaf key
# Go through each of the nodes and check they exist in the output structure
current = output
for key in keys:
current.setdefault(key, dict()) # If a node is not in the tree, add it
current = current[key]
current[final_key] = v # Insert the value at the final leaf node
return json.dumps(output)
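# Worked example (illustrative) for the two helpers above: given the input schema
#   {"type": "object", "properties": {"level": {"type": "number"}}}
# _schema_to_list returns [{'name': '.level.value', 'type': 'number', 'label': 'level'}],
# and a POSTed field {'.level.value': '5'} is folded back into '{"level": "5"}'
# by _list_to_data.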
@login_required
def thing_single_actions(request, thing_id):
"""Endpoints relating to thing actions
If POST will perform an action or custom action (depending on value)
"""
thing = Thing(thing_id)
actions = thing.schema.get('actions', dict())
err = None
success = None
if request.method == 'POST':
# Validation rules different depending on whether custom action is created
if 'save' in request.POST:
form = ThingSaveActionForm(request.POST)
else:
form = ThingActionForm(request.POST)
if form.is_valid():
# Checks if performing custom action, and retrieves payload and action name
if form.cleaned_data['custom_action_id'] is not None:
custom_action = CustomAction.objects.get(id=form.cleaned_data['custom_action_id'])
payload = custom_action.data
action_id = custom_action.action_id
else:
# If not a custom action, retrieve ID and payload from POST data
action_id = form.cleaned_data['action_id']
payload = _list_to_data(request.POST)
# Make the request with the data (custom or not)
try:
thing.perform_action(action_id, payload)
except Exception as e:
err = 'An error occurred performing action: ' + str(e)
else:
success = 'Action performed successfully'
# If save box checked (only shows on non-custom actions), save the data into the model
if form.cleaned_data['save'] == True:
custom_action = CustomAction(name=form.cleaned_data['name'], description=form.cleaned_data['description'],
action_id=form.cleaned_data['action_id'], thing_uuid=thing_id, data=payload)
custom_action.save()
else:
err = 'Invalid data supplied'
for k, v in actions.items():
if 'input' in v:
v['input_form'] = _schema_to_list(v['input'])
custom_actions = CustomAction.objects.filter(thing_uuid=thing_id)
context = {
'tab': 'actions',
'uuid': thing_id,
'thing': thing.schema,
'actions': actions,
'custom_actions': custom_actions,
'err': err,
'success': success,
}
return render(request, 'things/actions.html', context)
@login_required
def thing_single_events(request, thing_id):
"""Endpoints related to events
If POST request, subscribe to the event
"""
thing = Thing(thing_id)
events = thing.schema.get('events', dict())
err = None
success = None
if request.method == 'POST':
form = ThingEventForm(request.POST)
if form.is_valid():
callback_thing = Thing(form.cleaned_data['thing_uuid'])
action_id, data = _get_custom_or_action(callback_thing, form.cleaned_data['custom_action_name'])
def callback(response):
"""Callback to run when event is emitted. Will perform the specified action"""
try:
callback_thing.perform_action(action_id, data)
except:
pass
if action_id is not None:
_subscribe(thing.observe_event, form.cleaned_data['event_id'], callback)
success = 'Event subscribed to'
else:
err = 'Invalid callback action specified'
else:
err = 'Invalid data supplied'
context = {
'tab': 'events',
'uuid': thing_id,
'thing': thing.schema,
'events': events,
'err': err,
'success': success,
}
return render(request, 'things/events.html', context)
@login_required
def thing_single_settings(request, thing_id):
"""Endpoints related to thing settings
If POST will update the settings
"""
thing = Thing(thing_id)
err = None
success = None
methods = AuthorizationMethod.objects.all()
try:
thing_method = ThingAuthorization.objects.get(thing_uuid=thing_id)
except (ThingAuthorization.DoesNotExist, ThingAuthorization.MultipleObjectsReturned):
thing_method = None
if request.method == 'POST':
form = ThingSettingsForm(request.POST)
if form.is_valid():
if form.cleaned_data['auth_method_delete'] == True:
# Deletes the auth method if present in the request
if thing_method is not None:
thing_method.delete()
thing_method = None
success = 'Cleared'
else:
if thing_method is None:
thing_method = ThingAuthorization(thing_uuid=thing_id)
try:
thing_method.authorization_method = AuthorizationMethod.objects.get(id=form.cleaned_data['auth_method'])
except AuthorizationMethod.DoesNotExist:
err = 'Unknown authorisation method'
else:
thing_method.save()
success = 'Updated'
else:
err = 'Invalid data supplied'
context = {
'tab': 'settings',
'uuid': thing_id,
'thing': thing.schema,
'methods': methods,
'thing_method': thing_method,
'success': success,
'err': err,
}
return render(request, 'things/settings.html', context)
@login_required
def thing_single_schema(request, thing_id):
"""Endpoint to display thing schema"""
thing = Thing(thing_id)
context = {
'tab': 'schema',
'uuid': thing_id,
'thing': thing.schema,
'thing_pretty': json.dumps(thing.schema, indent=4),
}
return render(request, 'things/schema.html', context)
@login_required
def thing_single_delete(request, thing_id):
"""POST request to delete thing from directory"""
thing = Thing(thing_id)
try:
thing.delete()
except Exception as e:
print(e)
return render(request, 'things/properties.html', {'err': 'Unable to delete Thing'})
return redirect('thing_list')
|
cisd.py
|
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Solve CISD equation H C = C e where e = E_HF + E_CORR
'''
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.cc import ccsd
from pyscf.cc import ccsd_rdm
from pyscf.fci import cistring
from pyscf import __config__
BLKMIN = getattr(__config__, 'ci_cisd_blkmin', 4)
def kernel(myci, eris, ci0=None, max_cycle=50, tol=1e-8, verbose=logger.INFO):
'''
Run CISD calculation.
Args:
myci : CISD (inheriting) object
eris : ccsd._ChemistsERIs (inheriting) object (possibly different for density fitting)
Contains the various (pq|rs) integrals needed.
Kwargs:
ci0 : (List of) numpy array(s) (if None it will set)
Initial guess for CISD coeffs.
max_cycle : integer
Maximum number of iterations to converge to CISD solution.
If not converged before, calculation stops without having
converged.
tol : float
Convergence tolerance.
verbose : integer
Level of output (roughly: the higher, the more output).
Returns:
conv : bool
Is it converged?
ecisd : List of floats or float
The lowest :attr:`myci.nroots` eigenvalues.
ci : List of 1D arrays or 1D array
The lowest :attr:`myci.nroots` eigenvectors.
'''
log = logger.new_logger(myci, verbose)
diag = myci.make_diagonal(eris)
# Note that ehf is not the HF energy (see `make_diagonal`).
ehf = diag[0]
diag -= ehf
if ci0 is None:
ci0 = myci.get_init_guess(eris=eris, nroots=myci.nroots, diag=diag)[1]
def op(xs):
return [myci.contract(x, eris) for x in xs]
def precond(x, e, *args):
diagd = diag - (e-myci.level_shift)
diagd[abs(diagd)<1e-8] = 1e-8
return x / diagd
if myci._dot is not None:
nmo = myci.nmo
nocc = myci.nocc
def cisd_dot(x1, x2):
return myci._dot(x1, x2, nmo, nocc)
else:
cisd_dot = numpy.dot
conv, ecisd, ci = lib.davidson1(op, ci0, precond, tol=tol,
max_cycle=max_cycle, max_space=myci.max_space,
lindep=myci.lindep, dot=cisd_dot,
nroots=myci.nroots, verbose=log)
if myci.nroots == 1:
conv = conv[0]
ecisd = ecisd[0]
ci = ci[0]
return conv, ecisd, ci
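# Usage sketch (assumption: the standard PySCF front end; the CISD class that wires
# make_diagonal/contract into this kernel is defined further down in this module):
#
#   from pyscf import gto, scf, ci
#   mol = gto.M(atom='H 0 0 0; H 0 0 0.74', basis='sto-3g')
#   mf = scf.RHF(mol).run()
#   myci = ci.CISD(mf).run()   # drives kernel() above via Davidson diagonalization
#   print(myci.e_corr)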
def make_diagonal(myci, eris):
'''
Return diagonal of CISD hamiltonian in Slater determinant basis.
Note that a constant has been subtracted from all elements.
The first element is the HF energy (minus the constant), the next
elements are the diagonal elements for singly excited determinants
(<D_i^a|H|D_i^a>, up to the constant), followed by those for doubly
excited determinants (<D_ij^ab|H|D_ij^ab>, up to the constant).
Args:
myci : CISD (inheriting) object
eris : ccsd._ChemistsERIs (inheriting) object (possibly different for density fitting)
Contains the various (pq|rs) integrals needed.
Returns:
numpy array (size: (1, 1 + #single excitations from HF det
+ #double excitations from HF det))
Diagonal elements of hamiltonian matrix within a constant,
see above.
'''
# DO NOT use eris.mo_energy, it may differ to eris.fock.diagonal()
mo_energy = eris.fock.diagonal()
nmo = mo_energy.size
jdiag = numpy.zeros((nmo,nmo))
kdiag = numpy.zeros((nmo,nmo))
nocc = eris.nocc
nvir = nmo - nocc
jdiag[:nocc,:nocc] = numpy.einsum('iijj->ij', eris.oooo)
kdiag[:nocc,:nocc] = numpy.einsum('jiij->ij', eris.oooo)
jdiag[:nocc,nocc:] = numpy.einsum('iijj->ij', eris.oovv)
kdiag[:nocc,nocc:] = numpy.einsum('ijji->ij', eris.ovvo)
if eris.vvvv is not None and len(eris.vvvv.shape) == 2:
#:eris_vvvv = ao2mo.restore(1, eris.vvvv, nvir)
#:jdiag1 = numpy.einsum('iijj->ij', eris_vvvv)
diag_idx = numpy.arange(nvir)
diag_idx = diag_idx * (diag_idx + 1) // 2 + diag_idx
for i, ii in enumerate(diag_idx):
jdiag[nocc+i,nocc:] = eris.vvvv[ii][diag_idx]
jksum = (jdiag[:nocc,:nocc] * 2 - kdiag[:nocc,:nocc]).sum()
# Note that ehf is not the HF energy.
ehf = mo_energy[:nocc].sum() * 2 - jksum
e_ia = lib.direct_sum('a-i->ia', mo_energy[nocc:], mo_energy[:nocc])
e_ia -= jdiag[:nocc,nocc:] - kdiag[:nocc,nocc:]
e1diag = ehf + e_ia
e2diag = lib.direct_sum('ia+jb->ijab', e_ia, e_ia)
e2diag += ehf
e2diag += jdiag[:nocc,:nocc].reshape(nocc,nocc,1,1)
e2diag -= jdiag[:nocc,nocc:].reshape(nocc,1,1,nvir)
e2diag -= jdiag[:nocc,nocc:].reshape(1,nocc,nvir,1)
e2diag += jdiag[nocc:,nocc:].reshape(1,1,nvir,nvir)
return numpy.hstack((ehf, e1diag.reshape(-1), e2diag.reshape(-1)))
def contract(myci, civec, eris):
'''
Application of CISD hamiltonian onto civec.
Args:
myci : CISD (inheriting) object
civec : numpy array, same length as a CI vector.
eris : ccsd._ChemistsERIs (inheriting) object (possibly different for density fitting)
Contains the various (pq|rs) integrals needed.
Returns:
numpy array, same length as a CI vector.
'''
time0 = logger.process_clock(), logger.perf_counter()
log = logger.Logger(myci.stdout, myci.verbose)
nocc = myci.nocc
nmo = myci.nmo
nvir = nmo - nocc
c0, c1, c2 = myci.cisdvec_to_amplitudes(civec, nmo, nocc)
t2 = myci._add_vvvv(c2, eris, t2sym='jiba')
t2 *= .5 # due to t2+t2.transpose(1,0,3,2) in the end
log.timer_debug1('vvvv', *time0)
foo = eris.fock[:nocc,:nocc].copy()
fov = eris.fock[:nocc,nocc:].copy()
fvv = eris.fock[nocc:,nocc:].copy()
t1 = fov * c0
t1 += numpy.einsum('ib,ab->ia', c1, fvv)
t1 -= numpy.einsum('ja,ji->ia', c1, foo)
t2 += lib.einsum('kilj,klab->ijab', _cp(eris.oooo)*.5, c2)
t2 += lib.einsum('ijac,bc->ijab', c2, fvv)
t2 -= lib.einsum('kj,kiba->jiba', foo, c2)
t2 += numpy.einsum('ia,jb->ijab', c1, fov)
unit = nocc*nvir**2 + nocc**2*nvir*3 + 1
max_memory = max(0, myci.max_memory - lib.current_memory()[0])
blksize = min(nvir, max(BLKMIN, int(max_memory*.9e6/8/unit)))
log.debug1('max_memory %d MB, nocc,nvir = %d,%d blksize = %d',
max_memory, nocc, nvir, blksize)
for p0, p1 in lib.prange(0, nvir, blksize):
eris_oVoV = _cp(_cp(eris.oovv[:,:,p0:p1]).transpose(0,2,1,3))
tmp = lib.einsum('kbjc,ikca->jiba', eris_oVoV, c2)
t2[:,:,p0:p1] -= tmp*.5
t2[:,:,p0:p1] -= tmp.transpose(1,0,2,3)
tmp = None
eris_ovvo = _cp(eris.ovvo[:,p0:p1])
t2[:,:,p0:p1] += eris_ovvo.transpose(0,3,1,2) * (c0*.5)
t1 += numpy.einsum('ia,iabj->jb', c1[:,p0:p1], eris_ovvo) * 2
t1[:,p0:p1] -= numpy.einsum('ib,iajb->ja', c1, eris_oVoV)
ovov = -.5 * eris_oVoV
ovov += eris_ovvo.transpose(3,1,0,2)
eris_oVoV = None
theta = c2[:,:,p0:p1].transpose(2,0,1,3) * 2
theta-= c2[:,:,p0:p1].transpose(2,1,0,3)
for j in range(nocc):
t2[:,j] += lib.einsum('ckb,ckia->iab', ovov[j], theta)
tmp = ovov = None
t1 += numpy.einsum('aijb,ia->jb', theta, fov[:,p0:p1])
eris_ovoo = _cp(eris.ovoo[:,p0:p1])
t1 -= lib.einsum('bjka,jbki->ia', theta, eris_ovoo)
t2[:,:,p0:p1] -= lib.einsum('jbik,ka->jiba', eris_ovoo.conj(), c1)
eris_ovoo = None
eris_ovvv = eris.get_ovvv(slice(None), slice(p0,p1)).conj()
t1 += lib.einsum('cjib,jcba->ia', theta, eris_ovvv)
t2[:,:,p0:p1] += lib.einsum('iacb,jc->ijab', eris_ovvv, c1)
tmp = eris_ovvv = None
#:t2 + t2.transpose(1,0,3,2)
for i in range(nocc):
if i > 0:
t2[i,:i]+= t2[:i,i].transpose(0,2,1)
t2[:i,i] = t2[i,:i].transpose(0,2,1)
t2[i,i] = t2[i,i] + t2[i,i].T
t0 = numpy.einsum('ia,ia->', fov, c1) * 2
t0 += numpy.einsum('iabj,ijab->', eris.ovvo, c2) * 2
t0 -= numpy.einsum('iabj,jiab->', eris.ovvo, c2)
cinew = numpy.hstack((t0, t1.ravel(), t2.ravel()))
return cinew
def amplitudes_to_cisdvec(c0, c1, c2):
return numpy.hstack((c0, c1.ravel(), c2.ravel()))
def cisdvec_to_amplitudes(civec, nmo, nocc):
nvir = nmo - nocc
c0 = civec[0]
c1 = civec[1:nocc*nvir+1].reshape(nocc,nvir)
c2 = civec[nocc*nvir+1:].reshape(nocc,nocc,nvir,nvir)
return c0, c1, c2
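# Layout note (illustrative): the flat CI vector packs [c0, c1, c2] in that order, so
# for nocc = 2 occupied and nvir = 3 virtual orbitals its length is
# 1 + nocc*nvir + nocc**2 * nvir**2 = 1 + 6 + 36 = 43.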
def dot(v1, v2, nmo, nocc):
nvir = nmo - nocc
hijab = v2[1+nocc*nvir:].reshape(nocc,nocc,nvir,nvir)
cijab = v1[1+nocc*nvir:].reshape(nocc,nocc,nvir,nvir)
val = numpy.dot(v1, v2) * 2 - v1[0]*v2[0]
val-= numpy.einsum('jiab,ijab->', cijab, hijab)
return val
def t1strs(norb, nelec):
'''Compute the FCI strings (address) for CIS single-excitation amplitudes
    and the signs of the coefficients when transferring the reference from the
    physical vacuum to the HF vacuum.
'''
addrs, signs = tn_addrs_signs(norb, nelec, 1)
return addrs, signs
def tn_addrs_signs(norb, nelec, n_excite):
'''Compute the FCI strings (address) for CIS n-excitation amplitudes and
    the signs of the coefficients when transferring the reference from the
    physical vacuum to the HF vacuum.
'''
if n_excite > nelec:
print("Warning: Not enough occupied orbitals to excite.")
return [0], [0]
nocc = nelec
hole_strs = cistring.gen_strings4orblist(range(nocc), nocc - n_excite)
# For HF vacuum, hole operators are ordered from low-lying to high-lying
# orbitals. It leads to the opposite string ordering.
hole_strs = hole_strs[::-1]
hole_sum = numpy.zeros(len(hole_strs), dtype=int)
for i in range(nocc):
hole_at_i = (hole_strs & (1 << i)) == 0
hole_sum[hole_at_i] += i
# The hole operators are listed from low-lying to high-lying orbitals
    # (from left to right). For the i-th (0-based) hole operator, the number of
    # orbitals lying higher than i determines the sign. This number equals
    # nocc-(i+1). After removing the highest hole operator, nocc becomes
    # nocc-1, so the sign for the next hole operator j is associated with
    # nocc-1-(j+1). Iterating this procedure gives the overall sign for
    # annihilating n_excite holes, (-1)**(n_excite*nocc - n_excite*(n_excite+1)//2 - hole_sum)
sign = (-1) ** (n_excite * nocc - n_excite*(n_excite+1)//2 - hole_sum)
particle_strs = cistring.gen_strings4orblist(range(nocc, norb), n_excite)
strs = hole_strs[:,None] ^ particle_strs
addrs = cistring.strs2addr(norb, nocc, strs.ravel())
signs = numpy.vstack([sign] * len(particle_strs)).T.ravel()
return addrs, signs
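# Usage sketch (added; mirrors how to_fcivec() below uses this helper): place
# singles amplitudes c1 into an FCI vector through the string addresses and
# phase factors returned here.  `norb`, `nocc` and `c1` are assumed to be given:
# >>> t1addr, t1sign = tn_addrs_signs(norb, nocc, 1)
# >>> na = cistring.num_strings(norb, nocc)
# >>> fcivec = numpy.zeros((na, na))
# >>> fcivec[0, t1addr] = c1.ravel() * t1sign   # alpha singles; beta block is the transpose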
def to_fcivec(cisdvec, norb, nelec, frozen=None):
'''Convert CISD coefficients to FCI coefficients'''
if isinstance(nelec, (int, numpy.number)):
nelecb = nelec//2
neleca = nelec - nelecb
else:
neleca, nelecb = nelec
assert(neleca == nelecb)
frozen_mask = numpy.zeros(norb, dtype=bool)
if frozen is None:
nfroz = 0
elif isinstance(frozen, (int, numpy.integer)):
nfroz = frozen
frozen_mask[:frozen] = True
else:
nfroz = len(frozen)
frozen_mask[frozen] = True
nocc = numpy.count_nonzero(~frozen_mask[:neleca])
nmo = norb - nfroz
nvir = nmo - nocc
c0, c1, c2 = cisdvec_to_amplitudes(cisdvec, nmo, nocc)
t1addr, t1sign = tn_addrs_signs(nmo, nocc, 1)
na = cistring.num_strings(nmo, nocc)
fcivec = numpy.zeros((na,na))
fcivec[0,0] = c0
fcivec[0,t1addr] = fcivec[t1addr,0] = c1.ravel() * t1sign
c2ab = c2.transpose(0,2,1,3).reshape(nocc*nvir,-1)
c2ab = numpy.einsum('i,j,ij->ij', t1sign, t1sign, c2ab)
fcivec[t1addr[:,None],t1addr] = c2ab
if nocc > 1 and nvir > 1:
c2aa = c2 - c2.transpose(1,0,2,3)
ooidx = numpy.tril_indices(nocc, -1)
vvidx = numpy.tril_indices(nvir, -1)
c2aa = c2aa[ooidx][:,vvidx[0],vvidx[1]]
t2addr, t2sign = tn_addrs_signs(nmo, nocc, 2)
fcivec[0,t2addr] = fcivec[t2addr,0] = c2aa.ravel() * t2sign
if nfroz == 0:
return fcivec
assert(norb < 63)
strs = cistring.gen_strings4orblist(range(norb), neleca)
na = len(strs)
count = numpy.zeros(na, dtype=int)
parity = numpy.zeros(na, dtype=bool)
core_mask = numpy.ones(na, dtype=bool)
    # During the loop, count saves the number of occupied orbitals that lie
    # lower (i.e. have a smaller orbital ID) than the present orbital i.
# Moving all the frozen orbitals to the beginning of the orbital list
# (before the occupied orbitals) leads to parity odd (= True, with
# negative sign) or even (= False, with positive sign).
for i in range(norb):
if frozen_mask[i]:
if i < neleca:
# frozen occupied orbital should be occupied
core_mask &= (strs & (1 << i)) != 0
parity ^= (count & 1) == 1
else:
# frozen virtual orbital should not be occupied.
# parity is not needed since it's unoccupied
core_mask &= (strs & (1 << i)) == 0
else:
count += (strs & (1 << i)) != 0
sub_strs = strs[core_mask & (count == nocc)]
addrs = cistring.strs2addr(norb, neleca, sub_strs)
fcivec1 = numpy.zeros((na,na))
fcivec1[addrs[:,None],addrs] = fcivec
fcivec1[parity,:] *= -1
fcivec1[:,parity] *= -1
return fcivec1
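# Usage sketch (added, not part of the original source): convert a converged
# CISD vector to its FCI (determinant-basis) representation, e.g. to feed it
# into pyscf.fci property routines.  With no frozen orbitals the result is a
# square matrix over alpha/beta strings:
# >>> myci = CISD(mf).run()
# >>> fcivec = myci.to_fcivec(myci.ci)
# >>> na = cistring.num_strings(myci.nmo, myci.nocc)
# >>> fcivec.shape == (na, na)
# True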
def from_fcivec(ci0, norb, nelec, frozen=None):
'''Extract CISD coefficients from FCI coefficients'''
if not (frozen is None or frozen == 0):
raise NotImplementedError
if isinstance(nelec, (int, numpy.number)):
nelecb = nelec//2
neleca = nelec - nelecb
else:
neleca, nelecb = nelec
nocc = neleca
nvir = norb - nocc
t1addr, t1sign = t1strs(norb, nocc)
c0 = ci0[0,0]
c1 = ci0[0,t1addr] * t1sign
c2 = numpy.einsum('i,j,ij->ij', t1sign, t1sign, ci0[t1addr[:,None],t1addr])
c1 = c1.reshape(nocc,nvir)
c2 = c2.reshape(nocc,nvir,nocc,nvir).transpose(0,2,1,3)
return amplitudes_to_cisdvec(c0, c1, c2)
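# Round-trip sketch (added): with no frozen orbitals, from_fcivec() inverts
# to_fcivec() up to numerical noise:
# >>> fcivec = to_fcivec(civec, nmo, nelec)
# >>> abs(from_fcivec(fcivec, nmo, nelec) - civec).max() < 1e-12
# True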
def overlap(cibra, ciket, nmo, nocc, s=None):
'''Overlap between two CISD wavefunctions.
Args:
s : 2D array
The overlap matrix of non-orthogonal one-particle basis
'''
if s is None:
return dot(cibra, ciket, nmo, nocc)
DEBUG = True
nvir = nmo - nocc
nov = nocc * nvir
bra0, bra1, bra2 = cisdvec_to_amplitudes(cibra, nmo, nocc)
ket0, ket1, ket2 = cisdvec_to_amplitudes(ciket, nmo, nocc)
    # Sort the ket orbitals so that the orbitals in bra map one-to-one onto
    # the orbitals in ket.
if ((not DEBUG) and
abs(numpy.linalg.det(s[:nocc,:nocc]) - 1) < 1e-2 and
abs(numpy.linalg.det(s[nocc:,nocc:]) - 1) < 1e-2):
ket_orb_idx = numpy.where(abs(s) > 0.9)[1]
s = s[:,ket_orb_idx]
oidx = ket_orb_idx[:nocc]
vidx = ket_orb_idx[nocc:] - nocc
ket1 = ket1[oidx[:,None],vidx]
ket2 = ket2[oidx[:,None,None,None],oidx[:,None,None],vidx[:,None],vidx]
ooidx = numpy.tril_indices(nocc, -1)
vvidx = numpy.tril_indices(nvir, -1)
bra2aa = bra2 - bra2.transpose(1,0,2,3)
bra2aa = lib.take_2d(bra2aa.reshape(nocc**2,nvir**2),
ooidx[0]*nocc+ooidx[1], vvidx[0]*nvir+vvidx[1])
ket2aa = ket2 - ket2.transpose(1,0,2,3)
ket2aa = lib.take_2d(ket2aa.reshape(nocc**2,nvir**2),
ooidx[0]*nocc+ooidx[1], vvidx[0]*nvir+vvidx[1])
occlist0 = numpy.arange(nocc).reshape(1,nocc)
occlists = numpy.repeat(occlist0, 1+nov+bra2aa.size, axis=0)
occlist0 = occlists[:1]
occlist1 = occlists[1:1+nov]
occlist2 = occlists[1+nov:]
ia = 0
for i in range(nocc):
for a in range(nocc, nmo):
occlist1[ia,i] = a
ia += 1
ia = 0
for i in range(nocc):
for j in range(i):
for a in range(nocc, nmo):
for b in range(nocc, a):
occlist2[ia,i] = a
occlist2[ia,j] = b
ia += 1
na = len(occlists)
if DEBUG:
trans = numpy.empty((na,na))
for i, idx in enumerate(occlists):
s_sub = s[idx].T.copy()
minors = s_sub[occlists]
trans[i,:] = numpy.linalg.det(minors)
# Mimic the transformation einsum('ab,ap->pb', FCI, trans).
# The wavefunction FCI has the [excitation_alpha,excitation_beta]
# representation. The zero blocks like FCI[S_alpha,D_beta],
# FCI[D_alpha,D_beta], are explicitly excluded.
bra_mat = numpy.zeros((na,na))
bra_mat[0,0] = bra0
bra_mat[0,1:1+nov] = bra_mat[1:1+nov,0] = bra1.ravel()
bra_mat[0,1+nov:] = bra_mat[1+nov:,0] = bra2aa.ravel()
bra_mat[1:1+nov,1:1+nov] = bra2.transpose(0,2,1,3).reshape(nov,nov)
ket_mat = numpy.zeros((na,na))
ket_mat[0,0] = ket0
ket_mat[0,1:1+nov] = ket_mat[1:1+nov,0] = ket1.ravel()
ket_mat[0,1+nov:] = ket_mat[1+nov:,0] = ket2aa.ravel()
ket_mat[1:1+nov,1:1+nov] = ket2.transpose(0,2,1,3).reshape(nov,nov)
ovlp = lib.einsum('ab,ap,bq,pq->', bra_mat, trans, trans, ket_mat)
else:
nov1 = 1 + nov
noovv = bra2aa.size
bra_SS = numpy.zeros((nov1,nov1))
bra_SS[0,0] = bra0
bra_SS[0,1:] = bra_SS[1:,0] = bra1.ravel()
bra_SS[1:,1:] = bra2.transpose(0,2,1,3).reshape(nov,nov)
ket_SS = numpy.zeros((nov1,nov1))
ket_SS[0,0] = ket0
ket_SS[0,1:] = ket_SS[1:,0] = ket1.ravel()
ket_SS[1:,1:] = ket2.transpose(0,2,1,3).reshape(nov,nov)
trans_SS = numpy.empty((nov1,nov1))
trans_SD = numpy.empty((nov1,noovv))
trans_DS = numpy.empty((noovv,nov1))
occlist01 = occlists[:nov1]
for i, idx in enumerate(occlist01):
s_sub = s[idx].T.copy()
minors = s_sub[occlist01]
trans_SS[i,:] = numpy.linalg.det(minors)
minors = s_sub[occlist2]
trans_SD[i,:] = numpy.linalg.det(minors)
s_sub = s[:,idx].copy()
minors = s_sub[occlist2]
trans_DS[:,i] = numpy.linalg.det(minors)
ovlp = lib.einsum('ab,ap,bq,pq->', bra_SS, trans_SS, trans_SS, ket_SS)
ovlp+= lib.einsum('ab,a ,bq, q->', bra_SS, trans_SS[:,0], trans_SD, ket2aa.ravel())
ovlp+= lib.einsum('ab,ap,b ,p ->', bra_SS, trans_SD, trans_SS[:,0], ket2aa.ravel())
ovlp+= lib.einsum(' b, p,bq,pq->', bra2aa.ravel(), trans_SS[0,:], trans_DS, ket_SS)
ovlp+= lib.einsum(' b, p,b ,p ->', bra2aa.ravel(), trans_SD[0,:], trans_DS[:,0],
ket2aa.ravel())
ovlp+= lib.einsum('a ,ap, q,pq->', bra2aa.ravel(), trans_DS, trans_SS[0,:], ket_SS)
ovlp+= lib.einsum('a ,a , q, q->', bra2aa.ravel(), trans_DS[:,0], trans_SD[0,:],
ket2aa.ravel())
# FIXME: whether to approximate the overlap between double excitation coefficients
if numpy.linalg.norm(bra2aa)*numpy.linalg.norm(ket2aa) < 1e-4:
# Skip the overlap if coefficients of double excitation are small enough
pass
        elif (abs(numpy.linalg.det(s[:nocc,:nocc]) - 1) < 1e-2 and
              abs(numpy.linalg.det(s[nocc:,nocc:]) - 1) < 1e-2):
            # If the overlap matrix is close enough to the identity, use the
            # <D|D'> overlap for an orthogonal single-particle basis to
            # approximate the overlap for the non-orthogonal basis.
ovlp+= numpy.dot(bra2aa.ravel(), ket2aa.ravel()) * trans_SS[0,0] * 2
else:
from multiprocessing import sharedctypes, Process
buf_ctypes = sharedctypes.RawArray('d', noovv)
trans_ket = numpy.ndarray(noovv, buffer=buf_ctypes)
def trans_dot_ket(i0, i1):
for i in range(i0, i1):
s_sub = s[occlist2[i]].T.copy()
minors = s_sub[occlist2]
trans_ket[i] = numpy.linalg.det(minors).dot(ket2aa.ravel())
nproc = lib.num_threads()
if nproc > 1:
seg = (noovv+nproc-1) // nproc
ps = []
for i0,i1 in lib.prange(0, noovv, seg):
p = Process(target=trans_dot_ket, args=(i0,i1))
ps.append(p)
p.start()
[p.join() for p in ps]
else:
trans_dot_ket(0, noovv)
ovlp+= numpy.dot(bra2aa.ravel(), trans_ket) * trans_SS[0,0] * 2
return ovlp
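# Usage sketch (added; the names mo_bra, mo_ket, cibra, ciket are hypothetical):
# overlap between two CISD wavefunctions obtained with two different MO sets.
# `s` is the cross overlap <mo_bra|mo_ket> expressed in the MO basis:
# >>> s_ao = mol.intor('int1e_ovlp')
# >>> s_mo = mo_bra.T.dot(s_ao).dot(mo_ket)
# >>> ovlp = overlap(cibra, ciket, nmo, nocc, s_mo)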
def make_rdm1(myci, civec=None, nmo=None, nocc=None, ao_repr=False):
r'''
Spin-traced one-particle density matrix in MO basis (the occupied-virtual
blocks from the orbital response contribution are not included).
dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>
    The convention of 1-pdm is based on McWeeny's book, Eq (5.4.20).
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
'''
if civec is None: civec = myci.ci
if nmo is None: nmo = myci.nmo
if nocc is None: nocc = myci.nocc
d1 = _gamma1_intermediates(myci, civec, nmo, nocc)
return ccsd_rdm._make_rdm1(myci, d1, with_frozen=True, ao_repr=ao_repr)
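# Usage sketch (added): with ao_repr=True the density matrix is returned in the
# AO basis, which is convenient for one-electron properties such as the dipole
# moment of the correlated wavefunction:
# >>> dm1_ao = myci.make_rdm1(ao_repr=True)
# >>> dip = mf.dip_moment(mol, dm1_ao)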
def make_rdm2(myci, civec=None, nmo=None, nocc=None, ao_repr=False):
r'''
Spin-traced two-particle density matrix in MO basis
dm2[p,q,r,s] = \sum_{sigma,tau} <p_sigma^\dagger r_tau^\dagger s_tau q_sigma>
Note the contraction between ERIs (in Chemist's notation) and rdm2 is
E = einsum('pqrs,pqrs', eri, rdm2)
'''
if civec is None: civec = myci.ci
if nmo is None: nmo = myci.nmo
if nocc is None: nocc = myci.nocc
d1 = _gamma1_intermediates(myci, civec, nmo, nocc)
f = lib.H5TmpFile()
d2 = _gamma2_outcore(myci, civec, nmo, nocc, f, False)
return ccsd_rdm._make_rdm2(myci, d1, d2, with_dm1=True, with_frozen=True,
ao_repr=ao_repr)
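# Energy-check sketch (added; the __main__ block at the end of this file runs
# the same test in full).  With h1e and h2e being the one- and two-electron MO
# integrals (chemists' notation), the electronic energy is recovered as
# >>> e_elec = (numpy.einsum('ij,ji', h1e, myci.make_rdm1())
# ...           + numpy.einsum('ijkl,ijkl', h2e, myci.make_rdm2()) * .5)
# which should equal myci.e_tot - mol.energy_nuc().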
def _gamma1_intermediates(myci, civec, nmo, nocc):
c0, c1, c2 = myci.cisdvec_to_amplitudes(civec, nmo, nocc)
dvo = c0.conj() * c1.T
dvo += numpy.einsum('jb,ijab->ai', c1.conj(), c2) * 2
dvo -= numpy.einsum('jb,ijba->ai', c1.conj(), c2)
dov = dvo.T.conj()
theta = c2*2 - c2.transpose(0,1,3,2)
doo = -numpy.einsum('ia,ka->ik', c1.conj(), c1)
doo -= lib.einsum('ijab,ikab->jk', c2.conj(), theta)
dvv = numpy.einsum('ia,ic->ac', c1, c1.conj())
dvv += lib.einsum('ijab,ijac->bc', theta, c2.conj())
return doo, dov, dvo, dvv
def _gamma2_intermediates(myci, civec, nmo, nocc, compress_vvvv=False):
f = lib.H5TmpFile()
_gamma2_outcore(myci, civec, nmo, nocc, f, compress_vvvv)
d2 = (f['dovov'][:], f['dvvvv'][:], f['doooo'][:], f['doovv'][:],
f['dovvo'][:], None, f['dovvv'][:], f['dooov'][:])
return d2
def _gamma2_outcore(myci, civec, nmo, nocc, h5fobj, compress_vvvv=False):
log = logger.Logger(myci.stdout, myci.verbose)
nocc = myci.nocc
nmo = myci.nmo
nvir = nmo - nocc
nvir_pair = nvir * (nvir+1) // 2
c0, c1, c2 = myci.cisdvec_to_amplitudes(civec, nmo, nocc)
h5fobj['dovov'] = (2*c0*c2.conj().transpose(0,2,1,3) -
c0*c2.conj().transpose(1,2,0,3))
doooo = lib.einsum('ijab,klab->ijkl', c2.conj(), c2)
h5fobj['doooo'] = doooo.transpose(0,2,1,3) - doooo.transpose(1,2,0,3)*.5
doooo = None
dooov = -lib.einsum('ia,klac->klic', c1*2, c2.conj())
h5fobj['dooov'] = dooov.transpose(0,2,1,3)*2 - dooov.transpose(1,2,0,3)
dooov = None
#:dvovv = numpy.einsum('ia,ikcd->akcd', c1, c2) * 2
#:dvvvv = lib.einsum('ijab,ijcd->abcd', c2, c2)
max_memory = max(0, myci.max_memory - lib.current_memory()[0])
unit = max(nocc**2*nvir*2+nocc*nvir**2*3 + 1, nvir**3*2+nocc*nvir**2 + 1)
blksize = min(nvir, max(BLKMIN, int(max_memory*.9e6/8/unit)))
    log.debug1('rdm intermediates: block size = %d, nvir = %d in %d blocks',
               blksize, nvir, int((nvir+blksize-1)/blksize))
dtype = numpy.result_type(civec).char
dovvv = h5fobj.create_dataset('dovvv', (nocc,nvir,nvir,nvir), dtype,
chunks=(nocc,min(nocc,nvir),1,nvir))
if compress_vvvv:
dvvvv = h5fobj.create_dataset('dvvvv', (nvir_pair,nvir_pair), dtype)
else:
dvvvv = h5fobj.create_dataset('dvvvv', (nvir,nvir,nvir,nvir), dtype)
for (p0, p1) in lib.prange(0, nvir, blksize):
theta = c2[:,:,p0:p1] - c2[:,:,p0:p1].transpose(1,0,2,3) * .5
gvvvv = lib.einsum('ijab,ijcd->abcd', theta.conj(), c2)
if compress_vvvv:
# symmetrize dvvvv because it does not affect the results of cisd_grad
# dvvvv = (dvvvv+dvvvv.transpose(0,1,3,2)) * .5
# dvvvv = (dvvvv+dvvvv.transpose(1,0,2,3)) * .5
# now dvvvv == dvvvv.transpose(0,1,3,2) == dvvvv.transpose(1,0,3,2)
tmp = numpy.empty((nvir,nvir,nvir))
tmpvvvv = numpy.empty((p1-p0,nvir,nvir_pair))
for i in range(p1-p0):
tmp[:] = gvvvv[i].conj().transpose(1,0,2)
lib.pack_tril(tmp+tmp.transpose(0,2,1), out=tmpvvvv[i])
# tril of (dvvvv[p0:p1,p0:p1]+dvvvv[p0:p1,p0:p1].T)
for i in range(p0, p1):
for j in range(p0, i):
tmpvvvv[i-p0,j] += tmpvvvv[j-p0,i]
tmpvvvv[i-p0,i] *= 2
for i in range(p1, nvir):
off = i * (i+1) // 2
dvvvv[off+p0:off+p1] = tmpvvvv[:,i]
for i in range(p0, p1):
off = i * (i+1) // 2
if p0 > 0:
tmpvvvv[i-p0,:p0] += dvvvv[off:off+p0]
dvvvv[off:off+i+1] = tmpvvvv[i-p0,:i+1] * .25
tmp = tmpvvvv = None
else:
for i in range(p0, p1):
dvvvv[i] = gvvvv[i-p0].conj().transpose(1,0,2)
gvovv = numpy.einsum('ia,ikcd->akcd', c1[:,p0:p1].conj()*2, c2)
gvovv = gvovv.conj()
dovvv[:,:,p0:p1] = gvovv.transpose(1,3,0,2)*2 - gvovv.transpose(1,2,0,3)
theta = c2*2 - c2.transpose(1,0,2,3)
doovv = numpy.einsum('ia,kc->ikca', c1.conj(), -c1)
doovv -= lib.einsum('kjcb,kica->jiab', c2.conj(), theta)
doovv -= lib.einsum('ikcb,jkca->ijab', c2.conj(), theta)
h5fobj['doovv'] = doovv
doovv = None
dovvo = lib.einsum('ikac,jkbc->iabj', theta.conj(), theta)
dovvo += numpy.einsum('ia,kc->iack', c1.conj(), c1) * 2
h5fobj['dovvo'] = dovvo
theta = dovvo = None
dvvov = None
return (h5fobj['dovov'], h5fobj['dvvvv'], h5fobj['doooo'], h5fobj['doovv'],
h5fobj['dovvo'], dvvov , h5fobj['dovvv'], h5fobj['dooov'])
def trans_rdm1(myci, cibra, ciket, nmo=None, nocc=None):
r'''
Spin-traced one-particle transition density matrix in MO basis.
dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>
    The convention of 1-pdm is based on McWeeny's book, Eq (5.4.20).
The contraction between 1-particle Hamiltonian and rdm1 is
E = einsum('pq,qp', h1, rdm1)
'''
if nmo is None: nmo = myci.nmo
if nocc is None: nocc = myci.nocc
c0bra, c1bra, c2bra = myci.cisdvec_to_amplitudes(cibra, nmo, nocc)
c0ket, c1ket, c2ket = myci.cisdvec_to_amplitudes(ciket, nmo, nocc)
dvo = c0bra.conj() * c1ket.T
dvo += numpy.einsum('jb,ijab->ai', c1bra.conj(), c2ket) * 2
dvo -= numpy.einsum('jb,ijba->ai', c1bra.conj(), c2ket)
dov = c0ket * c1bra.conj()
dov += numpy.einsum('jb,ijab->ia', c1ket, c2bra.conj()) * 2
dov -= numpy.einsum('jb,ijba->ia', c1ket, c2bra.conj())
theta = c2ket*2 - c2ket.transpose(0,1,3,2)
doo = -numpy.einsum('ia,ka->ik', c1bra.conj(), c1ket)
doo -= lib.einsum('ijab,ikab->jk', c2bra.conj(), theta)
dvv = numpy.einsum('ia,ic->ac', c1ket, c1bra.conj())
dvv += lib.einsum('ijab,ijac->bc', theta, c2bra.conj())
dm1 = numpy.empty((nmo,nmo), dtype=doo.dtype)
dm1[:nocc,:nocc] = doo * 2
dm1[:nocc,nocc:] = dov * 2
dm1[nocc:,:nocc] = dvo * 2
dm1[nocc:,nocc:] = dvv * 2
norm = dot(cibra, ciket, nmo, nocc)
dm1[numpy.diag_indices(nocc)] += 2 * norm
if myci.frozen is not None:
nmo = myci.mo_occ.size
nocc = numpy.count_nonzero(myci.mo_occ > 0)
rdm1 = numpy.zeros((nmo,nmo), dtype=dm1.dtype)
rdm1[numpy.diag_indices(nocc)] = 2 * norm
moidx = numpy.where(myci.get_frozen_mask())[0]
rdm1[moidx[:,None],moidx] = dm1
dm1 = rdm1
return dm1
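# Usage sketch (added): with nroots > 1 the CISD solver returns a list of CI
# vectors; trans_rdm1() then gives the MO-basis transition density between two
# of them, e.g. for a transition dipole moment:
# >>> myci = CISD(mf).set(nroots=3).run()
# >>> t_dm1 = myci.trans_rdm1(myci.ci[0], myci.ci[2])
# >>> t_dm1_ao = mf.mo_coeff.dot(t_dm1).dot(mf.mo_coeff.T)
# >>> trans_dip = numpy.einsum('xij,ji->x', mol.intor('int1e_r'), t_dm1_ao)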
def as_scanner(ci):
'''Generating a scanner/solver for CISD PES.
The returned solver is a function. This function requires one argument
"mol" as input and returns total CISD energy.
The solver will automatically use the results of last calculation as the
initial guess of the new calculation. All parameters assigned in the
CISD and the underlying SCF objects (conv_tol, max_memory etc) are
automatically applied in the solver.
Note scanner has side effects. It may change many underlying objects
(_scf, with_df, with_x2c, ...) during calculation.
Examples::
>>> from pyscf import gto, scf, ci
>>> mol = gto.M(atom='H 0 0 0; F 0 0 1')
>>> ci_scanner = ci.CISD(scf.RHF(mol)).as_scanner()
>>> e_tot = ci_scanner(gto.M(atom='H 0 0 0; F 0 0 1.1'))
>>> e_tot = ci_scanner(gto.M(atom='H 0 0 0; F 0 0 1.5'))
'''
from pyscf import gto
if isinstance(ci, lib.SinglePointScanner):
return ci
logger.info(ci, 'Set %s as a scanner', ci.__class__)
class CISD_Scanner(ci.__class__, lib.SinglePointScanner):
def __init__(self, ci):
self.__dict__.update(ci.__dict__)
self._scf = ci._scf.as_scanner()
def __call__(self, mol_or_geom, ci0=None, **kwargs):
if isinstance(mol_or_geom, gto.Mole):
mol = mol_or_geom
else:
mol = self.mol.set_geom_(mol_or_geom, inplace=False)
self.reset(mol)
mf_scanner = self._scf
mf_scanner(mol)
self.mo_coeff = mf_scanner.mo_coeff
self.mo_occ = mf_scanner.mo_occ
if getattr(self.ci, 'size', 0) != self.vector_size():
self.ci = None
if ci0 is None:
# FIXME: Whether to use the initial guess from last step?
# If root flips, large errors may be found in the solutions
ci0 = self.ci
self.kernel(ci0, **kwargs)[0]
return self.e_tot
return CISD_Scanner(ci)
class CISD(lib.StreamObject):
'''restricted CISD
Attributes:
verbose : int
Print level. Default value equals to :class:`Mole.verbose`
max_memory : float or int
Allowed memory in MB. Default value equals to
:class:`Mole.max_memory`
conv_tol : float
            Convergence threshold. Default is 1e-9.
max_cycle : int
max number of iterations. Default is 50.
max_space : int
Davidson diagonalization space size. Default is 12.
direct : bool
AO-direct CISD. Default is False.
async_io : bool
Allow for asynchronous function execution. Default is True.
frozen : int or list
If integer is given, the inner-most orbitals are frozen from CI
amplitudes. Given the orbital indices (0-based) in a list, both
occupied and virtual orbitals can be frozen in CI calculation.
>>> mol = gto.M(atom = 'H 0 0 0; F 0 0 1.1', basis = 'ccpvdz')
>>> mf = scf.RHF(mol).run()
>>> # freeze 2 core orbitals
>>> myci = ci.CISD(mf).set(frozen = 2).run()
>>> # freeze 2 core orbitals and 3 high lying unoccupied orbitals
>>> myci.set(frozen = [0,1,16,17,18]).run()
Saved results
converged : bool
CISD converged or not
e_corr : float
CISD correlation correction
e_tot : float
            Total CISD energy (HF + correlation)
ci :
CI wavefunction coefficients
'''
conv_tol = getattr(__config__, 'ci_cisd_CISD_conv_tol', 1e-9)
max_cycle = getattr(__config__, 'ci_cisd_CISD_max_cycle', 50)
max_space = getattr(__config__, 'ci_cisd_CISD_max_space', 12)
lindep = getattr(__config__, 'ci_cisd_CISD_lindep', 1e-14)
level_shift = getattr(__config__, 'ci_cisd_CISD_level_shift', 0) # in preconditioner
direct = getattr(__config__, 'ci_cisd_CISD_direct', False)
async_io = getattr(__config__, 'ci_cisd_CISD_async_io', True)
def __init__(self, mf, frozen=None, mo_coeff=None, mo_occ=None):
if 'dft' in str(mf.__module__):
raise RuntimeError('CISD Warning: The first argument mf is a DFT object. '
'CISD calculation should be initialized with HF object.\n'
'DFT object can be converted to HF object with '
'the code below:\n'
' mf_hf = scf.RHF(mol)\n'
' mf_hf.__dict__.update(mf_dft.__dict__)\n')
if mo_coeff is None: mo_coeff = mf.mo_coeff
if mo_occ is None: mo_occ = mf.mo_occ
self.mol = mf.mol
self._scf = mf
self.verbose = self.mol.verbose
self.stdout = self.mol.stdout
self.max_memory = mf.max_memory
self.nroots = 1
self.frozen = frozen
self.chkfile = mf.chkfile
##################################################
# don't modify the following attributes, they are not input options
self.converged = False
self.mo_coeff = mo_coeff
self.mo_occ = mo_occ
self.e_corr = None
self.emp2 = None
self.ci = None
self._nocc = None
self._nmo = None
keys = set(('conv_tol', 'max_cycle', 'max_space', 'lindep',
'level_shift', 'direct'))
self._keys = set(self.__dict__.keys()).union(keys)
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('')
log.info('******** %s ********', self.__class__)
log.info('CISD nocc = %s, nmo = %s', self.nocc, self.nmo)
if self.frozen is not None:
log.info('frozen orbitals %s', str(self.frozen))
        log.info('direct = %d', self.direct)
        log.info('conv_tol = %g', self.conv_tol)
        log.info('max_cycle = %d', self.max_cycle)
        log.info('max_space = %d', self.max_space)
        log.info('lindep = %g', self.lindep)
log.info('nroots = %d', self.nroots)
log.info('max_memory %d MB (current use %d MB)',
self.max_memory, lib.current_memory()[0])
return self
@property
def e_tot(self):
return numpy.asarray(self.e_corr) + self._scf.e_tot
@property
def nstates(self):
return self.nroots
@nstates.setter
def nstates(self, x):
self.nroots = x
@property
def nocc(self):
return self.get_nocc()
@nocc.setter
def nocc(self, n):
self._nocc = n
@property
def nmo(self):
return self.get_nmo()
@nmo.setter
def nmo(self, n):
self._nmo = n
def vector_size(self):
'''The size of the vector which was returned from
:func:`amplitudes_to_cisdvec`
'''
nocc = self.nocc
nvir = self.nmo - nocc
return 1 + nocc*nvir + (nocc*nvir)**2
def reset(self, mol=None):
if mol is not None:
self.mol = mol
self._scf.reset(mol)
return self
get_nocc = ccsd.get_nocc
get_nmo = ccsd.get_nmo
get_frozen_mask = ccsd.get_frozen_mask
def kernel(self, ci0=None, eris=None):
return self.cisd(ci0, eris)
def cisd(self, ci0=None, eris=None):
if eris is None:
eris = self.ao2mo(self.mo_coeff)
if self.verbose >= logger.WARN:
self.check_sanity()
self.dump_flags()
self.converged, self.e_corr, self.ci = \
kernel(self, eris, ci0, max_cycle=self.max_cycle,
tol=self.conv_tol, verbose=self.verbose)
self._finalize()
return self.e_corr, self.ci
def _finalize(self):
citype = self.__class__.__name__
if numpy.all(self.converged):
logger.info(self, '%s converged', citype)
else:
logger.info(self, '%s not converged', citype)
if self.nroots > 1:
for i,e in enumerate(self.e_tot):
logger.note(self, '%s root %d E = %.16g', citype, i, e)
else:
logger.note(self, 'E(%s) = %.16g E_corr = %.16g',
citype, self.e_tot, self.e_corr)
return self
def get_init_guess(self, eris=None, nroots=1, diag=None):
'''
MP2 energy and MP2 initial guess(es) for CISD coefficients.
Kwargs:
            eris : ccsd._ChemistsERIs (inheriting) object (possibly different for DF)
Contains the various (pq|rs) integrals needed.
nroots : integer
Number of CISD solutions to be found.
diag : numpy array (1D)
e.g. CISD Hamiltonian diagonal in Slater determinant
space with HF energy subtracted.
Returns:
Tuple of float and numpy array or
tuple of float and list of numpy arrays (if nroots > 1)
MP2 energy and initial guess(es) for CISD coefficients.
'''
if eris is None: eris = self.ao2mo(self.mo_coeff)
nocc = self.nocc
mo_e = eris.mo_energy
e_ia = lib.direct_sum('i-a->ia', mo_e[:nocc], mo_e[nocc:])
ci0 = 1
ci1 = eris.fock[:nocc,nocc:] / e_ia
eris_ovvo = _cp(eris.ovvo)
ci2 = 2 * eris_ovvo.transpose(0,3,1,2)
ci2 -= eris_ovvo.transpose(0,3,2,1)
ci2 /= lib.direct_sum('ia,jb->ijab', e_ia, e_ia)
self.emp2 = numpy.einsum('ijab,iabj', ci2, eris_ovvo)
logger.info(self, 'Init t2, MP2 energy = %.15g', self.emp2)
if abs(self.emp2) < 1e-3 and abs(ci1).sum() < 1e-3:
# To avoid ci1 being stuck at local minimum
ci1 = 1e-1 / e_ia
ci_guess = amplitudes_to_cisdvec(ci0, ci1, ci2)
if nroots > 1:
civec_size = ci_guess.size
dtype = ci_guess.dtype
nroots = min(ci1.size+1, nroots) # Consider Koopmans' theorem only
if diag is None:
idx = range(1, nroots)
else:
idx = diag[:ci1.size+1].argsort()[1:nroots] # exclude HF determinant
ci_guess = [ci_guess]
for i in idx:
g = numpy.zeros(civec_size, dtype)
g[i] = 1.0
ci_guess.append(g)
return self.emp2, ci_guess
contract = contract
make_diagonal = make_diagonal
def _dot(self, x1, x2, nmo=None, nocc=None):
if nmo is None: nmo = self.nmo
if nocc is None: nocc = self.nocc
return dot(x1, x2, nmo, nocc)
def ao2mo(self, mo_coeff=None):
nmo = self.nmo
nao = self.mo_coeff.shape[0]
nmo_pair = nmo * (nmo+1) // 2
nao_pair = nao * (nao+1) // 2
mem_incore = (max(nao_pair**2, nmo**4) + nmo_pair**2) * 8/1e6
mem_now = lib.current_memory()[0]
if (self._scf._eri is not None and
(mem_incore+mem_now < self.max_memory) or self.mol.incore_anyway):
return ccsd._make_eris_incore(self, mo_coeff)
if getattr(self._scf, 'with_df', None):
logger.warn(self, 'CISD detected DF being used in the HF object. '
'MO integrals are computed based on the DF 3-index tensors.\n'
'It\'s recommended to use dfccsd.CCSD for the '
'DF-CISD calculations')
return ccsd._make_df_eris_outcore(self, mo_coeff)
return ccsd._make_eris_outcore(self, mo_coeff)
def _add_vvvv(self, c2, eris, out=None, t2sym=None):
return ccsd._add_vvvv(self, None, c2, eris, out, False, t2sym)
def to_fcivec(self, cisdvec, norb=None, nelec=None, frozen=None):
if norb is None: norb = self.nmo
if nelec is None: nelec = self.nocc*2
return to_fcivec(cisdvec, norb, nelec, frozen)
def from_fcivec(self, fcivec, norb=None, nelec=None):
if norb is None: norb = self.nmo
if nelec is None: nelec = self.nocc*2
return from_fcivec(fcivec, norb, nelec)
make_rdm1 = make_rdm1
make_rdm2 = make_rdm2
trans_rdm1 = trans_rdm1
as_scanner = as_scanner
def dump_chk(self, ci=None, frozen=None, mo_coeff=None, mo_occ=None):
if not self.chkfile:
return self
if ci is None: ci = self.ci
if frozen is None: frozen = self.frozen
# "None" cannot be serialized by the chkfile module
if frozen is None:
frozen = 0
ci_chk = {'e_corr': self.e_corr,
'ci': ci,
'frozen': frozen}
if mo_coeff is not None: ci_chk['mo_coeff'] = mo_coeff
if mo_occ is not None: ci_chk['mo_occ'] = mo_occ
if self._nmo is not None: ci_chk['_nmo'] = self._nmo
if self._nocc is not None: ci_chk['_nocc'] = self._nocc
lib.chkfile.save(self.chkfile, 'cisd', ci_chk)
def amplitudes_to_cisdvec(self, c0, c1, c2):
return amplitudes_to_cisdvec(c0, c1, c2)
def cisdvec_to_amplitudes(self, civec, nmo=None, nocc=None):
if nmo is None: nmo = self.nmo
if nocc is None: nocc = self.nocc
return cisdvec_to_amplitudes(civec, nmo, nocc)
def density_fit(self):
raise NotImplementedError
def nuc_grad_method(self):
from pyscf.grad import cisd
return cisd.Gradients(self)
class RCISD(CISD):
pass
from pyscf import scf
scf.hf.RHF.CISD = lib.class_as_method(RCISD)
scf.rohf.ROHF.CISD = None
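# Usage sketch (added): the registration above lets RCISD be invoked directly
# from a converged RHF object:
# >>> from pyscf import gto, scf
# >>> mol = gto.M(atom='H 0 0 0; F 0 0 1.1', basis='ccpvdz')
# >>> mf = scf.RHF(mol).run()
# >>> myci = mf.CISD(frozen=1).run()
# >>> print(myci.e_corr)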
def _cp(a):
return numpy.array(a, copy=False, order='C')
if __name__ == '__main__':
from pyscf import gto
from pyscf import ao2mo
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = 'sto3g'
mol.build()
mf = scf.RHF(mol).run()
myci = CISD(mf)
eris = ccsd._make_eris_outcore(myci, mf.mo_coeff)
ecisd, civec = myci.kernel(eris=eris)
print(ecisd - -0.048878084082066106)
nmo = myci.nmo
nocc = myci.nocc
rdm1 = myci.make_rdm1(civec)
rdm2 = myci.make_rdm2(civec)
h1e = reduce(numpy.dot, (mf.mo_coeff.T, mf.get_hcore(), mf.mo_coeff))
h2e = ao2mo.kernel(mf._eri, mf.mo_coeff)
h2e = ao2mo.restore(1, h2e, nmo)
e2 = (numpy.einsum('ij,ji', h1e, rdm1) +
numpy.einsum('ijkl,ijkl', h2e, rdm2) * .5)
print(ecisd + mf.e_tot - mol.energy_nuc() - e2) # = 0
print(abs(rdm1 - numpy.einsum('ijkk->ji', rdm2)/(mol.nelectron-1)).sum())