| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
from ._StabData import *
from ._AccelData import *
from ._MotorData import *
from ._GyroData import *
| WSCU/crazyflie_ros | src/crazyflie/msg/__init__.py | Python | gpl-2.0 | 102 |
# -*- coding: utf-8 -*-
#Copyright (C) Fiz Vazquez vud1@sindominio.net
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os, sys
import logging
from .lib.xmlUtils import XMLParser
from .gui.windowextensions import WindowExtensions
class Extension:
def __init__(self, data_path = None, parent = None):
self.data_path=data_path
self.parent = parent
self.pytrainer_main = parent
def getActiveExtensions(self):
retorno = []
for extension in self.getExtensionList():
if self.getExtensionInfo(extension[0])[2] == "1":
retorno.append(extension[0])
return retorno
def manageExtensions(self):
ExtensionList = self.getExtensionList()
windowextension = WindowExtensions(self.data_path, self)
windowextension.setList(ExtensionList)
windowextension.run()
def getExtensionList(self):
extensiondir = self.data_path+"/extensions"
extensionList = []
for extension in os.listdir(extensiondir):
extensionxmlfile = extensiondir+"/"+extension+"/conf.xml"
if os.path.isfile(extensionxmlfile):
extensioninfo = XMLParser(extensionxmlfile)
name = extensioninfo.getValue("pytrainer-extension","name")
description = extensioninfo.getValue("pytrainer-extension","description")
extensionList.append((extensiondir+"/"+extension,name,description))
return extensionList
def getExtensionInfo(self,pathExtension):
info = XMLParser(pathExtension+"/conf.xml")
name = info.getValue("pytrainer-extension","name")
description = info.getValue("pytrainer-extension","description")
code = info.getValue("pytrainer-extension","extensioncode")
extensiondir = self.pytrainer_main.profile.extensiondir
helpfile = pathExtension+"/"+info.getValue("pytrainer-extension","helpfile")
type = info.getValue("pytrainer-extension","type")
if not os.path.isfile(extensiondir+"/"+code+"/conf.xml"):
status = 0
else:
info = XMLParser(extensiondir+"/"+code+"/conf.xml")
status = info.getValue("pytrainer-extension","status")
#print name,description,status,helpfile,type
return name,description,status,helpfile,type
def getExtensionConfParams(self,pathExtension):
info = XMLParser(pathExtension+"/conf.xml")
code = info.getValue("pytrainer-extension","extensioncode")
extensiondir = self.pytrainer_main.profile.extensiondir
params = {}
if not os.path.isfile(extensiondir+"/"+code+"/conf.xml"):
prefs = info.getAllValues("conf-values")
prefs.append(("status","0"))
for pref in prefs:
params[pref[0]] = info.getValue("pytrainer-extension",pref[0])
else:
prefs = info.getAllValues("conf-values")
prefs.append(("status","0"))
info = XMLParser(extensiondir+"/"+code+"/conf.xml")
for pref in prefs:
params[pref[0]] = info.getValue("pytrainer-extension",pref[0])
#params.append((pref[0],info.getValue("pytrainer-extension",pref[0])))
return params
def setExtensionConfParams(self,pathExtension,savedOptions):
info = XMLParser(pathExtension+"/conf.xml")
code = info.getValue("pytrainer-extension","extensioncode")
extensiondir = self.pytrainer_main.profile.extensiondir+"/"+code
if not os.path.isdir(extensiondir):
os.mkdir(extensiondir)
if not os.path.isfile(extensiondir+"/conf.xml"):
savedOptions.append(("status","0"))
info = XMLParser(extensiondir+"/conf.xml")
info.createXMLFile("pytrainer-extension",savedOptions)
def loadExtension(self,pathExtension):
info = XMLParser(pathExtension+"/conf.xml")
txtbutton = info.getValue("pytrainer-extension","extensionbutton")
name = info.getValue("pytrainer-extension","name")
type = info.getValue("pytrainer-extension","type")
#print "Loading Extension %s" %name
return txtbutton,pathExtension,type
def getCodeConfValue(self,code,value):
extensiondir = self.pytrainer_main.profile.extensiondir
info = XMLParser(extensiondir+"/"+code+"/conf.xml")
return info.getValue("pytrainer-extension",value)
def importClass(self, pathExtension):
logging.debug('>>')
info = XMLParser(pathExtension+"/conf.xml")
#import extension
extension_dir = os.path.realpath(pathExtension)
extension_filename = info.getValue("pytrainer-extension","executable")
extension_classname = info.getValue("pytrainer-extension","extensioncode")
extension_type = info.getValue("pytrainer-extension","type")
options = self.getExtensionConfParams(pathExtension)
logging.debug("Extension Filename: %s", extension_filename )
logging.debug("Extension Classname: %s", extension_classname)
logging.debug("Extension Type: %s", extension_type)
logging.debug("Extension options: %s", options)
sys.path.insert(0, extension_dir)
module = __import__(extension_filename)
extensionMain = getattr(module, extension_classname)
logging.debug('<<')
return extensionMain(parent=self, pytrainer_main=self.parent, conf_dir=self.pytrainer_main.profile.confdir, options=options)
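# A minimal, hypothetical usage sketch (assumes a data_path containing an
# "extensions/" directory and a parent object exposing profile.extensiondir
# and profile.confdir, as the methods above expect):
#
#   ext = Extension(data_path="/usr/share/pytrainer", parent=pytrainer_main)
#   for path in ext.getActiveExtensions():
#       txtbutton, path, ext_type = ext.loadExtension(path)
#       instance = ext.importClass(path)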
| pytrainer/pytrainer | pytrainer/extension.py | Python | gpl-2.0 | 5,489 |
'''
Created on Dec 31, 2011
@author: fli
'''
import logging
import base64
import urllib
import simplejson
from urllib2 import urlopen, URLError, Request
from weibonews.utils.decorators import perf_logging
_LOGGER = logging.getLogger('weibonews.external')
_ACCESS_URL_FORMAT = "http://%s/image/%s/%s"
_INFO_URL_FORMAT = "http://%s/imageinfo/"
_PARAM_ENCODING_FORMAT = "w=%d&q=%d"
_DEFAULT_IMAGE_QUALITY = 80
_SAFE_QUOTE = '/?=&:#%'
_CROP_PARAM_VALUE = "crop=1&rate=16d10&ff=1"
def get_image_access_url(server, url, width=0, quality=_DEFAULT_IMAGE_QUALITY, refer=None, cut=False):
if url:
if isinstance(url, unicode):
url = urllib.quote(url.encode('utf8'), _SAFE_QUOTE)
if refer is not None:
url = _encode_refer(url, refer)
url_encoded = base64.urlsafe_b64encode(url)
if cut:
param_encoded = base64.urlsafe_b64encode(_CROP_PARAM_VALUE)
else:
param_encoded = base64.urlsafe_b64encode(_PARAM_ENCODING_FORMAT % (width, quality))
return _ACCESS_URL_FORMAT % (server, url_encoded, param_encoded)
else:
return url
@perf_logging
def request_images_size(server, url_list, refer=None):
if url_list:
request_urls = []
for url in url_list:
if not url:
continue
if refer is not None:
url = _encode_refer(url, refer)
request_urls.append(url)
return _get_image_details(server, request_urls)
return None
@perf_logging
def _encode_refer(url, refer):
headers = {'Referer': refer}
url_params = {'headers': urllib.urlencode(headers)}
return '|'.join((url, urllib.urlencode(url_params)))
@perf_logging
def _get_image_details(server, urls):
if isinstance(urls, list):
data = {"u": '[%s]' % ','.join(['"%s"' % u for u in urls])}
data = urllib.urlencode(data)
server_url = _INFO_URL_FORMAT % server
request = Request(server_url, data)
request.add_header("Content-type", "application/x-www-form-urlencoded")
try:
page = urlopen(request).read()
except URLError, err:
_LOGGER.error("[Image Compress Service] Error: %s, url %s, data %s" % (err, server_url, data))
return None
result = simplejson.loads(page)
_LOGGER.info("[Image Compress Service] Get %d result from %d request from %s" % (len(result['data']), len(urls), server_url))
return result['data']
return None
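# Example of the access-URL scheme implemented above (server and source URL
# are hypothetical):
#   get_image_access_url('img.example.com', 'http://a.com/pic.jpg', width=320)
# returns "http://img.example.com/image/<b64>/<b64>", where the first path
# segment is the urlsafe-base64 of the source URL and the second encodes
# "w=320&q=80" (the default quality is 80).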
| vispeal/VoteHelper | weibonews/weibonews/utils/image.py | Python | gpl-2.0 | 2,495 |
#
# pyfeyner - a simple Python interface for making Feynman diagrams.
# Copyright (C) 2005-2010 Andy Buckley, Georg von Hippel
# Copyright (C) 2013 Ismo Toijala
#
# pyfeyner is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# pyfeyner is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with pyfeyner; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from pyfeyner import *
from pyfeyner.user import *
fd = FeynDiagram()
pt_in = Vertex(0, 0)
pt_out = Vertex(0, 6)
#pt_out = Vertex(6, 0)
vtx = Vertex(3, 3)
f = Fermion(pt_in, pt_out).arcThru(vtx)
fd.draw("test-bend90a.pdf")
| itoijala/pyfeyner | tests/test-bend90a.py | Python | gpl-2.0 | 1,085 |
from admin_views import *
| grapesmoker/geogame2 | geogame_core/admin_views/__init__.py | Python | gpl-2.0 | 25 |
"""
Command line interface for cobbler.
Copyright 2006-2009, Red Hat, Inc and Others
Michael DeHaan <michael.dehaan AT gmail>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import sys
import xmlrpclib
import traceback
import optparse
import exceptions
import time
import os
import utils
import module_loader
import item_distro
import item_profile
import item_system
import item_repo
import item_image
import item_mgmtclass
import item_package
import item_file
import settings
OBJECT_ACTIONS_MAP = {
"distro" : "add copy edit find list remove rename report".split(" "),
"profile" : "add copy dumpvars edit find getks list remove rename report".split(" "),
"system" : "add copy dumpvars edit find getks list remove rename report poweron poweroff powerstatus reboot".split(" "),
"image" : "add copy edit find list remove rename report".split(" "),
"repo" : "add copy edit find list remove rename report".split(" "),
"mgmtclass" : "add copy edit find list remove rename report".split(" "),
"package" : "add copy edit find list remove rename report".split(" "),
"file" : "add copy edit find list remove rename report".split(" "),
"setting" : "edit report".split(" "),
"signature" : "reload report update".split(" "),
}
OBJECT_TYPES = OBJECT_ACTIONS_MAP.keys()
# would like to use from_iterable here, but have to support python 2.4
OBJECT_ACTIONS = []
for actions in OBJECT_ACTIONS_MAP.values():
OBJECT_ACTIONS += actions
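# (On python >= 2.6 the loop above could be written with itertools, e.g.
#   import itertools
#   OBJECT_ACTIONS = list(itertools.chain.from_iterable(OBJECT_ACTIONS_MAP.values()))
# but the explicit loop is kept for python 2.4 compatibility, per the note above.)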
DIRECT_ACTIONS = "aclsetup buildiso import list replicate report reposync sync validateks version".split()
####################################################
def report_items(remote, otype):
if otype == "setting":
items = remote.get_settings()
keys = items.keys()
keys.sort()
for key in keys:
item = {'name':key, 'value':items[key]}
report_item(remote,otype,item=item)
elif otype == "signature":
items = remote.get_signatures()
total_breeds = 0
total_sigs = 0
if items.has_key("breeds"):
print "Currently loaded signatures:"
bkeys = items["breeds"].keys()
bkeys.sort()
total_breeds = len(bkeys)
for breed in bkeys:
print "%s:" % breed
oskeys = items["breeds"][breed].keys()
oskeys.sort()
if len(oskeys) > 0:
total_sigs += len(oskeys)
for osversion in oskeys:
print "\t%s" % osversion
else:
print "\t(none)"
print "\n%d breeds with %d total signatures loaded" % (total_breeds,total_sigs)
else:
print "No breeds found in the signature, a signature update is recommended"
sys.exit(1)
else:
items = remote.get_items(otype)
for x in items:
report_item(remote,otype,item=x)
def report_item(remote,otype,item=None,name=None):
if item is None:
if otype == "setting":
cur_settings = remote.get_settings()
try:
item = {'name':name, 'value':cur_settings[name]}
except:
print "Setting not found: %s" % name
sys.exit(1)
elif otype == "signature":
items = remote.get_signatures()
total_sigs = 0
if items.has_key("breeds"):
print "Currently loaded signatures:"
if items["breeds"].has_key(name):
print "%s:" % name
oskeys = items["breeds"][name].keys()
oskeys.sort()
if len(oskeys) > 0:
total_sigs += len(oskeys)
for osversion in oskeys:
print "\t%s" % osversion
else:
print "\t(none)"
print "\nBreed '%s' has %d total signatures" % (name,total_sigs)
else:
print "No breed named '%s' found" % name
sys.exit(1)
else:
print "No breeds found in the signature, a signature update is recommended"
sys.exit(1)
return
else:
item = remote.get_item(otype, name)
if item == "~":
print "No %s found: %s" % (otype, name)
sys.exit(1)
if otype == "distro":
data = utils.printable_from_fields(item, item_distro.FIELDS)
elif otype == "profile":
data = utils.printable_from_fields(item, item_profile.FIELDS)
elif otype == "system":
data = utils.printable_from_fields(item, item_system.FIELDS)
elif otype == "repo":
data = utils.printable_from_fields(item, item_repo.FIELDS)
elif otype == "image":
data = utils.printable_from_fields(item, item_image.FIELDS)
elif otype == "mgmtclass":
data = utils.printable_from_fields(item,item_mgmtclass.FIELDS)
elif otype == "package":
data = utils.printable_from_fields(item,item_package.FIELDS)
elif otype == "file":
data = utils.printable_from_fields(item,item_file.FIELDS)
elif otype == "setting":
data = "%-40s: %s" % (item['name'],item['value'])
print data
def list_items(remote,otype):
items = remote.get_item_names(otype)
items.sort()
for x in items:
print " %s" % x
def n2s(data):
"""
Return spaces for None
"""
if data is None:
return ""
return data
def opt(options, k, defval=""):
"""
Returns an option from an Optparse values instance
"""
try:
data = getattr(options, k)
except:
# FIXME: debug only
# traceback.print_exc()
return defval
return n2s(data)
class BootCLI:
def __init__(self):
# Load server ip and ports from local config
self.url_cobbler_api = utils.local_get_cobbler_api_url()
self.url_cobbler_xmlrpc = utils.local_get_cobbler_xmlrpc_url()
# FIXME: allow specifying other endpoints, and user+pass
self.parser = optparse.OptionParser()
self.remote = xmlrpclib.Server(self.url_cobbler_api)
self.shared_secret = utils.get_shared_secret()
def start_task(self, name, options):
options = utils.strip_none(vars(options), omit_none=True)
fn = getattr(self.remote, "background_%s" % name)
return fn(options, self.token)
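    # e.g. start_task("sync", options) invokes remote.background_sync(options,
    # token) on the server and returns a task id; that task's log can then be
    # tailed with print_task()/follow_task() further below.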
def get_object_type(self, args):
"""
If this is a CLI command about an object type, e.g. "cobbler distro add", return the type, like "distro"
"""
if len(args) < 2:
return None
elif args[1] in OBJECT_TYPES:
return args[1]
return None
def get_object_action(self, object_type, args):
"""
If this is a CLI command about an object type, e.g. "cobbler distro add", return the action, like "add"
"""
if object_type is None or len(args) < 3:
return None
if args[2] in OBJECT_ACTIONS_MAP[object_type]:
return args[2]
return None
def get_direct_action(self, object_type, args):
"""
If this is a general command, e.g. "cobbler hardlink", return the action, like "hardlink"
"""
if object_type is not None:
return None
elif len(args) < 2:
return None
elif args[1] == "--help":
return None
elif args[1] == "--version":
return "version"
else:
return args[1]
def check_setup(self):
"""
Detect permissions and service accessibility problems and provide
nicer error messages for them.
"""
s = xmlrpclib.Server(self.url_cobbler_xmlrpc)
try:
s.ping()
except:
print >> sys.stderr, "cobblerd does not appear to be running/accessible"
sys.exit(411)
s = xmlrpclib.Server(self.url_cobbler_api)
try:
s.ping()
except:
print >> sys.stderr, "httpd does not appear to be running and proxying cobbler, or SELinux is in the way. Original traceback:"
traceback.print_exc()
sys.exit(411)
if not os.path.exists("/var/lib/cobbler/web.ss"):
print >> sys.stderr, "Missing login credentials file. Has cobblerd failed to start?"
sys.exit(411)
if not os.access("/var/lib/cobbler/web.ss", os.R_OK):
print >> sys.stderr, "User cannot run command line, need read access to /var/lib/cobbler/web.ss"
sys.exit(411)
def run(self, args):
"""
Process the command line and do what the user asks.
"""
self.token = self.remote.login("", self.shared_secret)
object_type = self.get_object_type(args)
object_action = self.get_object_action(object_type, args)
direct_action = self.get_direct_action(object_type, args)
try:
if object_type is not None:
if object_action is not None:
self.object_command(object_type, object_action)
else:
self.print_object_help(object_type)
elif direct_action is not None:
self.direct_command(direct_action)
else:
self.print_help()
except xmlrpclib.Fault, err:
if err.faultString.find("cobbler.cexceptions.CX") != -1:
print self.cleanup_fault_string(err.faultString)
else:
print "### ERROR ###"
print "Unexpected remote error, check the server side logs for further info"
print err.faultString
sys.exit(1)
def cleanup_fault_string(self,str):
"""
        Make a remote exception nicely readable by humans so it's not evident that it is a remote
        fault. Users should not have to understand tracebacks.
"""
if str.find(">:") != -1:
(first, rest) = str.split(">:",1)
if rest.startswith("\"") or rest.startswith("\'"):
rest = rest[1:]
if rest.endswith("\"") or rest.endswith("\'"):
rest = rest[:-1]
return rest
else:
return str
def get_fields(self, object_type):
"""
For a given name of an object type, return the FIELDS data structure.
"""
# FIXME: this should be in utils, or is it already?
if object_type == "distro":
return item_distro.FIELDS
elif object_type == "profile":
return item_profile.FIELDS
elif object_type == "system":
return item_system.FIELDS
elif object_type == "repo":
return item_repo.FIELDS
elif object_type == "image":
return item_image.FIELDS
elif object_type == "mgmtclass":
return item_mgmtclass.FIELDS
elif object_type == "package":
return item_package.FIELDS
elif object_type == "file":
return item_file.FIELDS
elif object_type == "setting":
return settings.FIELDS
def object_command(self, object_type, object_action):
"""
Process object-based commands such as "distro add" or "profile rename"
"""
task_id = -1 # if assigned, we must tail the logfile
fields = self.get_fields(object_type)
if object_action in [ "add", "edit", "copy", "rename", "find" ]:
utils.add_options_from_fields(object_type, self.parser, fields, object_action)
elif object_action in [ "list" ]:
pass
elif object_action not in ("reload","update"):
self.parser.add_option("--name", dest="name", help="name of object")
elif object_action == "reload":
self.parser.add_option("--filename", dest="filename", help="filename to load data from")
(options, args) = self.parser.parse_args()
# the first three don't require a name
if object_action == "report":
if options.name is not None:
report_item(self.remote,object_type,None,options.name)
else:
report_items(self.remote,object_type)
elif object_action == "list":
list_items(self.remote, object_type)
elif object_action == "find":
items = self.remote.find_items(object_type, utils.strip_none(vars(options), omit_none=True), "name", False)
for item in items:
print item
elif object_action in OBJECT_ACTIONS:
if opt(options, "name") == "" and object_action not in ("reload","update"):
print "--name is required"
sys.exit(1)
if object_action in [ "add", "edit", "copy", "rename", "remove" ]:
try:
if object_type == "setting":
settings = self.remote.get_settings()
if not settings.get('allow_dynamic_settings',False):
raise RuntimeError("Dynamic settings changes are not enabled. Change the allow_dynamic_settings to 1 and restart cobblerd to enable dynamic settings changes")
elif options.name == 'allow_dynamic_settings':
raise RuntimeError("Cannot modify that setting live")
elif self.remote.modify_setting(options.name,options.value,self.token):
raise RuntimeError("Changing the setting failed")
else:
self.remote.xapi_object_edit(object_type, options.name, object_action, utils.strip_none(vars(options), omit_none=True), self.token)
except xmlrpclib.Fault, (err):
(etype, emsg) = err.faultString.split(":",1)
print emsg[1:-1] # don't print the wrapping quotes
sys.exit(1)
except RuntimeError, (err):
print err.args[0]
sys.exit(1)
elif object_action == "getks":
if object_type == "profile":
data = self.remote.generate_kickstart(options.name,"")
elif object_type == "system":
data = self.remote.generate_kickstart("",options.name)
print data
elif object_action == "dumpvars":
if object_type == "profile":
data = self.remote.get_blended_data(options.name,"")
elif object_type == "system":
data = self.remote.get_blended_data("",options.name)
# FIXME: pretty-printing and sorting here
keys = data.keys()
keys.sort()
for x in keys:
print "%s : %s" % (x, data[x])
elif object_action in [ "poweron", "poweroff", "powerstatus", "reboot" ]:
power={}
power["power"] = object_action.replace("power","")
power["systems"] = [options.name]
task_id = self.remote.background_power_system(power, self.token)
elif object_action == "update":
task_id = self.remote.background_signature_update(utils.strip_none(vars(options),omit_none=True), self.token)
elif object_action == "reload":
filename = opt(options,"filename","/var/lib/cobbler/distro_signatures.json")
if not utils.load_signatures(filename,cache=True):
print "There was an error loading the signature data in %s." % filename
print "Please check the JSON file or run 'cobbler signature update'."
return False
else:
print "Signatures were successfully loaded"
else:
raise exceptions.NotImplementedError()
else:
raise exceptions.NotImplementedError()
# FIXME: add tail/polling code here
if task_id != -1:
self.print_task(task_id)
self.follow_task(task_id)
return True
# BOOKMARK
def direct_command(self, action_name):
"""
Process non-object based commands like "sync" and "hardlink"
"""
task_id = -1 # if assigned, we must tail the logfile
if action_name == "buildiso":
defaultiso = os.path.join(os.getcwd(), "generated.iso")
self.parser.add_option("--iso", dest="iso", default=defaultiso, help="(OPTIONAL) output ISO to this path")
self.parser.add_option("--profiles", dest="profiles", help="(OPTIONAL) use these profiles only")
self.parser.add_option("--systems", dest="systems", help="(OPTIONAL) use these systems only")
self.parser.add_option("--tempdir", dest="buildisodir", help="(OPTIONAL) working directory")
self.parser.add_option("--distro", dest="distro", help="(OPTIONAL) used with --standalone to create a distro-based ISO including all associated profiles/systems")
self.parser.add_option("--standalone", dest="standalone", action="store_true", help="(OPTIONAL) creates a standalone ISO with all required distro files on it")
self.parser.add_option("--source", dest="source", help="(OPTIONAL) used with --standalone to specify a source for the distribution files")
self.parser.add_option("--exclude-dns", dest="exclude_dns", action="store_true", help="(OPTIONAL) prevents addition of name server addresses to the kernel boot options")
self.parser.add_option("--mkisofs-opts", dest="mkisofs_opts", help="(OPTIONAL) extra options for mkisofs")
(options, args) = self.parser.parse_args()
task_id = self.start_task("buildiso",options)
elif action_name == "replicate":
self.parser.add_option("--master", dest="master", help="Cobbler server to replicate from.")
self.parser.add_option("--distros", dest="distro_patterns", help="patterns of distros to replicate")
self.parser.add_option("--profiles", dest="profile_patterns", help="patterns of profiles to replicate")
self.parser.add_option("--systems", dest="system_patterns", help="patterns of systems to replicate")
self.parser.add_option("--repos", dest="repo_patterns", help="patterns of repos to replicate")
self.parser.add_option("--image", dest="image_patterns", help="patterns of images to replicate")
self.parser.add_option("--mgmtclasses", dest="mgmtclass_patterns", help="patterns of mgmtclasses to replicate")
self.parser.add_option("--packages", dest="package_patterns", help="patterns of packages to replicate")
self.parser.add_option("--files", dest="file_patterns", help="patterns of files to replicate")
self.parser.add_option("--omit-data", dest="omit_data", action="store_true", help="do not rsync data")
self.parser.add_option("--sync-all", dest="sync_all", action="store_true", help="sync all data")
self.parser.add_option("--prune", dest="prune", action="store_true", help="remove objects (of all types) not found on the master")
(options, args) = self.parser.parse_args()
task_id = self.start_task("replicate",options)
elif action_name == "aclsetup":
self.parser.add_option("--adduser", dest="adduser", help="give acls to this user")
self.parser.add_option("--addgroup", dest="addgroup", help="give acls to this group")
self.parser.add_option("--removeuser", dest="removeuser", help="remove acls from this user")
self.parser.add_option("--removegroup", dest="removegroup", help="remove acls from this group")
(options, args) = self.parser.parse_args()
task_id = self.start_task("aclsetup",options)
elif action_name == "version":
version = self.remote.extended_version()
print "Cobbler %s" % version["version"]
print " source: %s, %s" % (version["gitstamp"], version["gitdate"])
print " build time: %s" % version["builddate"]
elif action_name == "hardlink":
(options, args) = self.parser.parse_args()
task_id = self.start_task("hardlink",options)
elif action_name == "reserialize":
(options, args) = self.parser.parse_args()
task_id = self.start_task("reserialize",options)
elif action_name == "status":
(options, args) = self.parser.parse_args()
print self.remote.get_status("text",self.token)
elif action_name == "validateks":
(options, args) = self.parser.parse_args()
task_id = self.start_task("validateks",options)
elif action_name == "get-loaders":
self.parser.add_option("--force", dest="force", action="store_true", help="overwrite any existing content in /var/lib/cobbler/loaders")
(options, args) = self.parser.parse_args()
task_id = self.start_task("dlcontent",options)
elif action_name == "import":
self.parser.add_option("--arch", dest="arch", help="OS architecture being imported")
self.parser.add_option("--breed", dest="breed", help="the breed being imported")
self.parser.add_option("--os-version", dest="os_version", help="the version being imported")
self.parser.add_option("--path", dest="path", help="local path or rsync location")
self.parser.add_option("--name", dest="name", help="name, ex 'RHEL-5'")
self.parser.add_option("--available-as", dest="available_as", help="tree is here, don't mirror")
self.parser.add_option("--kickstart", dest="kickstart_file", help="assign this kickstart file")
self.parser.add_option("--rsync-flags", dest="rsync_flags", help="pass additional flags to rsync")
(options, args) = self.parser.parse_args()
task_id = self.start_task("import",options)
elif action_name == "reposync":
self.parser.add_option("--only", dest="only", help="update only this repository name")
self.parser.add_option("--tries", dest="tries", help="try each repo this many times", default=1)
self.parser.add_option("--no-fail", dest="nofail", help="don't stop reposyncing if a failure occurs", action="store_true")
(options, args) = self.parser.parse_args()
task_id = self.start_task("reposync",options)
elif action_name == "aclsetup":
(options, args) = self.parser.parse_args()
# FIXME: missing options, add them here
task_id = self.start_task("aclsetup",options)
elif action_name == "check":
results = self.remote.check(self.token)
ct = 0
if len(results) > 0:
print "The following are potential configuration items that you may want to fix:\n"
for r in results:
ct = ct + 1
print "%s : %s" % (ct, r)
print "\nRestart cobblerd and then run 'cobbler sync' to apply changes."
else:
print "No configuration problems found. All systems go."
elif action_name == "sync":
(options, args) = self.parser.parse_args()
self.parser.add_option("--verbose", dest="verbose", action="store_true", help="run sync with more output")
task_id = self.start_task("sync",options)
elif action_name == "report":
(options, args) = self.parser.parse_args()
print "distros:\n=========="
report_items(self.remote,"distro")
print "\nprofiles:\n=========="
report_items(self.remote,"profile")
print "\nsystems:\n=========="
report_items(self.remote,"system")
print "\nrepos:\n=========="
report_items(self.remote,"repo")
print "\nimages:\n=========="
report_items(self.remote,"image")
print "\nmgmtclasses:\n=========="
report_items(self.remote,"mgmtclass")
print "\npackages:\n=========="
report_items(self.remote,"package")
print "\nfiles:\n=========="
report_items(self.remote,"file")
elif action_name == "list":
# no tree view like 1.6? This is more efficient remotely
# for large configs and prevents xfering the whole config
# though we could consider that...
(options, args) = self.parser.parse_args()
print "distros:"
list_items(self.remote,"distro")
print "\nprofiles:"
list_items(self.remote,"profile")
print "\nsystems:"
list_items(self.remote,"system")
print "\nrepos:"
list_items(self.remote,"repo")
print "\nimages:"
list_items(self.remote,"image")
print "\nmgmtclasses:"
list_items(self.remote,"mgmtclass")
print "\npackages:"
list_items(self.remote,"package")
print "\nfiles:"
list_items(self.remote,"file")
else:
print "No such command: %s" % action_name
sys.exit(1)
# FIXME: run here
# FIXME: add tail/polling code here
if task_id != -1:
self.print_task(task_id)
self.follow_task(task_id)
return True
def print_task(self, task_id):
print "task started: %s" % task_id
events = self.remote.get_events()
(etime, name, status, who_viewed) = events[task_id]
atime = time.asctime(time.localtime(etime))
print "task started (id=%s, time=%s)" % (name, atime)
def follow_task(self, task_id):
logfile = "/var/log/cobbler/tasks/%s.log" % task_id
# adapted from: http://code.activestate.com/recipes/157035/
file = open(logfile,'r')
#Find the size of the file and move to the end
#st_results = os.stat(filename)
#st_size = st_results[6]
#file.seek(st_size)
while 1:
where = file.tell()
line = file.readline()
if line.find("### TASK COMPLETE ###") != -1:
print "*** TASK COMPLETE ***"
sys.exit(0)
if line.find("### TASK FAILED ###") != -1:
print "!!! TASK FAILED !!!"
sys.exit(1)
if not line:
time.sleep(1)
file.seek(where)
else:
if line.find(" | "):
line = line.split(" | ")[-1]
print line, # already has newline
def print_object_help(self, object_type):
"""
Prints the subcommands for a given object, e.g. "cobbler distro --help"
"""
commands = OBJECT_ACTIONS_MAP[object_type]
commands.sort()
print "usage\n====="
for c in commands:
print "cobbler %s %s" % (object_type, c)
sys.exit(2)
def print_help(self):
"""
Prints general-top level help, e.g. "cobbler --help" or "cobbler" or "cobbler command-does-not-exist"
"""
print "usage\n====="
print "cobbler <distro|profile|system|repo|image|mgmtclass|package|file> ... "
print " [add|edit|copy|getks*|list|remove|rename|report] [options|--help]"
print "cobbler <%s> [options|--help]" % "|".join(DIRECT_ACTIONS)
sys.exit(2)
def main():
"""
CLI entry point
"""
cli = BootCLI()
cli.check_setup()
rc = cli.run(sys.argv)
if rc == True or rc is None:
sys.exit(0)
elif rc == False:
sys.exit(1)
return sys.exit(rc)
if __name__ == "__main__":
main()
| nacc/cobbler | cobbler/cli.py | Python | gpl-2.0 | 28,939 |
from django.shortcuts import render, render_to_response
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.views.generic import ListView
from bookrental.forms import UserCreateForm
from bookrental.models import Book
from bookrental.tables import BookTable
from bookrental.models import Cart
from bookrental.tables import CartTable
from bookrental.models import Prices
from bookrental.tables import PriceTable
from django_tables2 import RequestConfig
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.db.models import F
from django.db.models import Q
from bookrental.models import Returns
from bookrental.tables import ReturnTable
# Create your views here.
def book(request):
c = {}
c.update(csrf(request))
# select all the books with the user's current category selected
select_books_from = request.POST.get('books')
table = BookTable(Book.objects.filter(category=request.POST.get('books'))) # request.session['category']))
RequestConfig(request).configure(table)
if request.method == "GET":
#pks = request.POST.getlist("selection")
pks = request.GET.getlist("selection")
selected_books = Book.objects.filter(pk__in=pks)
# put selected books in cart
# TODO: Doesn't work; not saving to the cart table!!!
#for p in pks:
kcart = Cart(isbn='978-123456', quantity=1, price=0)
#for p in Prices.objects.all():
# if b.isbn == p.isbn:
# kcart.price = p.price
# break
kcart.save()
#table = CartTable(Cart.objects.all())))))
#RequestConfig(request).configure(table)
# pass these books to cart page
return HttpResponseRedirect(reverse('cart'))#, c, {'table': table})
return render(request, 'bookrental/Books.html', {'table': table, 'select_books_from': select_books_from})
def checkout(request):
# displays a successful checkout page
return render_to_response('bookrental/Checkout.html')
def info(request):
return render_to_response('bookrental/InfoPage.html')
def login_page(request):
c = {}
c.update(csrf(request))
if request.method == 'POST':
# if the login button was clicked, authenticate the given user/pass combo
username1 = request.POST.get('username')
password1 = request.POST.get('password')
user = authenticate(username=username1, password=password1)
if user is not None:
login(request, user)
# update session
request.session['username'] = username1
# good login, so go to warning page
return HttpResponseRedirect('warning/')
else:
# bad login, so go to failure
return HttpResponseRedirect('login_failure/')
return render_to_response('bookrental/Login.html', c)
def return_confirm(request):
# display a return confirmation page
return render_to_response('bookrental/ReturnConfirm.html')
def returns(request):
c = {}
c.update(csrf(request))
# Create a table of all returnable objects
table = ReturnTable(Returns.objects.all())
RequestConfig(request).configure(table)
if request.method == "POST":
# get list of returning books, delete from total returns
pks = request.POST.getlist("returning")
returned_books = Returns.objects.filter(~Q(pk__in=pks))
# pass these books to return confirmation page as table
table = ReturnTable(returned_books)
RequestConfig(request).configure(table)
return render(request, 'bookrental/ReturnConfirm.html', {'table': table})
return render(request, 'bookrental/Returns.html', {'table': table})
def warning(request):
# displays the disclaimer page
return render_to_response('bookrental/Warning.html')
def cart(request):
c = {}
c.update(csrf(request))
pks = request.GET.getlist("selection")
# get new books to add, join with price table
    new_cart = Cart.objects.all()
    for item in new_cart:
        # if a cart item is not selected, delete it
        if item.isbn not in pks:
            item.delete()
table = CartTable(new_cart)
RequestConfig(request).configure(table)
if request.method == "POST":
pks = request.POST.getlist("removed")
# add all books NOT in removed
removed_books = Cart.objects.filter(~Q(pk__in=pks))
#pass these books to cart page as table
table = CartTable(removed_books)
RequestConfig(request).configure(table)
# display updated table on same page
return render(request, 'bookrental/YourCart.html', {'table': table})
return render(request, 'bookrental/YourCart.html', {'table': table})
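# Note: filter(~Q(pk__in=pks)) above is equivalent to the more idiomatic
# Cart.objects.exclude(pk__in=pks); both keep only rows whose pk is not in pks.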
def category(request):
c = {}
c.update(csrf(request))
# all available categories for books
categories = {"programming_languages", "software_engineering", "computer_networking", "operating_systems", "database_systems", "computer_organization"}
if request.method == 'POST':
# if the button was pressed, pass the selected category to the books page
select_books_from = request.POST.get('books')
request.session['category'] = select_books_from
return HttpResponseRedirect(reverse('book'), c, {'select_books_from': select_books_from})
return render_to_response('bookrental/category.html', c, context_instance=RequestContext(request))
def login_failure(request):
c = {}
c.update(csrf(request))
if request.method == 'POST':
# if the button was clicked, authenticate user and pass in auth_user table
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user is not None:
# if the user/pass pair is good, login and redirect to warning page
login(request, user)
# update session
request.session['username'] = username
return HttpResponseRedirect(reverse('warning'))
return render_to_response('bookrental/login_failure.html', c)
def logout_page(request):
# clear out their cart
for c in Cart.objects.all():
c.delete()
# logout the user
logout(request)
# go back to the login page
return render(request, 'bookrental/Login.html')
# Register a new user with a custom form, log them in, and redirect to the Warning page.
def new_user(request):
if request.method == 'POST':
# when they hit submit, check if their form is correct
user_form = UserCreateForm(request.POST)
if user_form.is_valid():
username1 = user_form.clean_username()
password = user_form.clean_password2()
user_form.save()
user = authenticate(username=username1, password=password)
login(request, user)
# update current session
request.session['username'] = username1
return HttpResponseRedirect(reverse('warning'))
user_form = UserCreateForm()
return render(request, 'bookrental/new_user.html', {'user_form': user_form})
def update_user(request):
if request.method == 'POST':
# if they hit submit, get their user and pass
username = request.session['username'] # request.user
password = request.POST.get('password')
# Current password is correct, so can set new password
        if authenticate(username=username, password=password) is not None:
            request.user.set_password(request.POST.get('new_password'))
            request.user.email = request.POST.get('email')
            request.user.save()  # persist the new password and email
# go to category page
return HttpResponseRedirect(reverse('category'))
return render_to_response('bookrental/update_user.html')
################################################
| AHaymanDev/CS453DjangoProj | bookrental/views.py | Python | gpl-2.0 | 8,027 |
# -*- coding: utf-8 -*-
import regexUtils
import re
import urllib
import urlparse
def findJS(data):
idName = '(?:f*id|ch)'
jsName = '([^\"\']+?\.js[^\"\']*?)'
regex = "(?:java)?scr(?:'\+')?ipt.*?" + idName + "\s*=\s*[\"']([^\"']+)[\"'][^<]*</scr(?:'\+')?ipt\s*>[^<]*<scr(?:'\+')?ipt[^<]*src=[\"']" + jsName + "[\"']"
jscript = regexUtils.findall(data, regex)
if jscript:
jscript = filter(lambda x: x[1].find('twitter') == -1, jscript)
return jscript
return None
def findPHP(data, streamId):
regex = "document.write\('.*?src=['\"]*(.*?.(?:php|html)[^&\"]*).*?['\" ]*.*?\)"
php = regexUtils.findall(data, regex)
if php:
return re.sub(r"\'\+\s*(?:[fc]*id|ch)\s*\+\'", "%s" % streamId,php[0])
regex = "document.write\('.*?src=['\"]*(.*?(?:f*id|ch)\s*\+'\.html*).*?['\" ]*.*?\)"
html = regexUtils.findall(data, regex)
if html:
return re.sub(r"\'\+\s*(?:f*id|ch)\s*\+\'", "%s" % streamId,html[0])
return None
def findRTMP(url, data):
#if data.lower().find('rtmp') == -1:
# return None
try:
text = str(data)
except:
text = data
#method 1
#["'=](http://[^'" ]*.swf[^'" ]*file=([^&"']+)[^'" ]*&streamer=([^"'&]+))
#streamer=([^&"]+).*?file=([^&"]+).*?src="([^"]+.swf)"
# method 2
#"([^"]+.swf\?.*?file=(rtmp[^&]+)&.*?id=([^&"]+)[^"]*)"
sep1 = '[\'"&\? ]'
sep2 = '(?:[\'"]\s*(?:,|\:)\s*[\'"]|=)'
value = '([^\'"&]+)'
method1 = True
method2 = False
radius = 400
playpath = ''
swfUrl = ''
rtmp = regexUtils.findall(text, sep1 + 'streamer' + sep2 + value)
if not rtmp:
tryMethod2 = regexUtils.findall(text, sep1 + 'file' + sep2 + value)
if tryMethod2 and tryMethod2[0].startswith('rtmp'):
method1 = False
method2 = True
rtmp = tryMethod2
if rtmp:
for r in rtmp:
tmpRtmp = r.replace('/&','').replace('&','')
idx = text.find(tmpRtmp)
min_idx = 0
max_idx = len(text) - 1
start = idx-radius
if start < min_idx:
start = min_idx
end = idx+radius
if end > max_idx:
end = max_idx
area = text[start:end]
clipStart = idx+len(tmpRtmp)
if clipStart < max_idx:
text = text[clipStart:]
if method1:
playpath = regexUtils.findall(area, sep1 + 'file' + sep2 + value)
if method2:
playpath = regexUtils.findall(area, sep1 + 'id' + sep2 + value)
if playpath:
tmpRtmp = tmpRtmp + '/' + playpath[0]
if playpath:
swfUrl = regexUtils.findall(area, 'SWFObject\([\'"]([^\'"]+)[\'"]')
if not swfUrl:
swfUrl = regexUtils.findall(area, sep1 + '([^\'"& ]+\.swf)')
if not swfUrl:
swfUrl = regexUtils.findall(data, sep1 + '([^\'"& ]+\.swf)')
if swfUrl:
finalSwfUrl = swfUrl[0]
if not finalSwfUrl.startswith('http'):
finalSwfUrl = urlparse.urljoin(url, finalSwfUrl)
regex = '://(.*?)/'
server = regexUtils.findall(tmpRtmp, regex)
if server:
if server[0].find(':') == -1:
tmpRtmp = tmpRtmp.replace(server[0], server[0] + ':1935')
return [tmpRtmp, playpath[0], finalSwfUrl]
return None
def getHostName(url):
scheme = urlparse.urlparse(url)
if scheme:
return scheme.netloc.replace('www.','')
return None
def findFrames(data):
if data.lower().find('frame') == -1:
return None
return regexUtils.findall(data, "(frame[^>]*)>")
def findContentRefreshLink(data):
regex = '0;\s*url=([^\'" ]+)'
links = regexUtils.findall(data, regex)
if links:
return links[0]
regex = 'window.location\s*=\s*[\'"]([^\'"]+)[\'"]'
links = regexUtils.findall(data, regex)
if links:
return links[0]
regex = 'frame\s*scrolling=\"auto\"\s*noresize\s*src\s*=\s*[\'"]([^\'"]+)[\'"]'
links = regexUtils.findall(data, regex)
if links:
return links[0]
regex = 'href=[\'"]([^\'"]+)[\'"]\s*target="_blank"><img class="alignnone"'
links = regexUtils.findall(data, regex)
if links:
return links[0]
return None
def findEmbedPHPLink(data):
regex = '<script type="text/javascript" src="((?![^"]+localtimes)(?![^"]+adcash)[^"]+\.php\?[^"]+)"\s*>\s*</script>'
links = regexUtils.findall(data, regex)
if links:
return links[0]
return None
def findVideoFrameLink(page, data):
minheight=300
minwidth=300
frames = findFrames(data)
if not frames:
return None
iframes = regexUtils.findall(data, "(frame(?![^>]*cbox\.ws)(?![^>]*Publi)(?![^>]*chat\d*\.\w+)(?![^>]*ad122m)(?![^>]*adshell)(?![^>]*capacanal)(?![^>]*blacktvlive\.com)[^>]*\sheight\s*=\s*[\"']*([\%\d]+)(?:px)?[\"']*[^>]*>)")
if iframes:
for iframe in iframes:
if iframe[1] == '100%':
height = minheight+1
else:
height = int(iframe[1])
if height > minheight:
m = regexUtils.findall(iframe[0], "[\"' ]width\s*=\s*[\"']*(\d+[%]*)(?:px)?[\"']*")
if m:
if m[0] == '100%':
width = minwidth+1
else:
width = int(m[0])
if width > minwidth:
m = regexUtils.findall(iframe[0], '[\'"\s]+src\s*=\s*["\']*\s*([^>"\' ]+)\s*[>"\']*')
if m:
if 'premiertv' in page:
page = page+'/'
return urlparse.urljoin(urllib.unquote(page), m[0]).strip()
# Alternative 1
iframes = regexUtils.findall(data, "(frame(?![^>]*cbox\.ws)(?![^>]*capacanal)(?![^>]*blacktvlive\.com)[^>]*[\"; ]height:\s*(\d+)[^>]*>)")
if iframes:
for iframe in iframes:
height = int(iframe[1])
if height > minheight:
m = regexUtils.findall(iframe[0], "[\"; ]width:\s*(\d+)")
if m:
width = int(m[0])
if width > minwidth:
m = regexUtils.findall(iframe[0], '[\"; ]src=["\']*\s*([^>"\' ]+)\s*[>"\']*')
if m:
return urlparse.urljoin(urllib.unquote(page), m[0]).strip()
# Alternative 2 (Frameset)
m = regexUtils.findall(data, '<(?:FRAMESET|frameset)[^>]+100%[^>]+>\s*<(?:FRAME|frame)[^>]+src="([^"]+)"')
if m:
return urlparse.urljoin(urllib.unquote(page), m[0]).strip()
m = regexUtils.findall(data, '<a href="([^"]+)" target="_blank"><img src="[^"]+" height="\d+" width="\d+" longdesc="[^"]+"/></a>')
if m:
return urlparse.urljoin(urllib.unquote(page), m[0]).strip()
return None
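# A minimal, hypothetical usage sketch of the frame scraper above (the page
# URL and HTML are made up; regexUtils.findall is the helper imported at the
# top of this module):
#
#   sample = '<iframe height="480" width="640" src="http://example.com/embed/1"></iframe>'
#   findVideoFrameLink('http://example.com/watch', sample)
#   # -> 'http://example.com/embed/1' (the src, resolved against the page URL)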
| fkeles/sports | lib/utils/scrapingUtils.py | Python | gpl-2.0 | 7,381 |
#!/usr/bin/python
#
# pyku
# ====
# Python-based random haiku generator
#
# Chris Collins, <collins.christopher@gmail.com>
#
# v0.5 - 2013-11-15
#
# Copyright (C) 2013 Chris Collins
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
debug = False
debug_word = ""
LOCAL_PATH = os.path.dirname(os.path.realpath(__file__))
### TODO:###
# Unhandled Exceptions:
# "ai" is two syllables
# "ia" may be two syllables, eg. 'negotiated'
# "oa" may be two syllables, eg. 'Croation'
# "-ed" is usually silent, unless following double t"s
# "-ier" is usually two syllables
# Some -le not being handled right:
# maybe any le following a vowel is 1 syllable
# and following a consonant is 2?
# ex. "tinkle" being shown as 1 syllable
# French-based words SUCK: "serviette" shown as 5 syllables
# "-es" still not quite right: debauches shown as 2 syllables
# What about "-iest"? 2 syllables: Roomiest being shown as 2 instead of 3
# Sonofa: Cheeseburger = 4 syllables. Tripping over the middle "e"
### Import the necessary modules ###
# Import web for web.py support
# Import os for web.py to Apache connection
# Import RE for regular expression matching
# Import random to grab random words
import web
import os
import re
if not debug_word:
import random
### Set global variables ###
# The starting number of syllables
syllables = 0
# Our Random Word
glorand_word = ""
urls = (
"/", "index"
)
class index:
def GET(self):
return render.index("pyku")
def pyku():
"""
Build a Haiku
"""
    tonic = buildline(5)
    penultimate = buildline(7)
    ultimate = buildline(5)
    return '\n'.join((tonic, penultimate, ultimate))
def buildline(line_syllables):
"""
Build each line from random words
"""
line_list = []
our_syllables = 0
while (our_syllables < line_syllables):
randomword()
if debug:
print glorand_word
if (our_syllables + syllables) > line_syllables:
randomword()
else:
#print glorand_word
#print syllables
line_list.append(glorand_word)
our_syllables += syllables
if debug:
print "My Line Syllables:", line_syllables
print "My Syllables So Far:", our_syllables
return ' '.join(line_list)
def randomword():
"""
Gets a random word from the Ubuntu american-english dictionary
"""
# Reset the syllable count
syleq()
if debug_word:
random_word = debug_word
else:
# Open our word list
text = open(os.path.join(LOCAL_PATH, "american-english"))
words = text.read()
random_word = random.choice(words.split())
if debug:
print random_word
check_possessive(random_word)
def check_possessive(random_word):
"""
For now, we want to throw back possessive words.
"""
poss = re.match(r".*'s", random_word, re.IGNORECASE)
if poss:
randomword()
else:
if debug:
print "Our word is:", random_word
global glorand_word
glorand_word = random_word
vowelfind(random_word)
def vowelfind(random_word):
"""
Find the vowel clusters in the random word
"""
vowel_list = "[aeiouy]+"
vowels = re.findall(vowel_list, random_word, re.IGNORECASE)
if vowels:
vowelcount = len(vowels)
if debug:
print vowels
global syllables
syllables += vowelcount
vowelcontext(random_word)
else:
randomword()
def vowelcontext(random_word):
"""
Container module for running through
the list of checks we need to do to count
syllables.
"""
if debug:
print "Going into 'vowelcontext':"
print "Number of Syllables, maybe: ", syllables
trailing_e(random_word)
# Obsoleted by adding 'y' to vowel list
# trailing_y(random_word)
def trailing_e(random_word):
"""
First:
Check if word ends in '-e', or optionally, '-es',
    not immediately preceded by another vowel OR ending in '-que'
AND does not end in '-ble' or '-ses', THEN decrements the
syllable count.
UNLESS - there is only 1 syllable.
Cases:
fare, faires, tree - matches first, does not decrement
martinique - does not match first, does match second, decrements
unibroue - does not match first or second, does not decrement
# TODO - Unhandled Exceptions:
fire - could be two syllables
"""
    # Finds trailing -e(s) WITHOUT preceding vowels OR ending in '-que'
#trail_e = re.findall(r"[^aeiou]+?e[s]?$", random_word, re.IGNORECASE)
trail_e_que = re.findall(r"((qu)|([^aeiou]))+?e[s]?$",
random_word,
re.IGNORECASE)
# Check for '-ble or -ses'
trail_ses_ble = re.findall(r"((bl)|(s))e[s]?$",
random_word,
re.IGNORECASE)
if trail_e_que and not trail_ses_ble:
if debug:
print trail_e_que
print """
Trailing '-e(s)' or '-que' characters
and no trailing '-ble' or '-ses'."""
syldec(1)
if debug:
print "Leaving 'trailing_e':"
print "Number of Syllables, maybe: ", syllables
modcount("trailing_e")
def sylinc(i):
global syllables
syllables += i
def syldec(i):
global syllables
if syllables > 1: # Can't reduce to 0
syllables -= i
def syleq():
global syllables
syllables = 0
def modcount(mod):
if debug:
print "Leaving '" + mod + "' - "
print "Number of Syllables is, maybe: ", syllables
app = web.application(urls, globals())
render = web.template.render(LOCAL_PATH + "/templates/", globals={"buildline": buildline})
curdir = os.path.dirname(__file__)
session = web.session.Session(
app, web.session.DiskStore(
os.path.join(curdir, "%s/sessions" % LOCAL_PATH)),)
application = app.wsgifunc()
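# A small, hypothetical walk-through of the syllable counter above:
#   syleq()             # reset the global count to 0
#   vowelfind("haiku")  # vowel clusters "ai" + "u" -> syllables == 2
# "haiku" has no trailing '-e(s)', so trailing_e() leaves the count at 2.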
| clcollins/pyku | pyku-web.py | Python | gpl-2.0 | 6,637 |
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
import os
import re
from invenio.dbquery import run_sql
from invenio.websubmit_config import InvenioWebSubmitFunctionStop
def Check_Group(parameters, curdir, form, user_info=None):
"""
Check that a group exists.
Read from file "/curdir/Group"
If the group does not exist, switch to page 1, step 0
"""
#Path of file containing group
if os.path.exists("%s/%s" % (curdir,'Group')):
        fp = open("%s/%s" % (curdir,'Group'),"r")
        group = fp.read()
        fp.close()
group = group.replace("/","_")
group = re.sub("[\n\r]+","",group)
res = run_sql ("""SELECT id FROM usergroup WHERE name = %s""", (group,))
if len(res) == 0:
raise InvenioWebSubmitFunctionStop("""
<SCRIPT>
document.forms[0].action="/submit";
document.forms[0].curpage.value = 1;
document.forms[0].step.value = 0;
user_must_confirm_before_leaving_page = false;
document.forms[0].submit();
alert('The given group name (%s) is invalid.');
</SCRIPT>""" % (group,))
    else:
        raise InvenioWebSubmitFunctionStop("""
<SCRIPT>
   document.forms[0].action="/submit";
   document.forms[0].curpage.value = 1;
   document.forms[0].step.value = 0;
   user_must_confirm_before_leaving_page = false;
   document.forms[0].submit();
   alert('No group name was provided.');
</SCRIPT>""")
return ""
| lbjay/cds-invenio | modules/websubmit/lib/functions/Check_Group.py | Python | gpl-2.0 | 2,200 |
import unittest
from geobricks_modis.core import modis_core as c
class GeobricksModisTest(unittest.TestCase):
def test_get_modis_product_table(self):
products = c.get_modis_product_table()
self.assertEqual(len(products), 68)
def test_list_products(self):
products = c.list_products()
self.assertEqual(len(products), 427)
def test_list_years(self):
years = c.list_years('MOD13A2')
self.assertEqual(len(years), 16)
def test_list_days(self):
days = c.list_days('MOD13A2', '2010')
self.assertEqual(len(days), 23)
def test_list_layers(self):
layers = c.list_layers('MOD13A2', 2014, '001')
self.assertEqual(len(layers), 286)
layers = c.list_layers('MYD11C1', 2014, '001')
self.assertEqual(len(layers), 1)
def test_list_layers_subset(self):
layers = c.list_layers_subset('MOD13A2', '2010', '001', 5, 7, 3, 9)
self.assertEqual(len(layers), 5)
layers = c.list_layers_subset('MYD11C1', '2010', '001', 5, 7, 3, 9)
self.assertEqual(len(layers), 1)
def test_list_layers_countries_subset(self):
layers = c.list_layers_countries_subset('MOD13A2', '2010', '001', '8,IT,fra')
self.assertEqual(len(layers), 12)
def test_list_layers_countries_subset_gaul(self):
layers = c.list_layers_countries_subset('MOD13A2', '2010', '001', '8,1')
self.assertEqual(len(layers), 8)
def test_list_layers_countries_subset_iso2(self):
layers = c.list_layers_countries_subset_iso2('MOD13A2', '2010', '001', 'IT,FR')
self.assertEqual(len(layers), 7)
def test_list_layers_countries_subset_iso3(self):
layers = c.list_layers_countries_subset_iso3('MOD13A2', '2010', '001', 'ITA,FRA')
self.assertEqual(len(layers), 7)
def test_day_of_the_year_to_date(self):
date = c.day_of_the_year_to_date('017', 2014)
date_string = date.strftime("%Y-%m-%d %H:%M:%S").split(' ')[0]
self.assertEqual(date_string, '2014-01-17')
def test_list_countries(self):
out = c.list_countries()
        self.assertEqual(len(out), 277)
if __name__ == '__main__':
unittest.main()
| geobricks/geobricks_modis | geobricks_modis_test/core/test_modis_core.py | Python | gpl-2.0 | 2,201 |
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2014, 2015 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
"""BibMatch - tool to match records with database content of an Invenio instance,
either locally or remotely through invenio_connector."""
__revision__ = "$Id$"
import sys
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set #for "&" intersection
# pylint: enable=W0622
import string
import os
import getopt
import re
import getpass
from six import iteritems
from tempfile import mkstemp
from time import sleep
from invenio.config import CFG_SITE_SECURE_URL, CFG_BIBMATCH_FUZZY_WORDLIMITS, \
CFG_BIBMATCH_QUERY_TEMPLATES, \
CFG_BIBMATCH_FUZZY_EMPTY_RESULT_LIMIT, \
CFG_BIBMATCH_LOCAL_SLEEPTIME, \
CFG_BIBMATCH_REMOTE_SLEEPTIME, \
CFG_SITE_RECORD, \
CFG_BIBMATCH_SEARCH_RESULT_MATCH_LIMIT
from invenio.legacy.bibmatch.config import CFG_BIBMATCH_LOGGER, \
CFG_LOGFILE
from invenio_client import InvenioConnector, \
InvenioConnectorAuthError
from invenio.legacy.bibrecord import create_records, \
record_get_field_values, record_xml_output, record_modify_controlfield, \
record_has_field, record_add_field
from invenio.legacy.bibconvert import api as bibconvert
from invenio.legacy.search_engine import get_fieldcodes, \
re_pattern_single_quotes, \
re_pattern_double_quotes, \
re_pattern_regexp_quotes, \
re_pattern_spaces_after_colon
from invenio.legacy.search_engine.query_parser import SearchQueryParenthesisedParser
from invenio.legacy.dbquery import run_sql
from invenio.legacy.bibrecord.textmarc2xmlmarc import transform_file
from invenio.legacy.bibmatch.validator import validate_matches, transform_record_to_marc, \
validate_tag, BibMatchValidationError
from invenio.utils.text import translate_to_ascii, xml_entities_to_utf8
try:
from six import StringIO
except ImportError:
from StringIO import StringIO
re_querystring = re.compile("\s?([^\s$]*)\[(.+?)\]([^\s$]*).*?", re.DOTALL)
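# e.g. re_querystring.findall('773__p:"[773__p]"') yields
# [('773__p:"', '773__p', '"')], i.e. the bracketed field name plus any text
# glued directly before and after it (see the "bracket syntax" described below).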
def usage():
"""Print help"""
print(""" BibMatch - match bibliographic data against database, either locally or remotely
Usage: %s [options] [QUERY]
Options:
Output:
-0 --print-new (default) print unmatched in stdout
-1 --print-match print matched records in stdout
 -2 --print-ambiguous print records that match more than one existing record
-3 --print-fuzzy print records that match the longest words in existing records
-b --batch-output=(filename). filename.new will be new records, filename.matched will be matched,
filename.ambiguous will be ambiguous, filename.fuzzy will be fuzzy match
-t --text-marc-output transform the output to text-marc format instead of the default MARCXML
Simple query:
-q --query-string=(search-query/predefined-query) See "Querystring"-section below.
-f --field=(field)
General options:
 -n --no-process Do not print records in stdout.
-i, --input use a named file instead of stdin for input
-v, --verbose=LEVEL verbose level (from 0 to 9, default 1)
-r, --remote=URL match against a remote Invenio installation (Full URL, no trailing '/')
Beware: Only searches public records attached to home collection
-a, --alter-recid The recid (controlfield 001) of matched or fuzzy matched records in
output will be replaced by the 001 value of the matched record.
Note: Useful if you want to replace matched records using BibUpload.
-z, --clean clean queries before searching
--no-validation do not perform post-match validation
-h, --help print this help and exit
-V, --version print version information and exit
Advanced options:
-m --mode=(a|e|o|p|r) perform an advanced search using special search mode.
Where mode is:
"a" all of the words,
"o" any of the words,
"e" exact phrase,
"p" partial phrase,
"r" regular expression.
 -o --operator=(a|o) used to concatenate identical fields in search query (i.e. several report-numbers)
Where operator is:
"a" boolean AND (default)
"o" boolean OR
-c --config=filename load querystrings from a config file. Each line starting with QRYSTR will
be added as a query. i.e. QRYSTR --- [title] [author]
-x --collection only perform queries in certain collection(s).
Note: matching against restricted collections requires authentication.
--user=USERNAME username to use when connecting to Invenio instance. Useful when searching
restricted collections. You will be prompted for password.
QUERYSTRINGS
Querystrings determine which type of query/strategy to use when searching for the
matching records in the database.
Predefined querystrings:
There are some predefined querystrings available:
title - standard title search. (i.e. "this is a title") (default)
title-author - title and author search (i.e. "this is a title AND Lastname, F")
reportnumber - reportnumber search (i.e. reportnumber:REP-NO-123).
You can also add your own predefined querystrings inside invenio.conf file.
You can structure your query in different ways:
* Old-style: fieldnames separated by '||' (conforms with earlier BibMatch versions):
-q "773__p||100__a"
* New-style: Invenio query syntax with "bracket syntax":
-q "773__p:\"[773__p]\" 100__a:[100__a]"
 Depending on the structure of the query, it will fetch associated values from each record and put them into
 the final search query. I.e. in the above example it will put the journal-title from 773__p.
 When more than one value/datafield is found, i.e. when looking for 700__a (additional authors),
 several queries will be put together to make sure all combinations of values are accounted for.
 The queries are separated with the given operator (-o, --operator) value.
 Note: You can add more than one query to a search, just give more (-q, --query-string) arguments.
The results of all queries will be combined when matching.
BibConvert formats:
Another option to further improve your matching strategy is to use BibConvert formats. By using the formats
available by BibConvert you can change the values from the retrieved record-fields.
i.e. using WORDS(1,R) will only return the first (1) word from the right (R). This can be very useful when
adjusting your matching parameters to better match the content. For example only getting authors last-name
instead of full-name.
You can use these formats directly in the querystrings (indicated by '::'):
* Old-style: -q "100__a::WORDS(1,R)::DOWN()"
This query will take first word from the right from 100__a and also convert it to lower-case.
* New-style: -q "100__a:[100__a::WORDS(1,R)::DOWN()]"
See BibConvert documentation for a more detailed explanation of formats.
Predefined fields:
In addition to specifying distinct MARC fields in the querystrings you can use predefined
fields as configured in the LOCAL(!) Invenio system. These fields will then be mapped to one
or more fieldtags to be retrieved from input records.
Common predefined fields used in querystrings: (for Invenio demo site, your fields may vary!)
'abstract', 'affiliation', 'anyfield', 'author', 'coden', 'collaboration',
'collection', 'datecreated', 'datemodified', 'division', 'exactauthor', 'exactfirstauthor',
'experiment', 'fulltext', 'isbn', 'issn', 'journal', 'keyword', 'recid',
'reference', 'reportnumber', 'subject', 'title', 'year'
Examples:
$ bibmatch [options] < input.xml > unmatched.xml
$ bibmatch -b out -n < input.xml
$ bibmatch -a -1 < input.xml > modified_match.xml
$ bibmatch --field=title < input.xml
$ bibmatch --field=245__a --mode=a < input.xml
$ bibmatch --print-ambiguous -q title-author < input.xml > ambigmatched.xml
$ bibmatch -q "980:Thesis 773__p:\"[773__p]\" 100__a:[100__a]" -r "http://inspirebeta.net" < input.xml
$ bibmatch --collection 'Books,Articles' < input.xml
$ bibmatch --collection 'Theses' --user admin < input.xml
""" % (sys.argv[0],), file=sys.stderr)
sys.exit(1)
return
class Querystring:
"""
Holds the information about a querystring.
The object contains lists of fields, formats and queries which generates search queries.
self.fields is a dict of found field-data {"tag": [list of found record data]}
self.formats is a dict of found BibConvert formats {"tag": [list of found format-values]}
self.pattern contains the original search string
self.query contains the generated query
self.operator holds the current active operator, upper-case (OR/AND)
To populate the Querystring instance with values and search string structure,
call create_query(..) with BibRecord structure and a query-string to populate with retrieved values.
Example: The template "title:[245__a]" will retrieve the value from subfield 245__a in
given record. If any BibConvert formats are specified for this field, these will
be applied.
"""
def __init__(self, operator="AND", clean=False, ascii_mode=False):
"""
Creates Querystring instance.
@param operator: operator used to concatenate several queries
@type operator: str
@param clean: indicates if queries should be sanitized
@type clean: bool
"""
self.fields = {}
self.operator = operator.upper()
self.pattern = ""
self.query = ""
self.clean = clean
self.ascii_mode = ascii_mode
self.formats = {}
def create_query(self, record, qrystr="[title]"):
"""
Main method that parses and generates a search query from
given query-string structure and record data. Returns the
resulting query-string and completeness determination as a tuple.
        A query is 'complete' when all found field references have a value
in the passed record. Should a value be missing, the query is
incomplete.
        @param record: bibrecord to retrieve field-values from
@type record: dict
@param qrystr: proper query string template. (i.e. title:[245__a])
defaults to: [title]
@type qrystr: str
@return: (query-string, complete flag)
@rtype: tuple
"""
if qrystr == "":
qrystr = "[title]"
if "||" in qrystr or not "[" in qrystr:
# Assume old style query-strings
qrystr = self._convert_qrystr(qrystr)
        # FIXME: Convert to lower case, since the fuzzy_parser treats
        # everything as lower-case, which would cause problems when
        # retrieving data from the self.fields dict.
# Also BibConvert formats are currently case sensitive, so we cannot
# force lower-case yet.
self.pattern = qrystr.lower()
self.fields = {}
# Extract referenced field-values from given record
complete, fieldtags_found = self._extract_fieldvalues(record, qrystr)
# If no field references are found, we exit as empty query.
if len(self.fields) == 0:
self.query = ""
return self.query, False
# Now we assemble the found values into a proper search query
all_queries = []
operator_delimiter = " %s " % (self.operator,)
if self.operator == "AND":
# We gather all the values from the self.fields and put them
# in a list together with any prefix/suffix associated with the field.
new_query = self.pattern
for (field_prefix, field_reference, field_suffix), value_list in iteritems(self.fields):
new_values = []
for value in value_list:
new_values.append("%s%s%s" % (field_prefix, value, field_suffix))
new_query = new_query.replace("%s[%s]%s" % (field_prefix, field_reference, field_suffix), \
operator_delimiter.join(set(new_values)))
all_queries = [new_query]
else:
# operator is OR, which means a more elaborate approach to multi-value fields
field_tuples = []
for key, values in iteritems(self.fields):
field_list = []
for value in values:
# We add key here to be able to associate the value later
field_list.append((key, value))
field_tuples.append(field_list)
# Grab all combinations of queries
query_tuples = cproduct(field_tuples)
for query in query_tuples:
new_query = self.pattern
for (field_prefix, field_reference, field_suffix), value in query:
new_query = new_query.replace("%s[%s]%s" % (field_prefix, field_reference, field_suffix), \
"%s%s%s" % (field_prefix, value, field_suffix))
all_queries.append(new_query)
# Finally we concatenate all unique queries into one, delimited by chosen operator
self.query = operator_delimiter.join(set(all_queries))
if not complete:
# Clean away any leftover field-name references from query
for fieldtag in fieldtags_found:
self.query = self.query.replace("%s" % (fieldtag,), "")
# Clean query?
if self.clean:
self._clean_query()
return self.query, complete
def fuzzy_queries(self):
"""
Returns a list of queries that are built more 'fuzzily' using the main query as base.
The list returned also contains the current operator in context, so each query is a tuple
of (operator, query).
@return: list of tuples [(operator, query), ..]
@rtype: list [(str, str), ..]
"""
fuzzy_query_list = []
operator_delimiter = " %s " % (self.operator,)
parser = SearchQueryParenthesisedParser()
query_parts = parser.parse_query(self.pattern)
author_query = []
author_operator = None
# Go through every expression in the query and generate fuzzy searches
for i in xrange(0, len(query_parts) - 1, 2):
current_operator = query_parts[i]
current_pattern = query_parts[i + 1]
fieldname_list = re_querystring.findall(current_pattern)
if fieldname_list == []:
# No reference to record value, add query 'as is'
fuzzy_query_list.append((current_operator, current_pattern))
else:
# Each reference will be split into prefix, field-ref and suffix.
# Example:
# 773__p:"[773__p]" 100__a:/.*[100__a].*/ =>
# [('773__p:"', '773__p', '"'), ('100__a:/.*', '100__a', '.*/')]
for field_prefix, field_reference, field_suffix in fieldname_list:
if field_reference == '245__a':
new_query = []
for value in self.fields.get((field_prefix, field_reference, field_suffix), []):
# Grab the x+1 longest words in the string and perform boolean OR
# for all combinations of x words (boolean AND)
# x is determined by the configuration dict and is tag-based. Defaults to 3 words
word_list = get_longest_words(value, limit=CFG_BIBMATCH_FUZZY_WORDLIMITS.get(field_reference, 3)+1)
                            for j in range(len(word_list)):
                                words = list(word_list)
                                words.pop(j)
new_query.append("(" + current_pattern.replace("[%s]" % (field_reference,), " ".join(words)) + ")")
fuzzy_query_list.append((current_operator, " OR ".join(new_query)))
elif field_reference == '100__a':
for value in self.fields.get((field_prefix, field_reference, field_suffix), []):
author_query.append(current_pattern.replace("[%s]" % (field_reference,), value))
author_operator = current_operator
elif field_reference == '700__a':
for value in self.fields.get((field_prefix, field_reference, field_suffix), []):
                            # take only the first additional (700__a) author
author_query.append(current_pattern.replace("[%s]" % (field_reference,), value))
if not author_operator:
author_operator = current_operator
break
                    # for unique identifiers (DOI, repno) a fuzzy search makes no sense
elif field_reference == '037__a':
continue
elif field_reference == '0247_a':
continue
else:
new_query = []
for value in self.fields.get((field_prefix, field_reference, field_suffix), []):
# Grab the x longest words in the string and perform boolean AND for each word
# x is determined by the configuration dict and is tag-based. Defaults to 3 words
# AND can be overwritten by command line argument -o o
word_list = get_longest_words(value, limit=CFG_BIBMATCH_FUZZY_WORDLIMITS.get(field_reference, 3))
for word in word_list:
# Create fuzzy query with key + word, including any surrounding elements like quotes, regexp etc.
new_query.append(current_pattern.replace("[%s]" % (field_reference,), word))
fuzzy_query_list.append((current_operator, operator_delimiter.join(new_query)))
if author_query:
fuzzy_query_list.append((author_operator, " OR ".join(author_query)))
# Return a list of unique queries
return list(set(fuzzy_query_list))
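    # Illustrative sketch (not part of the original class): for a pattern like
    # '245__a:"[245__a]"' and a stored title value, fuzzy_queries() returns
    # (operator, query) tuples where each variant drops one of the longest
    # title words, e.g. [('+', '(245__a:"measurement top quark") OR (...)')].
    # This example output is assumed, not verified; the number of words kept
    # is governed by CFG_BIBMATCH_FUZZY_WORDLIMITS.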
def _clean_query(self):
"""
This function will remove erroneous characters and combinations from
        the generated search query that might cause problems when searching.
@return: cleaned query
@rtype: str
"""
#FIXME: Extend cleaning to account for encodings and LaTeX symbols
query = self.query.replace("''", "")
query = query.replace('""', "")
return query
def _convert_qrystr(self, qrystr):
"""
Converts old-style query-strings into new-style.
"""
fields = qrystr.split("||")
converted_query = []
for field in fields:
converted_query.append("[%s]" % (field,))
return self.operator.join(converted_query)
def _extract_fieldvalues(self, record, qrystr):
"""
Extract all the values in the given record referenced in the given query-string
and attach them to self.fields as a list. Return boolean indicating if a query
is complete, and a list of all field references found.
        Field references are checked to be valid MARC tag references and all values
found are added to self.fields as a list, hashed by the full reference including
prefix and suffix.
        If ascii_mode is enabled, the record values will be translated to their
        ASCII representation.
e.g. for the query-string: 700__a:"[700__a]"
{ ('700__a:"', '700__a', '"') : ["Ellis, J.", "Olive, K. A."]}
        Should no values be found for a field reference, the query will be flagged
as incomplete.
        @param record: bibrecord to retrieve field-values from
@type record: dict
@param qrystr: proper query string template. (i.e. title:[245__a])
defaults to: [title]
@type qrystr: str
@return: complete flag, [field references found]
@rtype: tuple
"""
complete = True
fieldtags_found = []
# Find all potential references to record tag values and
# add to fields-dict as a list of values using field-name tuple as key.
#
# Each reference will be split into prefix, field-ref and suffix.
# Example:
# 773__p:"[773__p]" 100__a:/.*[100__a].*/ =>
# [('773__p:"', '773__p', '"'), ('100__a:/.*', '100__a', '.*/')]
for field_prefix, field_reference, field_suffix in re_querystring.findall(qrystr):
# First we see if there is any special formats for this field_reference
# The returned value from _extract_formats is the field-name stripped from formats.
# e.g. 245__a::SUP(NUM) => 245__a
fieldname = self._extract_formats(field_reference)
# We need everything in lower-case
field_prefix = field_prefix.lower()
field_suffix = field_suffix.lower()
# Find proper MARC tag(s) for the stripped field-name, if fieldname is used.
# e.g. author -> [100__a, 700__a]
# FIXME: Local instance only!
tag_list = get_field_tags_from_fieldname(fieldname)
if len(tag_list) == 0:
tag_list = [fieldname]
for field in tag_list:
# Check if it is really a reference to a tag to not confuse with e.g. regex syntax
tag_structure = validate_tag(field)
if tag_structure != None:
tag, ind1, ind2, code = tag_structure
value_list = record_get_field_values(record, tag, ind1, ind2, code)
if len(value_list) > 0:
# Apply any BibConvert formatting functions to each value
updated_value_list = self._apply_formats(fieldname, value_list)
                        # Also remove any erroneous XML entities, e.g. &amp; -> &
updated_value_list = [xml_entities_to_utf8(v, skip=[]) \
for v in updated_value_list]
if self.ascii_mode:
updated_value_list = translate_to_ascii(updated_value_list)
# Store found values linked to full field reference tuple including
# (prefix, field, suffix)
self.fields[(field_prefix,
fieldname,
field_suffix)] = updated_value_list
else:
# No values found. The query is deemed incomplete
complete = False
fieldtags_found.append("%s[%s]%s" % (field_prefix, fieldname, field_suffix))
return complete, fieldtags_found
def _extract_formats(self, field_reference):
"""
Looks for BibConvert formats within query-strings and adds to
the instance. Formats are defined by one or more '::' followed
by a format keyword which is defined in BibConvert FormatField()
method.
The function also removes the references to formatting functions
in the query (self.pattern)
        Returns the field_reference, with formats stripped.
"""
field_parts = field_reference.split("::")
if len(field_parts) > 1:
# Remove any references to BibConvert functions in pattern. e.g. 245__a::SUP(PUNCT, ) -> 245__a
# self.pattern is lower cased. Returned value is field-name stripped from formats.
for aformat in field_parts[1:]:
self.formats.setdefault(field_parts[0], []).append(aformat)
self.pattern = self.pattern.replace("[%s]" % (field_reference.lower(),), "[%s]" % (field_parts[0],))
return field_parts[0]
def _apply_formats(self, fieldname, value_list):
"""
        Apply the currently stored BibConvert formatting operations for a
field-name to the given list of strings. The list is then returned.
@param fieldname: name of field - used as key in the formats dict
@type fieldname: string
@param value_list: list of strings to apply formats to
@type value_list: list
@return: list of values with formatting functions applied
@rtype: list
"""
if fieldname in self.formats:
new_list = []
for value in value_list:
if value.strip() != "":
# Apply BibConvert formats if applicable
for aformat in self.formats[fieldname]:
value = bibconvert.FormatField(value, aformat)
new_list.append(value)
return new_list
else:
return value_list
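# Illustrative usage sketch (not part of the original module). Assuming 'rec'
# is a BibRecord structure parsed with create_records(), a Querystring can be
# populated like this:
#
#   qs = Querystring(operator="AND")
#   query, complete = qs.create_query(rec, '773__p:"[773__p]" 100__a:[100__a]')
#   # e.g. query == '773__p:"Phys.Rev." 100__a:Ellis, J.' (values hypothetical)
#   # complete is False if the record lacked any referenced field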
def get_field_tags_from_fieldname(field):
"""
    Returns the list of MARC tags mapped to the given field code 'field', as configured in the local database.
"""
query = "select tag.value from tag left join field_tag on tag.id=field_tag.id_tag " \
+ "left join field on field_tag.id_field=field.id where field.code='%s'" % (field,)
out = []
res = run_sql(query)
for row in res:
out.append(row[0])
return out
def cproduct(args):
"""
Returns the Cartesian product of passed arguments as a list of tuples.
'12','34' -> ('1', '3'), ('1', '4'), ('2', '3'), ('2', '4')
@param args: iterable with elements to compute
@type args: iterable
    @return: list containing tuples for each computed combination
    @rtype: list of tuples
Based on http://docs.python.org/library/itertools.html#itertools.product
"""
values = map(tuple, args)
result = [[]]
for value in values:
result = [x + [y] for x in result for y in value]
return [tuple(res) for res in result]
def bylen(word1, word2):
""" Sort comparison method that compares by length """
return len(word1) - len(word2)
def get_longest_words(wstr, limit=5):
"""
Select the longest words for matching. It selects the longest words from
    the string, according to a given limit of words. By default the 5 longest words are selected.
@param wstr: string to extract the longest words from
@type wstr: str
@param limit: maximum number of words extracted
@type limit: int
@return: list of long words
@rtype: list
"""
words = []
if wstr:
# Protect spaces within quotes
wstr = re_pattern_single_quotes.sub(
lambda x: "'" + string.replace(x.group(1), ' ', '__SPACE__') + "'",
wstr)
wstr = re_pattern_double_quotes.sub(
lambda x: "\"" + string.replace(x.group(1), ' ', '__SPACE__') + "\"",
wstr)
wstr = re_pattern_regexp_quotes.sub(
lambda x: "/" + string.replace(x.group(1), ' ', '__SPACE__') + "/",
wstr)
# and spaces after colon as well:
wstr = re_pattern_spaces_after_colon.sub(
lambda x: string.replace(x.group(1), ' ', '__SPACE__'),
wstr)
words = wstr.split()
for i in range(len(words)):
words[i] = words[i].replace('__SPACE__', ' ')
words.sort(cmp=bylen)
words.reverse()
words = words[:limit]
return words
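# Illustrative sketch (not part of the original module):
#
#   get_longest_words("a study of the top quark", limit=3)
#   # -> ['quark', 'study', 'top'] (longest first; the order among words of
#   #    equal length follows the underlying sort and may vary)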
def add_recid(record, recid):
"""
Add a given record-id to the record as $$001 controlfield. If an 001 field already
exists it will be replaced.
    @param record: the record to retrieve field-values from
@type record: a bibrecord instance
@param recid: record-id to be added
@type recid: int
"""
if record_has_field(record, '001'):
record_modify_controlfield(record, '001', \
controlfield_value=str(recid), \
field_position_global=1)
else:
record_add_field(record, '001', controlfield_value=str(recid))
def match_result_output(bibmatch_recid, recID_list, server_url, query, matchmode="no match"):
"""
Generates result as XML comments from passed record and matching parameters.
@param bibmatch_recid: BibMatch record identifier
@type bibmatch_recid: int
@param recID_list: record matched with record
@type recID_list: list
@param server_url: url to the server the matching has been performed
@type server_url: str
@param query: matching query
@type query: str
@param matchmode: matching type
@type matchmode: str
    @rtype: str
    @return: XML result string
"""
result = ["<!-- BibMatch-Matching-Results: -->", \
"<!-- BibMatch-Matching-Record-Identifier: %s -->" % (bibmatch_recid,)]
for recID in recID_list:
result.append("<!-- BibMatch-Matching-Found: %s/%s/%s -->" \
% (server_url, CFG_SITE_RECORD, recID))
result.append("<!-- BibMatch-Matching-Mode: %s -->" \
% (matchmode,))
result.append("<!-- BibMatch-Matching-Criteria: %s -->" \
% (query,))
return "\n".join(result)
def match_records(records, qrystrs=None, search_mode=None, operator="and", \
verbose=1, server_url=CFG_SITE_SECURE_URL, modify=0, \
sleeptime=CFG_BIBMATCH_LOCAL_SLEEPTIME, \
clean=False, collections=[], user="", password="", \
fuzzy=True, validate=True, ascii_mode=False,
insecure_login=False):
"""
Match passed records with existing records on a local or remote Invenio
installation. Returns which records are new (no match), which are matched,
which are ambiguous and which are fuzzy-matched. A formatted result of each
records matching are appended to each record tuple:
(record, status_code, list_of_errors, result)
@param records: records to analyze
@type records: list of records
@param qrystrs: list of tuples (field, querystring)
@type qrystrs: list
@param search_mode: if mode is given, the search will perform an advanced
query using the desired mode. Otherwise 'simple search'
is used.
@type search_mode: str
    @param operator: operator used to concatenate values of fields occurring more than once.
Valid types are: AND, OR. Defaults to AND.
@type operator: str
@param verbose: be loud
@type verbose: int
@param server_url: which server to search on. Local installation by default
@type server_url: str
@param modify: output modified records of matches
@type modify: int
@param sleeptime: amount of time to wait between each query
@type sleeptime: float
    @param clean: should the search queries be cleaned before passing them along?
@type clean: bool
@param collections: list of collections to search, if specified
@type collections: list
@param user: username in case of authenticated search requests
@type user: string
@param password: password in case of authenticated search requests
@type password: string
@param fuzzy: True to activate fuzzy query matching step
@type fuzzy: bool
@param validate: True to activate match validation
@type validate: bool
    @param ascii_mode: True to transform values to their ASCII representation
@type ascii_mode: bool
@rtype: list of lists
    @return: an array of arrays of records, like this [newrecs, matchedrecs,
             ambiguousrecs, fuzzyrecs]
"""
newrecs = []
matchedrecs = []
ambiguousrecs = []
fuzzyrecs = []
CFG_BIBMATCH_LOGGER.info("-- BibMatch starting match of %d records --" % (len(records),))
try:
server = InvenioConnector(server_url, user=user, password=password,
insecure_login=insecure_login)
except InvenioConnectorAuthError as error:
if verbose > 0:
sys.stderr.write("Authentication error when connecting to server: %s" \
% (str(error),))
CFG_BIBMATCH_LOGGER.info("-- BibMatch ending match with errors (AuthError) --")
return [newrecs, matchedrecs, ambiguousrecs, fuzzyrecs]
## Go through each record and try to find matches using defined querystrings
record_counter = 0
for record in records:
record_counter += 1
if (verbose > 1):
sys.stderr.write("\n Processing record: #%d .." % (record_counter,))
# At least one (field, querystring) tuple is needed for default search query
if not qrystrs:
qrystrs = [("", "")]
CFG_BIBMATCH_LOGGER.info("Matching of record %d: Started" % (record_counter,))
[matched_results, ambiguous_results, fuzzy_results] = match_record(bibmatch_recid=record_counter,
record=record[0],
server=server,
qrystrs=qrystrs,
search_mode=search_mode,
operator=operator,
verbose=verbose,
sleeptime=sleeptime,
clean=clean,
collections=collections,
fuzzy=fuzzy,
validate=validate,
ascii_mode=ascii_mode)
## Evaluate final results for record
# Add matched record iff number found is equal to one, otherwise return fuzzy,
# ambiguous or no match
if len(matched_results) == 1:
results, query = matched_results[0]
# If one match, add it as exact match, otherwise ambiguous
if len(results) == 1:
if modify:
add_recid(record[0], results[0])
matchedrecs.append((record[0], match_result_output(record_counter, results, server_url, \
query, "exact-matched")))
if (verbose > 1):
sys.stderr.write("Final result: match - %s/record/%s\n" % (server_url, str(results[0])))
CFG_BIBMATCH_LOGGER.info("Matching of record %d: Completed as 'match'" % (record_counter,))
else:
ambiguousrecs.append((record[0], match_result_output(record_counter, results, server_url, \
query, "ambiguous-matched")))
if (verbose > 1):
sys.stderr.write("Final result: ambiguous\n")
CFG_BIBMATCH_LOGGER.info("Matching of record %d: Completed as 'ambiguous'" % (record_counter,))
else:
if len(fuzzy_results) > 0:
# Find common record-id for all fuzzy results and grab first query
# as "representative" query
query = fuzzy_results[0][1]
result_lists = []
for res, dummy in fuzzy_results:
result_lists.extend(res)
results = set([res for res in result_lists])
if len(results) == 1:
fuzzyrecs.append((record[0], match_result_output(record_counter, results, server_url, \
query, "fuzzy-matched")))
if (verbose > 1):
sys.stderr.write("Final result: fuzzy\n")
CFG_BIBMATCH_LOGGER.info("Matching of record %d: Completed as 'fuzzy'" % (record_counter,))
else:
ambiguousrecs.append((record[0], match_result_output(record_counter, results, server_url, \
query, "ambiguous-matched")))
if (verbose > 1):
sys.stderr.write("Final result: ambiguous\n")
CFG_BIBMATCH_LOGGER.info("Matching of record %d: Completed as 'ambiguous'" % (record_counter,))
elif len(ambiguous_results) > 0:
# Find common record-id for all ambiguous results and grab first query
# as "representative" query
query = ambiguous_results[0][1]
result_lists = []
for res, dummy in ambiguous_results:
result_lists.extend(res)
results = set([res for res in result_lists])
ambiguousrecs.append((record[0], match_result_output(record_counter, results, server_url, \
query, "ambiguous-matched")))
if (verbose > 1):
sys.stderr.write("Final result: ambiguous\n")
CFG_BIBMATCH_LOGGER.info("Matching of record %d: Completed as 'ambiguous'" % (record_counter,))
else:
newrecs.append((record[0], match_result_output(record_counter, [], server_url, str(qrystrs))))
if (verbose > 1):
sys.stderr.write("Final result: new\n")
CFG_BIBMATCH_LOGGER.info("Matching of record %d: Completed as 'new'" % (record_counter,))
CFG_BIBMATCH_LOGGER.info("-- BibMatch ending match: New(%d), Matched(%d), Ambiguous(%d), Fuzzy(%d) --" % \
(len(newrecs), len(matchedrecs), len(ambiguousrecs), len(fuzzyrecs)))
return [newrecs, matchedrecs, ambiguousrecs, fuzzyrecs]
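# Illustrative usage sketch (not part of the original module). Assuming
# 'records' comes from create_records() on MARCXML input:
#
#   new, matched, ambiguous, fuzzy = match_records(
#       records=records,
#       qrystrs=[("", 'title:"[245__a]"')],
#       server_url="http://localhost",  # hypothetical local instance
#       verbose=0)
#
# Each returned list holds (record, xml_comment_result) tuples.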
def match_record(bibmatch_recid, record, server, qrystrs=None, search_mode=None, operator="and", \
verbose=1, sleeptime=CFG_BIBMATCH_LOCAL_SLEEPTIME, \
clean=False, collections=[], fuzzy=True, validate=True, \
ascii_mode=False):
"""
Matches a single record.
@param bibmatch_recid: Current record number. Used for logging.
@type bibmatch_recid: int
@param record: record to match in BibRecord structure
@type record: dict
@param server: InvenioConnector server object
@type server: object
@param qrystrs: list of tuples (field, querystring)
@type qrystrs: list
@param search_mode: if mode is given, the search will perform an advanced
query using the desired mode. Otherwise 'simple search'
is used.
@type search_mode: str
    @param operator: operator used to concatenate values of fields occurring more than once.
Valid types are: AND, OR. Defaults to AND.
@type operator: str
@param verbose: be loud
@type verbose: int
@param sleeptime: amount of time to wait between each query
@type sleeptime: float
    @param clean: should the search queries be cleaned before passing them along?
@type clean: bool
@param collections: list of collections to search, if specified
@type collections: list
@param fuzzy: True to activate fuzzy query matching step
@type fuzzy: bool
@param validate: True to activate match validation
@type validate: bool
    @param ascii_mode: True to transform values to their ASCII representation
    @type ascii_mode: bool
    @return: list of [matched_results, ambiguous_results, fuzzy_results]
    """
matched_results = []
ambiguous_results = []
fuzzy_results = []
# Keep a list of generated querystring objects for later use in fuzzy match
query_list = []
# Go through each querystring, trying to find a matching record
# Stops on first valid match, if no exact-match we continue with fuzzy match
for field, qrystr in qrystrs:
querystring = Querystring(operator, clean=clean, ascii_mode=ascii_mode)
query, complete = querystring.create_query(record, qrystr)
if query == "":
if (verbose > 1):
sys.stderr.write("\nEmpty query. Skipping...\n")
# Empty query, no point searching database
continue
query_list.append((querystring, complete, field))
if not complete:
if (verbose > 1):
sys.stderr.write("\nQuery not complete. Flagged as uncertain/ambiguous...\n")
# Determine proper search parameters
if search_mode != None:
search_params = dict(p1=query, f1=field, m1=search_mode, of='id', c=collections)
else:
search_params = dict(p=query, f=field, of='id', c=collections)
if (verbose > 8):
sys.stderr.write("\nSearching with values %s\n" %
(search_params,))
CFG_BIBMATCH_LOGGER.info("Searching with values %s" % (search_params,))
## Perform the search with retries
try:
result_recids = server.search_with_retry(**search_params)
except InvenioConnectorAuthError as error:
if verbose > 0:
sys.stderr.write("Authentication error when searching: %s" \
% (str(error),))
break
sleep(sleeptime)
## Check results:
if len(result_recids) > 0:
# Matches detected
CFG_BIBMATCH_LOGGER.info("Results: %s" % (result_recids[:15],))
if len(result_recids) > CFG_BIBMATCH_SEARCH_RESULT_MATCH_LIMIT:
# Too many matches, treat as non-match
if (verbose > 8):
sys.stderr.write("result=More then %d results...\n" % \
(CFG_BIBMATCH_SEARCH_RESULT_MATCH_LIMIT,))
continue
if (verbose > 8):
sys.stderr.write("result=%s\n" % (result_recids,))
if validate:
# Validation can be run
CFG_BIBMATCH_LOGGER.info("Matching of record %d: Query (%s) found %d records: %s" % \
(bibmatch_recid,
query,
len(result_recids),
str(result_recids)))
exact_matches = []
fuzzy_matches = []
try:
exact_matches, fuzzy_matches = validate_matches(bibmatch_recid=bibmatch_recid, \
record=record, \
server=server, \
result_recids=result_recids, \
collections=collections, \
verbose=verbose, \
ascii_mode=ascii_mode)
                except BibMatchValidationError as e:
sys.stderr.write("ERROR: %s\n" % (str(e),))
if len(exact_matches) > 0:
if (verbose > 8):
sys.stderr.write("Match validated\n")
matched_results.append((exact_matches, query))
break
elif len(fuzzy_matches) > 0:
if (verbose > 8):
sys.stderr.write("Match validated fuzzily\n")
fuzzy_results.append((fuzzy_matches, query))
continue
else:
if (verbose > 8):
sys.stderr.write("Match could not be validated\n")
else:
# No validation
# Ambiguous match
if len(result_recids) > 1:
ambiguous_results.append((result_recids, query))
if (verbose > 8):
sys.stderr.write("Ambiguous\n")
continue
# Match
elif len(result_recids) == 1:
if complete:
matched_results.append((result_recids, query))
if (verbose > 8):
sys.stderr.write("Match\n")
# This was a complete match, so let's break out to avoid more searching
break
else:
# We treat the result as ambiguous (uncertain) when query is not complete
# and we are not validating it.
ambiguous_results.append((result_recids, query))
if (verbose > 8):
sys.stderr.write("Ambiguous\n")
continue
# No match
if (verbose > 8):
sys.stderr.write("result=No matches\n")
# No complete matches, lets try fuzzy matching of all the queries
else:
if fuzzy:
if (verbose > 8):
sys.stderr.write("\nFuzzy query mode...\n")
## Fuzzy matching: Analyze all queries and perform individual searches, then intersect results.
for querystring, complete, field in query_list:
result_hitset = None
if (verbose > 8):
sys.stderr.write("\n Start new search ------------ \n")
fuzzy_query_list = querystring.fuzzy_queries()
empty_results = 0
# Go through every expression in the query and generate fuzzy searches
for current_operator, qry in fuzzy_query_list:
current_resultset = None
if qry == "":
if (verbose > 1):
sys.stderr.write("\nEmpty query. Skipping...\n")
# Empty query, no point searching database
continue
search_params = dict(p=qry, f=field, of='id', c=collections)
CFG_BIBMATCH_LOGGER.info("Fuzzy searching with values %s" % (search_params,))
try:
current_resultset = server.search_with_retry(**search_params)
except InvenioConnectorAuthError as error:
if (verbose > 0):
sys.stderr.write("Authentication error when searching: %s" \
% (str(error),))
break
CFG_BIBMATCH_LOGGER.info("Results: %s" % (current_resultset[:15],))
if (verbose > 8):
if len(current_resultset) > CFG_BIBMATCH_SEARCH_RESULT_MATCH_LIMIT:
sys.stderr.write("\nSearching with values %s result=%s\n" %
(search_params, "More then %d results..." % \
(CFG_BIBMATCH_SEARCH_RESULT_MATCH_LIMIT,)))
else:
sys.stderr.write("\nSearching with values %s result=%s\n" %
(search_params, current_resultset))
sleep(sleeptime)
if current_resultset == None:
continue
if current_resultset == [] and empty_results < CFG_BIBMATCH_FUZZY_EMPTY_RESULT_LIMIT:
# Allows some empty results
empty_results += 1
else:
# Intersect results with previous results depending on current operator
if result_hitset == None:
result_hitset = current_resultset
if current_operator == '+':
result_hitset = list(set(result_hitset) & set(current_resultset))
elif current_operator == '-':
result_hitset = list(set(result_hitset) - set(current_resultset))
elif current_operator == '|':
result_hitset = list(set(result_hitset) | set(current_resultset))
else:
# We did not hit a break in the for-loop: we were allowed to search.
if result_hitset and len(result_hitset) > CFG_BIBMATCH_SEARCH_RESULT_MATCH_LIMIT:
if (verbose > 1):
sys.stderr.write("\nToo many results... %d " % (len(result_hitset)))
elif result_hitset:
# This was a fuzzy match
query_out = " ".join(["%s %s" % (op, qu) for op, qu in fuzzy_query_list])
if validate:
# We can run validation
CFG_BIBMATCH_LOGGER.info("Matching of record %d: Fuzzy query (%s) found %d records: %s" % \
(bibmatch_recid,
query_out,
len(result_hitset),
str(result_hitset)))
exact_matches = []
fuzzy_matches = []
try:
exact_matches, fuzzy_matches = validate_matches(bibmatch_recid=bibmatch_recid, \
record=record, \
server=server, \
result_recids=result_hitset, \
collections=collections, \
verbose=verbose, \
ascii_mode=ascii_mode)
                            except BibMatchValidationError as e:
sys.stderr.write("ERROR: %s\n" % (str(e),))
if len(exact_matches) > 0:
if (verbose > 8):
sys.stderr.write("Match validated\n")
matched_results.append((exact_matches, query_out))
break
elif len(fuzzy_matches) > 0:
if (verbose > 8):
sys.stderr.write("Match validated fuzzily\n")
fuzzy_results.append((fuzzy_matches, query_out))
else:
if (verbose > 8):
sys.stderr.write("Match could not be validated\n")
else:
# No validation
if len(result_hitset) == 1 and complete:
fuzzy_results.append((result_hitset, query_out))
if (verbose > 8):
sys.stderr.write("Fuzzy: %s\n" % (result_hitset,))
else:
# We treat the result as ambiguous (uncertain) when:
# - query is not complete
                                # - more than one result
ambiguous_results.append((result_hitset, query_out))
if (verbose > 8):
sys.stderr.write("Ambiguous\n")
return [matched_results, ambiguous_results, fuzzy_results]
def transform_input_to_marcxml(filename=None, file_input=""):
"""
Takes the filename or input of text-marc and transforms it
to MARCXML.
"""
if not filename:
# Create temporary file to read from
tmp_fd, filename = mkstemp()
os.write(tmp_fd, file_input)
os.close(tmp_fd)
try:
# Redirect output, transform, restore old references
old_stdout = sys.stdout
new_stdout = StringIO()
sys.stdout = new_stdout
transform_file(filename)
finally:
sys.stdout = old_stdout
return new_stdout.getvalue()
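# Illustrative sketch (not part of the original module), with a hypothetical
# one-line text-marc input:
#
#   xml = transform_input_to_marcxml(file_input="000000001 245__ $$aSome title\n")
#   # 'xml' holds whatever MARCXML transform_file() printed for that input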
def bibrecs_has_errors(bibrecs):
"""
Utility function to check a list of parsed BibRec objects, directly
from the output of bibrecord.create_records(), for any
badly parsed records.
If an error-code is present in the result the function will return True,
otherwise False.
"""
return 0 in [err_code for dummy, err_code, dummy2 in bibrecs]
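# Illustrative sketch (not part of the original module):
#
#   bibrecs = create_records(xml_string)  # [(record, err_code, err_msg), ..]
#   if bibrecs_has_errors(bibrecs):
#       pass  # at least one record came back with err_code == 0 (parse problem)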
def main():
"""
    A record matches the database content when the defined search gives
    exactly one record in the result set. By default the match is
    done on the title field.
"""
try:
        opts, args = getopt.getopt(sys.argv[1:], "0123hVm:f:q:c:nv:o:b:i:r:tazx:",
[
"print-new",
"print-match",
"print-ambiguous",
"print-fuzzy",
"help",
"version",
"mode=",
"field=",
"query-string=",
"config=",
"no-process",
"verbose=",
"operator=",
"batch-output=",
"input=",
"remote=",
"text-marc-output",
"alter-recid",
"clean",
"collection=",
"user=",
"no-fuzzy",
"no-validation",
"ascii"
])
except getopt.GetoptError as e:
usage()
match_results = []
qrystrs = [] # list of query strings
print_mode = 0 # default match mode to print new records
noprocess = 0 # dump result in stdout?
operator = "and"
verbose = 1 # 0..be quiet
records = []
batch_output = "" # print stuff in files
f_input = "" # read from where, if param "i"
server_url = CFG_SITE_SECURE_URL # url to server performing search, local by default
modify = 0 # alter output with matched record identifiers
textmarc_output = 0 # output in MARC instead of MARCXML
field = ""
search_mode = None # activates a mode, uses advanced search instead of simple
sleeptime = CFG_BIBMATCH_LOCAL_SLEEPTIME # the amount of time to sleep between queries, changes on remote queries
clean = False # should queries be sanitized?
collections = [] # only search certain collections?
user = ""
password = ""
    validate = True # should matches be validated?
fuzzy = True # Activate fuzzy-mode if no matches found for a record
ascii_mode = False # Should values be turned into ascii mode
for opt, opt_value in opts:
if opt in ["-0", "--print-new"]:
print_mode = 0
if opt in ["-1", "--print-match"]:
print_mode = 1
if opt in ["-2", "--print-ambiguous"]:
print_mode = 2
if opt in ["-3", "--print-fuzzy"]:
print_mode = 3
if opt in ["-n", "--no-process"]:
noprocess = 1
if opt in ["-h", "--help"]:
usage()
sys.exit(0)
if opt in ["-V", "--version"]:
print(__revision__)
sys.exit(0)
if opt in ["-t", "--text-marc-output"]:
textmarc_output = 1
if opt in ["-v", "--verbose"]:
verbose = int(opt_value)
if opt in ["-f", "--field"]:
if opt_value in get_fieldcodes():
field = opt_value
if opt in ["-q", "--query-string"]:
try:
template = CFG_BIBMATCH_QUERY_TEMPLATES[opt_value]
qrystrs.append((field, template))
except KeyError:
qrystrs.append((field, opt_value))
if opt in ["-m", "--mode"]:
search_mode = opt_value
if opt in ["-o", "--operator"]:
if opt_value.lower() in ["o", "or", "|"]:
operator = "or"
elif opt_value.lower() in ["a", "and", "&"]:
operator = "and"
if opt in ["-b", "--batch-output"]:
batch_output = opt_value
if opt in ["-i", "--input"]:
f_input = opt_value
if opt in ["-r", "--remote"]:
server_url = opt_value
sleeptime = CFG_BIBMATCH_REMOTE_SLEEPTIME
if opt in ["-a", "--alter-recid"]:
modify = 1
if opt in ["-z", "--clean"]:
clean = True
if opt in ["-c", "--config"]:
config_file = opt_value
config_file_read = bibconvert.read_file(config_file, 0)
for line in config_file_read:
tmp = line.split("---")
if(tmp[0] == "QRYSTR"):
qrystrs.append((field, tmp[1]))
if opt in ["-x", "--collection"]:
colls = opt_value.split(',')
for collection in colls:
if collection not in collections:
collections.append(collection)
if opt in ["--user"]:
user = opt_value
password = getpass.getpass()
if opt == "--no-fuzzy":
fuzzy = False
if opt == "--no-validation":
validate = False
if opt == "--ascii":
ascii_mode = True
if verbose:
sys.stderr.write("\nBibMatch: Parsing input file %s..." % (f_input,))
read_list = []
if not f_input:
for line_in in sys.stdin:
read_list.append(line_in)
else:
f = open(f_input)
for line_in in f:
read_list.append(line_in)
f.close()
file_read = "".join(read_list)
# Detect input type
if not file_read.strip().startswith('<'):
# Not xml, assume type textmarc
file_read = transform_input_to_marcxml(f_input, file_read)
records = create_records(file_read)
if len(records) == 0:
if verbose:
sys.stderr.write("\nBibMatch: Input file contains no records.\n")
sys.exit(1)
# Check for any parsing errors in records
if bibrecs_has_errors(records):
# Errors found. Let's try to remove any XML entities
if verbose > 8:
sys.stderr.write("\nBibMatch: Parsing error. Trying removal of XML entities..\n")
file_read = xml_entities_to_utf8(file_read)
records = create_records(file_read)
if bibrecs_has_errors(records):
# Still problems.. alert the user and exit
if verbose:
errors = "\n".join([str(err_msg) for dummy, err_code, err_msg in records \
if err_code == 0])
sys.stderr.write("\nBibMatch: Errors during record parsing:\n%s\n" % \
(errors,))
sys.exit(1)
if verbose:
sys.stderr.write("read %d records" % (len(records),))
sys.stderr.write("\nBibMatch: Matching ...")
if not validate:
if verbose:
sys.stderr.write("\nWARNING: Skipping match validation.\n")
match_results = match_records(records=records,
qrystrs=qrystrs,
search_mode=search_mode,
operator=operator,
verbose=verbose,
server_url=server_url,
modify=modify,
sleeptime=sleeptime,
clean=clean,
collections=collections,
user=user,
password=password,
fuzzy=fuzzy,
validate=validate,
ascii_mode=ascii_mode)
# set the output according to print..
# 0-newrecs 1-matchedrecs 2-ambiguousrecs 3-fuzzyrecs
recs_out = match_results[print_mode]
if verbose:
sys.stderr.write("\n\n Bibmatch report\n")
sys.stderr.write("=" * 35)
sys.stderr.write("\n New records : %d" % (len(match_results[0]),))
sys.stderr.write("\n Matched records : %d" % (len(match_results[1]),))
sys.stderr.write("\n Ambiguous records : %d" % (len(match_results[2]),))
sys.stderr.write("\n Fuzzy records : %d\n" % (len(match_results[3]),))
sys.stderr.write("=" * 35)
sys.stderr.write("\n Total records : %d\n" % (len(records),))
sys.stderr.write("\n See detailed log at %s\n" % (CFG_LOGFILE,))
if not noprocess and recs_out:
print('<collection xmlns="http://www.loc.gov/MARC21/slim">')
for record, results in recs_out:
if textmarc_output:
# FIXME: textmarc output does not print matching results
print(transform_record_to_marc(record))
else:
print(results)
print(record_xml_output(record))
print("</collection>")
if batch_output:
i = 0
outputs = ['new', 'matched', 'ambiguous', 'fuzzy']
for result in match_results:
out = []
out.append('<collection xmlns="http://www.loc.gov/MARC21/slim">')
for record, results in result:
if textmarc_output:
# FIXME: textmarc output does not print matching results
out.append(transform_record_to_marc(record))
else:
out.append(results)
out.append(record_xml_output(record))
out.append("</collection>")
filename = "%s.%s.xml" % (batch_output, outputs[i])
file_fd = open(filename, "w")
file_fd.write("\n".join(out))
file_fd.close()
i += 1
|
egabancho/invenio
|
invenio/legacy/bibmatch/engine.py
|
Python
|
gpl-2.0
| 64,532
|
# This file is part of the qpopplerview package.
#
# Copyright (c) 2010 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Caching of generated images.
"""
import time
import weakref
try:
import popplerqt4
except ImportError:
from . import popplerqt4_dummy as popplerqt4
from PyQt4.QtCore import Qt, QThread
from . import render
from . import rectangles
from .locking import lock
__all__ = ['maxsize', 'setmaxsize', 'image', 'generate', 'clear', 'links', 'options']
_cache = weakref.WeakKeyDictionary()
_schedulers = weakref.WeakKeyDictionary()
_options = weakref.WeakKeyDictionary()
_links = weakref.WeakKeyDictionary()
# cache size
_maxsize = 104857600 # 100M
_currentsize = 0
_globaloptions = None
def setmaxsize(maxsize):
"""Sets the maximum cache size in Megabytes."""
global _maxsize
_maxsize = maxsize * 1048576
purge()
def maxsize():
"""Returns the maximum cache size in Megabytes."""
return _maxsize / 1048576
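# Illustrative sketch (not part of the original module):
#
#   setmaxsize(200)  # allow the cache to grow to 200 MB
#   maxsize()        # -> 200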
def clear(document=None):
"""Clears the whole cache or the cache for the given Poppler.Document."""
if document:
try:
del _cache[document]
except KeyError:
pass
else:
_cache.clear()
global _currentsize
_currentsize = 0
def image(page, exact=True):
"""Returns a rendered image for given Page if in cache.
If exact is True (default), the function returns None if the exact size was
not in the cache. If exact is False, the function may return a temporary
rendering of the page scaled from a different size, if that was available.
"""
document = page.document()
pageKey = (page.pageNumber(), page.rotation())
sizeKey = (page.width(), page.height())
if exact:
try:
entry = _cache[document][pageKey][sizeKey]
except KeyError:
return
else:
entry[1] = time.time()
return entry[0]
try:
sizes = _cache[document][pageKey].keys()
except KeyError:
return
# find the closest size (assuming aspect ratio has not changed)
if sizes:
sizes = sorted(sizes, key=lambda s: abs(1 - s[0] / float(page.width())))
return _cache[document][pageKey][sizes[0]][0]
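# Illustrative usage sketch (not part of the original module), where 'page'
# is assumed to be a qpopplerview Page object:
#
#   img = image(page)                   # exact-size cached image, or None
#   if img is None:
#       img = image(page, exact=False)  # possibly a scaled stand-in
#       generate(page)                  # schedule a proper rendering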
def generate(page):
"""Schedule an image to be generated for the cache."""
# Poppler-Qt4 crashes when different pages from a Document are rendered at the same time,
# so we schedule them to be run in sequence.
document = page.document()
try:
scheduler = _schedulers[document]
except KeyError:
scheduler = _schedulers[document] = Scheduler()
scheduler.schedulejob(page)
def add(image, document, pageNumber, rotation, width, height):
"""(Internal) Adds an image to the cache."""
pageKey = (pageNumber, rotation)
sizeKey = (width, height)
_cache.setdefault(document, {}).setdefault(pageKey, {})[sizeKey] = [image, time.time()]
# maintain cache size
global _maxsize, _currentsize
_currentsize += image.byteCount()
if _currentsize > _maxsize:
purge()
def purge():
"""Removes old images from the cache to limit the space used.
(Not necessary to call, as the cache will monitor its size automatically.)
"""
# make a list of the images, sorted on time, newest first
images = iter(sorted((
(time, document, pageKey, sizeKey, image.byteCount())
for document, pageKeys in _cache.items()
for pageKey, sizeKeys in pageKeys.items()
for sizeKey, (image, time) in sizeKeys.items()),
reverse=True))
# sum the size of the newest images
global _maxsize, _currentsize
byteCount = 0
for item in images:
byteCount += item[4]
if byteCount > _maxsize:
break
_currentsize = byteCount
# delete the other images
for time, document, pageKey, sizeKey, byteCount in images:
del _cache[document][pageKey][sizeKey]
def links(page):
"""Returns a position-searchable list of the links in the page."""
document, pageNumber = page.document(), page.pageNumber()
try:
return _links[document][pageNumber]
except KeyError:
with lock(document):
links = rectangles.Rectangles(document.page(pageNumber).links(),
lambda link: link.linkArea().normalized().getCoords())
_links.setdefault(document, {})[pageNumber] = links
return links
def options(document=None):
"""Returns a RenderOptions object for a document or the global one if no document is given."""
global _globaloptions, _options
if document:
try:
return _options[document]
except KeyError:
result = _options[document] = render.RenderOptions()
return result
if not _globaloptions:
_globaloptions = render.RenderOptions()
# enable antialiasing by default
_globaloptions.setRenderHint(popplerqt4.Poppler.Document.Antialiasing |
popplerqt4.Poppler.Document.TextAntialiasing)
return _globaloptions
def setoptions(options, document=None):
"""Sets a RenderOptions instance for the given document or as the global one if no document is given.
Use None for the options to unset (delete) the options.
"""
global _globaloptions, _options
if not document:
_globaloptions = options
elif options:
_options[document] = options
else:
try:
del _options[document]
except KeyError:
pass
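# Illustrative sketch (not part of the original module); setPaperColor is a
# hypothetical RenderOptions call used only for illustration:
#
#   opts = render.RenderOptions()
#   opts.setPaperColor(QColor(Qt.white))  # assumes QColor is imported
#   setoptions(opts, document)            # per-document override
#   setoptions(None, document)            # remove the override again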
class Scheduler(object):
"""Manages running rendering jobs in sequence for a Document."""
def __init__(self):
self._schedule = [] # order
self._jobs = {} # jobs on key
self._waiting = weakref.WeakKeyDictionary() # jobs on page
self._running = None
def schedulejob(self, page):
"""Creates or retriggers an existing Job.
        If a Job was already scheduled for the page, it is moved to the front of the queue.
The page's update() method will be called when the Job has completed.
"""
# uniquely identify the image to be generated
key = (page.pageNumber(), page.rotation(), page.width(), page.height())
try:
job = self._jobs[key]
except KeyError:
job = self._jobs[key] = Job(page)
job.key = key
else:
self._schedule.remove(job)
self._schedule.append(job)
self._waiting[page] = job
self.checkStart()
def checkStart(self):
"""Starts a job if none is running and at least one is waiting."""
while self._schedule and not self._running:
job = self._schedule[-1]
document = job.document()
if document and job in self._waiting.values():
self._running = Runner(self, document, job)
break
else:
self.done(job)
def done(self, job):
"""Called when the job has completed."""
del self._jobs[job.key]
self._schedule.remove(job)
self._running = None
for page in list(self._waiting):
if self._waiting[page] is job:
page.update()
del self._waiting[page]
class Job(object):
"""Simply contains data needed to create an image later."""
def __init__(self, page):
self.document = weakref.ref(page.document())
self.pageNumber = page.pageNumber()
self.rotation = page.rotation()
self.width = page.width()
self.height = page.height()
class Runner(QThread):
"""Immediately runs a Job in a background thread."""
def __init__(self, scheduler, document, job):
super(Runner, self).__init__()
self.scheduler = scheduler
self.job = job
self.document = document # keep reference now so that it does not die during this thread
self.finished.connect(self.slotFinished)
self.start()
def run(self):
"""Main method of this thread, called by Qt on start()."""
page = self.document.page(self.job.pageNumber)
pageSize = page.pageSize()
if self.job.rotation & 1:
pageSize.transpose()
xres = 72.0 * self.job.width / pageSize.width()
yres = 72.0 * self.job.height / pageSize.height()
threshold = options().oversampleThreshold() or options(self.document).oversampleThreshold()
multiplier = 2 if xres < threshold else 1
with lock(self.document):
options().write(self.document)
options(self.document).write(self.document)
self.image = page.renderToImage(xres * multiplier, yres * multiplier, 0, 0, self.job.width * multiplier, self.job.height * multiplier, self.job.rotation)
if multiplier == 2:
self.image = self.image.scaledToWidth(self.job.width, Qt.SmoothTransformation)
def slotFinished(self):
"""Called when the thread has completed."""
add(self.image, self.document, self.job.pageNumber, self.job.rotation, self.job.width, self.job.height)
self.scheduler.done(self.job)
self.scheduler.checkStart()
|
shimpe/frescobaldi
|
frescobaldi_app/qpopplerview/cache.py
|
Python
|
gpl-2.0
| 10,059
|
# client.py -- Implementation of the client side git protocols
# Copyright (C) 2008 Jelmer Vernooij <jelmer@samba.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# or (at your option) a later version of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import select
import socket
from dulwich.protocol import Protocol, TCP_GIT_PORT, extract_capabilities
class SimpleFetchGraphWalker(object):
def __init__(self, local_heads, get_parents):
self.heads = set(local_heads)
self.get_parents = get_parents
self.parents = {}
def ack(self, ref):
if ref in self.heads:
self.heads.remove(ref)
if ref in self.parents:
for p in self.parents[ref]:
self.ack(p)
def next(self):
if self.heads:
ret = self.heads.pop()
ps = self.get_parents(ret)
self.parents[ret] = ps
self.heads.update(ps)
return ret
return None
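# Illustrative sketch (not part of the original module). Assuming 'repo'
# exposes a get_parents(sha) callable and 'local_heads' holds local ref shas:
#
#   walker = SimpleFetchGraphWalker(local_heads, repo.get_parents)
#   sha = walker.next()  # next "have" candidate to offer the server
#   walker.ack(sha)      # server has it; its known ancestors are pruned too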
class GitClient(object):
"""Git smart server client.
"""
def __init__(self, fileno, read, write):
self.proto = Protocol(read, write)
self.fileno = fileno
def capabilities(self):
return "multi_ack side-band-64k thin-pack ofs-delta"
def read_refs(self):
server_capabilities = None
refs = {}
# Receive refs from server
for pkt in self.proto.read_pkt_seq():
(sha, ref) = pkt.rstrip("\n").split(" ", 1)
if server_capabilities is None:
(ref, server_capabilities) = extract_capabilities(ref)
if not (ref == "capabilities^{}" and sha == "0" * 40):
refs[ref] = sha
return refs, server_capabilities
def send_pack(self, path):
refs, server_capabilities = self.read_refs()
changed_refs = [] # FIXME
if not changed_refs:
self.proto.write_pkt_line(None)
return
self.proto.write_pkt_line("%s %s %s\0%s" % (changed_refs[0][0], changed_refs[0][1], changed_refs[0][2], self.capabilities()))
want = []
have = []
        for changed_ref in changed_refs[:]:
            self.proto.write_pkt_line("%s %s %s" % changed_ref)
            want.append(changed_ref[1])
            if changed_ref[0] != "0"*40:
                have.append(changed_ref[0])
self.proto.write_pkt_line(None)
# FIXME: This is implementation specific
# shas = generate_pack_contents(want, have, None)
# write_pack_data(self.write, shas, len(shas))
def fetch_pack(self, path, determine_wants, graph_walker, pack_data, progress):
"""Retrieve a pack from a git smart server.
:param determine_wants: Callback that returns list of commits to fetch
:param graph_walker: Object with next() and ack().
:param pack_data: Callback called for each bit of data in the pack
:param progress: Callback for progress reports (strings)
"""
(refs, server_capabilities) = self.read_refs()
wants = determine_wants(refs)
if not wants:
self.proto.write_pkt_line(None)
return
self.proto.write_pkt_line("want %s %s\n" % (wants[0], self.capabilities()))
for want in wants[1:]:
self.proto.write_pkt_line("want %s\n" % want)
self.proto.write_pkt_line(None)
have = graph_walker.next()
while have:
self.proto.write_pkt_line("have %s\n" % have)
if len(select.select([self.fileno], [], [], 0)[0]) > 0:
pkt = self.proto.read_pkt_line()
parts = pkt.rstrip("\n").split(" ")
if parts[0] == "ACK":
graph_walker.ack(parts[1])
assert parts[2] == "continue"
have = graph_walker.next()
self.proto.write_pkt_line("done\n")
pkt = self.proto.read_pkt_line()
while pkt:
parts = pkt.rstrip("\n").split(" ")
if parts[0] == "ACK":
graph_walker.ack(pkt.split(" ")[1])
if len(parts) < 3 or parts[2] != "continue":
break
pkt = self.proto.read_pkt_line()
for pkt in self.proto.read_pkt_seq():
channel = ord(pkt[0])
pkt = pkt[1:]
if channel == 1:
pack_data(pkt)
elif channel == 2:
progress(pkt)
else:
raise AssertionError("Invalid sideband channel %d" % channel)
class TCPGitClient(GitClient):
def __init__(self, host, port=TCP_GIT_PORT):
self._socket = socket.socket(type=socket.SOCK_STREAM)
self._socket.connect((host, port))
self.rfile = self._socket.makefile('rb', -1)
self.wfile = self._socket.makefile('wb', 0)
self.host = host
super(TCPGitClient, self).__init__(self._socket.fileno(), self.rfile.read, self.wfile.write)
def send_pack(self, path):
self.proto.send_cmd("git-receive-pack", path, "host=%s" % self.host)
super(TCPGitClient, self).send_pack(path)
def fetch_pack(self, path, determine_wants, graph_walker, pack_data, progress):
self.proto.send_cmd("git-upload-pack", path, "host=%s" % self.host)
super(TCPGitClient, self).fetch_pack(path, determine_wants, graph_walker, pack_data, progress)
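# A minimal usage sketch (assumes a git daemon reachable on localhost;
# "local_heads", "commit_parents", and "store_pack_chunk" are caller-supplied
# placeholders, not part of this module):
#
#   client = TCPGitClient("localhost")
#   walker = SimpleFetchGraphWalker(local_heads, commit_parents)
#   client.fetch_pack("/project.git",
#                     determine_wants=lambda refs: refs.values(),
#                     graph_walker=walker,
#                     pack_data=store_pack_chunk,
#                     progress=lambda msg: None)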
|
harsh-a1/repeater-testing
|
dulwich/dulwich/client.py
|
Python
|
gpl-2.0
| 5,919
|
import sys
import seq
import os
from logger import Logger
"""
right now this just chooses the longest
BEWARE, this writes over the file
"""
if __name__ == "__main__":
if len(sys.argv) != 4 and len(sys.argv) != 5:
print("python "+sys.argv[0]+" table clusterdir fending [logfile]")
sys.exit(0)
fend = sys.argv[3]
LOGFILE = "pyphlawd.log"
if len(sys.argv) == 5:
LOGFILE = sys.argv[4]
log = Logger(LOGFILE)
log.a()
tab = open(sys.argv[1],"r")
idn = {}
for i in tab:
spls = i.strip().split("\t")
idn[spls[3]] = spls[4]
tab.close()
dirr = sys.argv[2]
for o in os.listdir(dirr):
        if fend is not None:
if fend not in o:
continue
seqs = {}
for i in seq.read_fasta_file_iter(dirr+"/"+o):
if idn[i.name] not in seqs:
seqs[idn[i.name]] = []
seqs[idn[i.name]].append(i)
for i in seqs:
if len(seqs[i]) > 1:
longest = None
longestn = 0
for j in seqs[i]:
if len(j.seq) > longestn:
longest = j
longestn = len(j.seq)
seqs[i] = [longest]
fn = open(dirr+"/"+o,"w")
for i in seqs:
for j in seqs[i]:
fn.write(j.get_fasta())
fn.close()
log.c()
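# The inner selection loop above keeps only the longest record per species.
# A compact equivalent (a sketch; "records" stands for one seqs[i] list):
#
#   longest = max(records, key=lambda rec: len(rec.seq))
#   records[:] = [longest]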
|
FePhyFoFum/PyPHLAWD
|
src/choose_one_species_cluster.py
|
Python
|
gpl-2.0
| 1,407
|
from mmap import mmap
## exiftool -xmp-dc:subject=Metadatos XMP de prueba ./test.gif ##
class gifxmp:
    def __init__(self, filename):
        self.mmaped = self.__mmap(filename)
        self.fst = self.__checkxmp()
        if self.fst != -1:
            self.xml = self.__loadxmppacket()
            print self.xml
            ## dict = ModuleXML.parsexmp(self.xml) ##
    def __mmap(self, filename):
        with open(filename, "r+b") as fd:
            # The mapping remains valid after the context manager closes fd.
            return mmap(fd.fileno(), 0)
    ## Checks that the header is correct; only the existence of a few key
    ## characters in the header is verified. If it is correct, returns the
    ## index of the first occurrence of XMP in the header. Everything else
    ## pivots from this offset.
    def __checkxmp(self):
        return self.mmaped.find("XMP Data")
    ## Read the packet: the boundary is the first occurrence of the header
    ## + 12 bytes of the check, up to the same point + 256.
    ## 256 -> (258 - 2 bytes of the block terminator)
    def __loadxmppacket(self):
        blcktrmt = self.mmaped.find("\x00\x00", self.fst, self.mmaped.size())
        return self.mmaped[self.fst+11:blcktrmt-256]
if __name__ == "__main__":
inst = gifxmp("test.gif")
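# Note on the offsets above: GIF embeds XMP in an Application Extension whose
# identifier contains "XMP Data"; the packet is followed by a 258-byte "magic
# trailer" whose trailing zero bytes terminate the block, which is what the
# slice arithmetic in __loadxmppacket accounts for. A quick manual probe
# (a sketch, independent of this class):
#
#   data = open("test.gif", "rb").read()
#   start = data.find("XMP Data")
#   end = data.find("\x00\x00", start)
#   print repr(data[start+11:end-256])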
|
overxfl0w/Grampus-Forensic-Utils
|
Metadata/Image/XMP/GIF/gifxmp.py
|
Python
|
gpl-2.0
| 1,117
|
# -*- coding: utf-8 -*-
##
## test_account.py
## Login : <dax@happycoders.org>
## Started on Wed Feb 14 08:23:17 2007 David Rousselie
## $Id$
##
## Copyright (C) 2007 David Rousselie
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
import unittest
import thread
from jcl.tests import JCLTestCase
import jcl.model as model
from jcl.error import FieldError
from jcl.model.account import Account, PresenceAccount, User
import jmc.model.account
from jmc.model.account import MailAccount, POP3Account, IMAPAccount, \
GlobalSMTPAccount, AbstractSMTPAccount, SMTPAccount
from jmc.lang import Lang
from jcl.model.tests.account import Account_TestCase, \
PresenceAccount_TestCase, InheritableAccount_TestCase, \
ExampleAccount
from jmc.model.tests import email_generator, server
class AccountModule_TestCase(unittest.TestCase):
def test_validate_login_with_empty_login(self):
self.assertRaises(FieldError, jmc.model.account.validate_login,
None, None, None)
def test_validate_login_with_login_with_whitespace(self):
self.assertRaises(FieldError, jmc.model.account.validate_login,
"login with spaces", None, None)
def test_validate_host_with_empty_login(self):
self.assertRaises(FieldError, jmc.model.account.validate_host,
None, None, None)
def test_validate_host_with_host_with_whitespace(self):
self.assertRaises(FieldError, jmc.model.account.validate_host,
"host with spaces", None, None)
class MailAccount_TestCase(PresenceAccount_TestCase):
def setUp(self):
PresenceAccount_TestCase.setUp(self, tables=[MailAccount])
self.account = MailAccount(user=User(jid="user1@test.com"),
name="account1",
jid="account1@jmc.test.com")
self.account_class = MailAccount
def make_test(email_type, tested_func, expected_res):
def inner(self):
encoded, multipart, header = email_type
email = email_generator.generate(encoded,
multipart,
header)
part = tested_func(self, email)
self.assertEquals(part, expected_res)
return inner
test_get_decoded_part_not_encoded = \
make_test((False, False, False), \
lambda self, email: \
self.account.get_decoded_part(email, None),
u"Not encoded single part")
test_get_decoded_part_encoded = \
make_test((True, False, False),
lambda self, email: \
self.account.get_decoded_part(email, None),
u"Encoded single part with 'iso-8859-15' charset (éàê)")
test_format_message_summary_not_encoded = \
make_test((False, False, True),
lambda self, email: \
self.account.format_message_summary(email),
(u"From : not encoded from\nSubject : not encoded subject\n\n",
u"not encoded from"))
test_format_message_summary_encoded = \
make_test((True, False, True),
lambda self, email: \
self.account.format_message_summary(email),
(u"From : encoded from (éàê)\nSubject : encoded subject " + \
u"(éàê)\n\n",
u"encoded from (éàê)"))
test_format_message_summary_partial_encoded = \
make_test((True, False, True),
lambda self, email: \
email.replace_header("Subject",
"\" " + str(email["Subject"]) \
+ " \" not encoded part") or \
email.replace_header("From",
"\" " + str(email["From"]) \
+ " \" not encoded part") or \
self.account.format_message_summary(email),
(u"From : \"encoded from (éàê)\" not encoded part\nSubject " + \
u": \"encoded subject (éàê)\" not encoded part\n\n",
u"\"encoded from (éàê)\" not encoded part"))
test_format_message_single_not_encoded = \
make_test((False, False, True),
lambda self, email: \
self.account.format_message(email),
(u"From : not encoded from\nSubject : not encoded subject" + \
u"\n\nNot encoded single part\n",
u"not encoded from"))
test_format_message_single_encoded = \
make_test((True, False, True),
lambda self, email: \
self.account.format_message(email),
(u"From : encoded from (éàê)\nSubject : encoded subject " + \
u"(éàê)\n\nEncoded single part with 'iso-8859-15' charset" + \
u" (éàê)\n",
u"encoded from (éàê)"))
test_format_message_multi_not_encoded = \
make_test((False, True, True),
lambda self, email: \
self.account.format_message(email),
(u"From : not encoded from\nSubject : not encoded subject" + \
u"\n\nNot encoded multipart1\nNot encoded multipart2\n",
u"not encoded from"))
test_format_message_multi_encoded = \
make_test((True, True, True),
lambda self, email: \
self.account.format_message(email),
(u"From : encoded from (éàê)\nSubject : encoded subject (éà" + \
u"ê)\n\nutf-8 multipart1 with no charset (éàê)" + \
u"\nEncoded multipart2 with 'iso-8859-15' charset (éàê)\n" + \
u"Encoded multipart3 with no charset (éàê)\n",
u"encoded from (éàê)"))
def test_get_default_status_msg(self):
"""
Get default status message for MailAccount.
Should raise NotImplementedError because get_type() method
is not implemented
"""
try:
self.account.get_default_status_msg(Lang.en)
except NotImplementedError:
return
fail("No NotImplementedError raised")
class POP3Account_TestCase(InheritableAccount_TestCase):
def setUp(self):
JCLTestCase.setUp(self, tables=[Account, PresenceAccount, User,
MailAccount, POP3Account])
self.pop3_account = POP3Account(user=User(jid="user1@test.com"),
name="account1",
jid="account1@jmc.test.com",
login="login")
self.pop3_account.password = "pass"
self.pop3_account.host = "localhost"
self.pop3_account.port = 1110
self.pop3_account.ssl = False
model.db_disconnect()
self.account_class = POP3Account
def make_test(responses=None, queries=None, core=None):
def inner(self):
self.server = server.DummyServer("localhost", 1110)
thread.start_new_thread(self.server.serve, ())
self.server.responses = ["+OK connected\r\n",
"+OK name is a valid mailbox\r\n",
"+OK pass\r\n"]
if responses:
self.server.responses += responses
self.server.queries = ["USER login\r\n",
"PASS pass\r\n"]
if queries:
self.server.queries += queries
self.server.queries += ["QUIT\r\n"]
self.pop3_account.connect()
self.failUnless(self.pop3_account.connection,
"Cannot establish connection")
if core:
model.db_connect()
core(self)
model.db_disconnect()
self.pop3_account.disconnect()
self.failUnless(self.server.verify_queries(),
"Sended queries does not match expected queries.")
return inner
test_connection = make_test
test_get_mail_list_summary = \
make_test(["+OK 2 20\r\n",
"+OK 10 octets\r\n" + \
"From: user@test.com\r\n" + \
"Subject: mail subject 1\r\n.\r\n",
"+OK 10 octets\r\n" + \
"From: user@test.com\r\n" + \
"Subject: mail subject 2\r\n.\r\n",
"+OK\r\n"],
["STAT\r\n",
"TOP 1 0\r\n",
"TOP 2 0\r\n",
"RSET\r\n"],
lambda self: \
self.assertEquals(self.pop3_account.get_mail_list_summary(),
[("1", "mail subject 1"),
("2", "mail subject 2")]))
test_get_mail_list_summary_start_index = \
make_test(["+OK 3 30\r\n",
"+OK 10 octets\r\n" + \
"From: user@test.com\r\n" + \
"Subject: mail subject 2\r\n.\r\n",
"+OK 10 octets\r\n" + \
"From: user@test.com\r\n" + \
"Subject: mail subject 3\r\n.\r\n",
"+OK\r\n"],
["STAT\r\n",
"TOP 2 0\r\n",
"TOP 3 0\r\n",
"RSET\r\n"],
lambda self: \
self.assertEquals(self.pop3_account.get_mail_list_summary(start_index=2),
[("2", "mail subject 2"),
("3", "mail subject 3")]))
test_get_mail_list_summary_end_index = \
make_test(["+OK 3 30\r\n",
"+OK 10 octets\r\n" + \
"From: user@test.com\r\n" + \
"Subject: mail subject 1\r\n.\r\n",
"+OK 10 octets\r\n" + \
"From: user@test.com\r\n" + \
"Subject: mail subject 2\r\n.\r\n",
"+OK\r\n"],
["STAT\r\n",
"TOP 1 0\r\n",
"TOP 2 0\r\n",
"RSET\r\n"],
lambda self: \
self.assertEquals(self.pop3_account.get_mail_list_summary(end_index=2),
[("1", "mail subject 1"),
("2", "mail subject 2")]))
test_get_new_mail_list = \
make_test(["+OK 2 20\r\n"],
["STAT\r\n"],
lambda self: \
self.assertEquals(self.pop3_account.get_new_mail_list(),
["1", "2"]))
test_get_mail_summary = \
make_test(["+OK 10 octets\r\n" + \
"From: user@test.com\r\n" + \
"Subject: subject test\r\n\r\n" + \
"mymessage\r\n.\r\n",
"+OK\r\n"],
["RETR 1\r\n",
"RSET\r\n"],
lambda self: \
self.assertEquals(self.pop3_account.get_mail_summary(1),
(u"From : user@test.com\n" + \
u"Subject : subject test\n\n",
u"user@test.com")))
test_get_mail = \
make_test(["+OK 10 octets\r\n" + \
"From: user@test.com\r\n" + \
"Subject: subject test\r\n\r\n" + \
"mymessage\r\n.\r\n",
"+OK\r\n"],
["RETR 1\r\n",
"RSET\r\n"],
lambda self: \
self.assertEquals(self.pop3_account.get_mail(1),
(u"From : user@test.com\n" + \
u"Subject : subject test\n\n" + \
u"mymessage\n",
u"user@test.com")))
test_unsupported_reset_command_get_mail_summary = \
make_test(["+OK 10 octets\r\n" + \
"From: user@test.com\r\n" + \
"Subject: subject test\r\n\r\n" + \
"mymessage\r\n.\r\n",
"-ERR unknown command\r\n"],
["RETR 1\r\n",
"RSET\r\n"],
lambda self: \
self.assertEquals(self.pop3_account.get_mail_summary(1),
(u"From : user@test.com\n" + \
u"Subject : subject test\n\n",
u"user@test.com")))
test_unsupported_reset_command_get_mail = \
make_test(["+OK 10 octets\r\n" + \
"From: user@test.com\r\n" + \
"Subject: subject test\r\n\r\n" + \
"mymessage\r\n.\r\n",
"-ERR unknown command\r\n"],
["RETR 1\r\n",
"RSET\r\n"],
lambda self: \
self.assertEquals(self.pop3_account.get_mail(1),
(u"From : user@test.com\n" + \
u"Subject : subject test\n\n" + \
u"mymessage\n",
u"user@test.com")))
def test_get_next_mail_index_empty(self):
"""
Test get_next_mail_index with empty mail_list parameter.
"""
mail_list = []
self.pop3_account.nb_mail = 0
self.pop3_account.lastmail = 0
result = []
for elt in self.pop3_account.get_next_mail_index(mail_list):
result.append(elt)
self.assertEquals(result, [])
def test_get_next_mail_index(self):
"""
Test get_next_mail_index first check.
"""
mail_list = [1, 2, 3, 4]
self.pop3_account.nb_mail = 4
self.pop3_account.lastmail = 0
result = []
for elt in self.pop3_account.get_next_mail_index(mail_list):
result.append(elt)
self.assertEquals(result, [1, 2, 3, 4])
self.assertEquals(self.pop3_account.lastmail, 4)
def test_get_next_mail_index_second_check(self):
"""
Test get_next_mail_index second check (no parallel checking).
"""
mail_list = [1, 2, 3, 4, 5, 6, 7, 8]
self.pop3_account.nb_mail = 8
self.pop3_account.lastmail = 4
result = []
for elt in self.pop3_account.get_next_mail_index(mail_list):
result.append(elt)
self.assertEquals(result, [5, 6, 7, 8])
self.assertEquals(self.pop3_account.lastmail, 8)
def test_get_next_mail_index_second_check_parallel_check(self):
"""
Test get_next_mail_index second check (with parallel checking
but not more new emails than last index jmc stopped:
3 new emails after another client checked emails).
"""
mail_list = [1, 2, 3]
self.pop3_account.nb_mail = 3
self.pop3_account.lastmail = 4
result = []
for elt in self.pop3_account.get_next_mail_index(mail_list):
result.append(elt)
self.assertEquals(result, [1, 2, 3])
self.assertEquals(self.pop3_account.lastmail, 3)
def test_get_next_mail_index_second_check_bug_parallel_check(self):
"""
Test get_next_mail_index second check (with parallel checking
but with more new emails than last index jmc stopped:
5 new emails after another client checked emails). Cannot make
the difference with one new email since last jmc email check!!
"""
mail_list = [1, 2, 3, 4, 5]
self.pop3_account.nb_mail = 5
self.pop3_account.lastmail = 4
result = []
for elt in self.pop3_account.get_next_mail_index(mail_list):
result.append(elt)
# with no bug it should be:
# self.assertEquals(result, [1, 2, 3, 4, 5])
self.assertEquals(result, [5])
self.assertEquals(self.pop3_account.lastmail, 5)
def test_get_default_status_msg(self):
"""
Get default status message for POP3Account.
"""
status_msg = self.pop3_account.get_default_status_msg(Lang.en)
self.assertEquals(status_msg, "pop3://login@localhost:1110")
def test_get_default_status_msg_ssl(self):
"""
Get default status message for SSL POP3Account.
"""
self.pop3_account.ssl = True
status_msg = self.pop3_account.get_default_status_msg(Lang.en)
self.assertEquals(status_msg, "pop3s://login@localhost:1110")
class IMAPAccount_TestCase(InheritableAccount_TestCase):
def setUp(self):
JCLTestCase.setUp(self, tables=[Account, PresenceAccount, User,
MailAccount, IMAPAccount])
self.imap_account = IMAPAccount(user=User(jid="user1@test.com"),
name="account1",
jid="account1@jmc.test.com",
login="login")
self.imap_account.password = "pass"
self.imap_account.host = "localhost"
self.imap_account.port = 1143
self.imap_account.ssl = False
self.account_class = IMAPAccount
def make_test(self, responses=None, queries=None, core=None):
def inner():
self.server = server.DummyServer("localhost", 1143)
thread.start_new_thread(self.server.serve, ())
self.server.responses = ["* OK [CAPABILITY IMAP4 LOGIN-REFERRALS " + \
"AUTH=PLAIN]\r\n", \
lambda data: "* CAPABILITY IMAP4 " + \
"LOGIN-REFERRALS AUTH=PLAIN\r\n" + \
data.split()[0] + \
" OK CAPABILITY completed\r\n", \
lambda data: data.split()[0] + \
" OK LOGIN completed\r\n"]
if responses:
self.server.responses += responses
self.server.queries = ["^[^ ]* CAPABILITY", \
"^[^ ]* LOGIN login \"pass\""]
if queries:
self.server.queries += queries
self.server.queries += ["^[^ ]* LOGOUT"]
if not self.imap_account.connected:
self.imap_account.connect()
self.failUnless(self.imap_account.connection, \
"Cannot establish connection")
if core:
model.db_connect()
core(self)
model.db_disconnect()
if self.imap_account.connected:
self.imap_account.disconnect()
self.failUnless(self.server.verify_queries())
return inner
def test_connection(self):
test_func = self.make_test()
test_func()
def test_get_mail_list_summary(self):
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" +\
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" +\
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 1\r\n\r\nbody text\r\n)\r\n" + \
"* 2 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 2\r\n\r\nbody text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE INBOX",
"^[^ ]* FETCH 1:20 RFC822.header"],
lambda self: \
self.assertEquals(self.imap_account.get_mail_list_summary(),
[('1', 'mail subject 1'),
('2', 'mail subject 2')]))
test_func()
def test_get_mail_list_summary_inbox_does_not_exist(self):
self.__test_select_inbox_does_not_exist(\
lambda: self.imap_account.get_mail_list_summary(), readonly=True)
def test_get_mail_list_summary_start_index(self):
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" +\
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" +\
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 2 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 2\r\n\r\nbody text\r\n)\r\n" + \
"* 3 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 3\r\n\r\nbody text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE INBOX",
"^[^ ]* FETCH 2:20 RFC822.header"],
lambda self: \
self.assertEquals(self.imap_account.get_mail_list_summary(start_index=2),
[('2', 'mail subject 2'),
('3', 'mail subject 3')]))
test_func()
def test_get_mail_list_summary_end_index(self):
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" +\
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" +\
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 1\r\n\r\nbody text\r\n)\r\n" + \
"* 2 FETCH ((RFC822.header) {38}\r\n" + \
"Subject: mail subject 2\r\n\r\nbody text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE INBOX",
"^[^ ]* FETCH 1:2 RFC822.header"],
lambda self: \
self.assertEquals(self.imap_account.get_mail_list_summary(end_index=2),
[('1', 'mail subject 1'),
('2', 'mail subject 2')]))
test_func()
def test_get_new_mail_list(self):
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* SEARCH 9 10\r\n" + \
data.split()[0] + " OK SEARCH completed\r\n"],
["^[^ ]* SELECT INBOX",
"^[^ ]* SEARCH RECENT"],
lambda self: \
self.assertEquals(self.imap_account.get_new_mail_list(),
['9', '10']))
test_func()
def __test_select_inbox_does_not_exist(self, tested_func,
exception_message="Mailbox does not exist",
readonly=False):
def check_func(self):
try:
tested_func()
except Exception, e:
self.assertEquals(str(e), exception_message)
return
self.fail("No exception raised when selecting non existing mailbox")
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" NO Mailbox does not exist\r\n"],
["^[^ ]* " + (readonly and "EXAMINE" or "SELECT") + " INBOX"],
check_func)
test_func()
def test_get_new_mail_list_inbox_does_not_exist(self):
self.__test_select_inbox_does_not_exist(\
            lambda: self.imap_account.get_new_mail_list())
def test_get_new_mail_list_delimiter1(self):
self.imap_account.mailbox = "INBOX/dir1/subdir2"
self.imap_account.delimiter = "."
test_func = self.make_test( \
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* SEARCH 9 10\r\n" + \
data.split()[0] + " OK SEARCH completed\r\n"],
["^[^ ]* SELECT \"?INBOX\.dir1\.subdir2\"?",
"^[^ ]* SEARCH RECENT"],
lambda self: \
self.assertEquals(self.imap_account.get_new_mail_list(),
['9', '10']))
test_func()
def test_get_new_mail_list_delimiter2(self):
self.imap_account.mailbox = "INBOX/dir1/subdir2"
self.imap_account.delimiter = "/"
test_func = self.make_test( \
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* SEARCH 9 10\r\n" + \
data.split()[0] + " OK SEARCH completed\r\n"],
["^[^ ]* SELECT \"?INBOX/dir1/subdir2\"?",
"^[^ ]* SEARCH RECENT"],
lambda self: \
self.assertEquals(self.imap_account.get_new_mail_list(),
['9', '10']))
test_func()
def test_get_mail_summary(self):
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" +\
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" +\
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822) {12}\r\nbody" + \
" text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE INBOX",
"^[^ ]* FETCH 1 \(RFC822.header\)"],
lambda self: \
self.assertEquals(self.imap_account.get_mail_summary(1),
(u"From : None\nSubject : None\n\n",
u"None")))
test_func()
def test_get_mail_summary_inbox_does_not_exist(self):
self.__test_select_inbox_does_not_exist(\
lambda: self.imap_account.get_mail_summary(1),
"Mailbox does not exist (email 1)", True)
def test_get_mail_summary_delimiter(self):
self.imap_account.mailbox = "INBOX/dir1/subdir2"
self.imap_account.delimiter = "."
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" +\
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" +\
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822) {12}\r\nbody" + \
" text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE \"?INBOX\.dir1\.subdir2\"?",
"^[^ ]* FETCH 1 \(RFC822.header\)"],
lambda self: \
self.assertEquals(self.imap_account.get_mail_summary(1),
(u"From : None\nSubject : None\n\n",
u"None")))
test_func()
def test_get_mail(self):
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822) {11}\r\nbody" + \
" text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE INBOX",
"^[^ ]* FETCH 1 \(RFC822\)"],
lambda self: \
self.assertEquals(self.imap_account.get_mail(1),
(u"From : None\nSubject : None\n\nbody text\r\n\n",
u"None")))
test_func()
def test_get_mail_inbox_does_not_exist(self):
self.__test_select_inbox_does_not_exist(\
lambda: self.imap_account.get_mail(1),
"Mailbox does not exist (email 1)", True)
def test_get_mail_delimiter(self):
self.imap_account.mailbox = "INBOX/dir1/subdir2"
self.imap_account.delimiter = "."
test_func = self.make_test(\
[lambda data: "* 42 EXISTS\r\n* 1 RECENT\r\n* OK" + \
" [UNSEEN 9]\r\n* FLAGS (\Deleted \Seen\*)\r\n*" + \
" OK [PERMANENTFLAGS (\Deleted \Seen\*)\r\n" + \
data.split()[0] + \
" OK [READ-WRITE] SELECT completed\r\n",
lambda data: "* 1 FETCH ((RFC822) {11}\r\nbody" + \
" text\r\n)\r\n" + \
data.split()[0] + " OK FETCH completed\r\n"],
["^[^ ]* EXAMINE \"?INBOX\.dir1\.subdir2\"?",
"^[^ ]* FETCH 1 \(RFC822\)"],
lambda self: \
self.assertEquals(self.imap_account.get_mail(1),
(u"From : None\nSubject : None\n\nbody text\r\n\n",
u"None")))
test_func()
def test_build_folder_cache(self):
test_func = self.make_test(\
[lambda data: '* LIST () "." "INBOX"\r\n' + \
'* LIST () "." "INBOX.dir1"\r\n' + \
'* LIST () "." "INBOX.dir1.subdir1"\r\n' + \
'* LIST () "." "INBOX.dir1.subdir2"\r\n' + \
'* LIST () "." "INBOX.dir2"\r\n' + \
data.split()[0] + ' OK LIST completed\r\n'],
["^[^ ]* LIST \"\" \*"],
lambda self: self.assertEquals(self.imap_account._build_folder_cache(),
{"INBOX":
{"dir1":
{"subdir1": {},
"subdir2": {}},
"dir2": {}}}))
test_func()
def test_ls_dir_base(self):
self.test_build_folder_cache()
self.assertEquals(self.imap_account.ls_dir(""),
["INBOX"])
def test_ls_dir_subdir(self):
self.test_build_folder_cache()
result = self.imap_account.ls_dir("INBOX")
result.sort()
self.assertEquals(result,
["dir1", "dir2"])
def test_ls_dir_subsubdir_delim1(self):
self.test_build_folder_cache()
self.imap_account.default_delimiter = "."
result = self.imap_account.ls_dir("INBOX/dir1")
result.sort()
self.assertEquals(result,
["subdir1", "subdir2"])
def test_ls_dir_subsubdir_delim2(self):
self.test_build_folder_cache()
result = self.imap_account.ls_dir("INBOX/dir1")
result.sort()
self.assertEquals(result,
["subdir1", "subdir2"])
def test_populate_handler(self):
self.assertEquals(".", self.imap_account.delimiter)
self.imap_account.mailbox = "INBOX/dir1/subdir2"
def call_func(self):
self.imap_account.populate_handler()
self.assertEquals("INBOX.dir1.subdir2", self.imap_account.mailbox)
test_func = self.make_test(\
[lambda data: '* LIST () "." "INBOX.dir1.subdir2"\r\n' + \
data.split()[0] + ' OK LIST completed\r\n'],
["^[^ ]* LIST \"?INBOX.dir1.subdir2\"? \*"],
call_func)
test_func()
def test_populate_handler_wrong_default_delimiter(self):
self.imap_account.delimiter = "/"
self.imap_account.mailbox = "INBOX/dir1/subdir2"
def call_func(self):
self.imap_account.populate_handler()
self.assertEquals("INBOX.dir1.subdir2", self.imap_account.mailbox)
self.assertEquals(".", self.imap_account.delimiter)
test_func = self.make_test(\
[lambda data: data.split()[0] + ' OK LIST completed\r\n',
lambda data: '* LIST () "." "INBOX.dir1.subdir2"\r\n' + \
data.split()[0] + ' OK LIST completed\r\n'],
["^[^ ]* LIST \"?INBOX/dir1/subdir2\"? \*",
"^[^ ]* LIST \"?INBOX.dir1.subdir2\"? \*"],
call_func)
test_func()
def test_populate_handler_wrong_mailbox(self):
self.assertEquals(".", self.imap_account.delimiter)
self.imap_account.mailbox = "INBOX.dir1.subdir2"
def call_func(self):
try:
self.imap_account.populate_handler()
except Exception, e:
return
self.fail("Exception should have been raised")
test_func = self.make_test(\
[lambda data: data.split()[0] + ' ERR LIST completed\r\n'],
["^[^ ]* LIST \"?INBOX.dir1.subdir2\"? \*"],
call_func)
test_func()
def check_get_next_mail_index(self, mail_list):
"""
Common tests for get_next_mail_index method.
"""
result = []
original_mail_list = [elt for elt in mail_list]
for elt in self.imap_account.get_next_mail_index(mail_list):
result.append(elt)
self.assertEquals(mail_list, [])
self.assertEquals(result, original_mail_list)
def test_get_next_mail_index_empty(self):
"""
Test get_next_mail_index with empty mail_list parameter.
"""
mail_list = []
self.check_get_next_mail_index(mail_list)
def test_get_next_mail_index(self):
"""
Test get_next_mail_index.
"""
mail_list = [1, 2, 3, 4]
self.check_get_next_mail_index(mail_list)
def test_get_default_status_msg(self):
"""
Get default status message for IMAPAccount.
"""
status_msg = self.imap_account.get_default_status_msg(Lang.en)
self.assertEquals(status_msg, "imap://login@localhost:1143")
def test_get_default_status_msg_ssl(self):
"""
Get default status message for SSL IMAPAccount.
"""
self.imap_account.ssl = True
status_msg = self.imap_account.get_default_status_msg(Lang.en)
self.assertEquals(status_msg, "imaps://login@localhost:1143")
class AbstractSMTPAccount_TestCase(Account_TestCase):
def setUp(self):
JCLTestCase.setUp(self, tables=[Account, ExampleAccount, User,
GlobalSMTPAccount, AbstractSMTPAccount])
self.account_class = AbstractSMTPAccount
def test_default_account_post_func_no_default_true(self):
user1 = User(jid="user1@test.com")
account11 = AbstractSMTPAccount(user=user1,
name="account11",
jid="account11@jmc.test.com")
account12 = AbstractSMTPAccount(user=user1,
name="account12",
jid="account12@jmc.test.com")
(name, field_type, field_options, post_func, default_func) = \
AbstractSMTPAccount.get_register_fields()[0]
value = post_func("True", None, "user1@test.com")
self.assertTrue(value)
def test_default_account_post_func_no_default_false(self):
user1 = User(jid="user1@test.com")
account11 = AbstractSMTPAccount(user=user1,
name="account11",
jid="account11@jmc.test.com")
account12 = AbstractSMTPAccount(user=user1,
name="account12",
jid="account12@jmc.test.com")
(name, field_type, field_options, post_func, default_func) = \
AbstractSMTPAccount.get_register_fields()[0]
value = post_func("False", None, "user1@test.com")
self.assertTrue(value)
def test_default_account_post_func_true(self):
user1 = User(jid="user1@test.com")
account11 = AbstractSMTPAccount(user=user1,
name="account11",
jid="account11@jmc.test.com")
account12 = AbstractSMTPAccount(user=user1,
name="account12",
jid="account12@jmc.test.com")
account12.default_account = True
(name, field_type, field_options, post_func, default_func) = \
AbstractSMTPAccount.get_register_fields()[0]
value = post_func("True", None, "user1@test.com")
self.assertTrue(value)
self.assertFalse(account12.default_account)
def test_default_account_post_func_false(self):
user1 = User(jid="user1@test.com")
account11 = AbstractSMTPAccount(user=user1,
name="account11",
jid="account11@jmc.test.com")
account12 = AbstractSMTPAccount(user=user1,
name="account12",
jid="account12@jmc.test.com")
account12.default_account = True
(name, field_type, field_options, post_func, default_func) = \
AbstractSMTPAccount.get_register_fields()[0]
value = post_func("False", None, "user1@test.com")
self.assertFalse(value)
self.assertTrue(account12.default_account)
def test_create_email(self):
account11 = AbstractSMTPAccount(user=User(jid="user1@test.com"),
name="account11",
jid="account11@jmc.test.com")
email = account11.create_email("from@test.com",
"to@test.com",
"subject",
"body")
self.assertEqual(email['From'], "from@test.com")
self.assertEqual(email['To'], "to@test.com")
self.assertEqual(email['Subject'], "subject")
self.assertEqual(email.get_payload(), "body")
def test_create_email_other_headers(self):
account11 = AbstractSMTPAccount(user=User(jid="user1@test.com"),
name="account11",
jid="account11@jmc.test.com")
email = account11.create_email("from@test.com",
"to@test.com",
"subject",
"body",
{"Bcc": "bcc@test.com",
"Cc": "cc@test.com"})
self.assertEqual(email['From'], "from@test.com")
self.assertEqual(email['To'], "to@test.com")
self.assertEqual(email['Subject'], "subject")
self.assertEqual(email['Bcc'], "bcc@test.com")
self.assertEqual(email['Cc'], "cc@test.com")
self.assertEqual(email.get_payload(), "body")
class SMTPAccount_TestCase(Account_TestCase):
def setUp(self):
JCLTestCase.setUp(self, tables=[Account, ExampleAccount, User,
GlobalSMTPAccount,
AbstractSMTPAccount, SMTPAccount])
self.account_class = SMTPAccount
def make_test(self, responses=None, queries=None, core=None):
def inner():
self.server = server.DummyServer("localhost", 1025)
thread.start_new_thread(self.server.serve, ())
self.server.responses = []
if responses:
self.server.responses += responses
self.server.responses += ["221 localhost closing connection\r\n"]
self.server.queries = []
if queries:
self.server.queries += queries
self.server.queries += ["quit\r\n"]
if core:
model.db_connect()
core(self)
model.db_disconnect()
self.failUnless(self.server.verify_queries())
return inner
def test_send_email_esmtp_no_auth(self):
model.db_connect()
smtp_account = SMTPAccount(user=User(jid="user1@test.com"),
name="account11",
jid="account11@jmc.test.com")
smtp_account.host = "localhost"
smtp_account.port = 1025
model.db_disconnect()
email = smtp_account.create_email("from@test.com",
"to@test.com",
"subject",
"body")
test_func = self.make_test(["220 localhost ESMTP\r\n",
"250-localhost Hello 127.0.0.1\r\n"
+ "250-SIZE 52428800\r\n"
+ "250-PIPELINING\r\n"
+ "250 HELP\r\n",
"250 OK\r\n",
"250 Accepted\r\n",
"354 Enter message\r\n",
None, None, None, None,
None, None, None, None,
"250 OK\r\n"],
["ehlo .*\r\n",
"mail FROM:<" + str(email['From']) + ">.*",
"rcpt TO:<" + str(email['To']) + ">\r\n",
"data\r\n"] +
email.as_string().split("\n") + [".\r\n"],
lambda self: \
smtp_account.send_email(email))
test_func()
def test_send_email_no_auth(self):
model.db_connect()
smtp_account = SMTPAccount(user=User(jid="user1@test.com"),
name="account11",
jid="account11@jmc.test.com")
smtp_account.host = "localhost"
smtp_account.port = 1025
model.db_disconnect()
email = smtp_account.create_email("from@test.com",
"to@test.com",
"subject",
"body")
test_func = self.make_test(["220 localhost SMTP\r\n",
"504 ESMTP not supported\r\n",
"250-localhost Hello 127.0.0.1\r\n"
+ "250-SIZE 52428800\r\n"
+ "250-PIPELINING\r\n"
+ "250 HELP\r\n",
"250 OK\r\n",
"250 Accepted\r\n",
"354 Enter message\r\n",
None, None, None, None,
None, None, None, None,
"250 OK\r\n"],
["ehlo .*\r\n",
"helo .*\r\n",
"mail FROM:<" + str(email['From']) + ">.*",
"rcpt TO:<" + str(email['To']) + ">\r\n",
"data\r\n"] +
email.as_string().split("\n") + [".\r\n"],
lambda self: \
smtp_account.send_email(email))
test_func()
def test_send_email_esmtp_auth(self):
model.db_connect()
smtp_account = SMTPAccount(user=User(jid="user1@test.com"),
name="account11",
jid="account11@jmc.test.com")
smtp_account.host = "localhost"
smtp_account.port = 1025
smtp_account.login = "user"
smtp_account.password = "pass"
model.db_disconnect()
email = smtp_account.create_email("from@test.com",
"to@test.com",
"subject",
"body")
test_func = self.make_test(["220 localhost ESMTP\r\n",
"250-localhost Hello 127.0.0.1\r\n"
+ "250-SIZE 52428800\r\n"
+ "250-AUTH PLAIN LOGIN CRAM-MD5\r\n"
+ "250-PIPELINING\r\n"
+ "250 HELP\r\n",
"334 ZGF4IDNmNDM2NzY0YzBhNjgyMTQ1MzhhZGNiMjE2YTYxZjRm\r\n",
"235 Authentication succeeded\r\n",
"250 OK\r\n",
"250 Accepted\r\n",
"354 Enter message\r\n",
None, None, None, None,
None, None, None, None,
"250 OK\r\n"],
["ehlo .*\r\n",
"AUTH CRAM-MD5\r\n",
".*\r\n",
"mail FROM:<" + str(email['From']) + ">.*",
"rcpt TO:<" + str(email['To']) + ">\r\n",
"data\r\n"] +
email.as_string().split("\n") + [".\r\n"],
lambda self: \
smtp_account.send_email(email))
test_func()
def test_send_email_esmtp_auth_method2(self):
model.db_connect()
smtp_account = SMTPAccount(user=User(jid="user1@test.com"),
name="account11",
jid="account11@jmc.test.com")
smtp_account.host = "localhost"
smtp_account.port = 1025
smtp_account.login = "user"
smtp_account.password = "pass"
model.db_disconnect()
email = smtp_account.create_email("from@test.com",
"to@test.com",
"subject",
"body")
test_func = self.make_test(["220 localhost ESMTP\r\n",
"250-localhost Hello 127.0.0.1\r\n"
+ "250-SIZE 52428800\r\n"
+ "250-AUTH PLAIN LOGIN CRAM-MD5\r\n"
+ "250-PIPELINING\r\n"
+ "250 HELP\r\n",
"334 ZGF4IDNmNDM2NzY0YzBhNjgyMTQ1MzhhZGNiMjE2YTYxZjRm\r\n",
"535 Incorrect Authentication data\r\n",
"334 asd235r4\r\n",
"235 Authentication succeeded\r\n",
"250 OK\r\n",
"250 Accepted\r\n",
"354 Enter message\r\n",
None, None, None, None,
None, None, None, None,
"250 OK\r\n"],
["ehlo .*\r\n",
"AUTH CRAM-MD5\r\n",
".*\r\n",
"AUTH LOGIN .*\r\n",
".*\r\n",
"mail FROM:<" + str(email['From']) + ">.*",
"rcpt TO:<" + str(email['To']) + ">\r\n",
"data\r\n"] +
email.as_string().split("\n") + [".\r\n"],
lambda self: \
smtp_account.send_email(email))
test_func()
def test_send_email_esmtp_auth_method_with_no_suitable_auth_method_error(self):
model.db_connect()
smtp_account = SMTPAccount(user=User(jid="user1@test.com"),
name="account11",
jid="account11@jmc.test.com")
smtp_account.host = "localhost"
smtp_account.port = 1025
smtp_account.login = "user"
smtp_account.password = "pass"
model.db_disconnect()
email = smtp_account.create_email("from@test.com",
"to@test.com",
"subject",
"body")
test_func = self.make_test(["220 localhost ESMTP\r\n",
"250-localhost Hello 127.0.0.1\r\n"
+ "250-SIZE 52428800\r\n"
+ "250-AUTH PLAIN LOGIN DIGEST-MD5\r\n"
+ "250-PIPELINING\r\n"
+ "250 HELP\r\n",
"334 asd235r4\r\n",
"235 Authentication succeeded\r\n",
"250 OK\r\n",
"250 Accepted\r\n",
"354 Enter message\r\n",
None, None, None, None,
None, None, None, None,
"250 OK\r\n"],
["ehlo .*\r\n",
"AUTH LOGIN .*\r\n",
".*\r\n",
"mail FROM:<" + str(email['From']) + ">.*",
"rcpt TO:<" + str(email['To']) + ">\r\n",
"data\r\n"] +
email.as_string().split("\n") + [".\r\n"],
lambda self: \
smtp_account.send_email(email))
test_func()
def test_get_default_status_msg(self):
"""
        Get default status message for SMTPAccount.
"""
smtp_account = SMTPAccount(user=User(jid="user1@test.com"),
name="account11",
jid="account11@jmc.test.com")
smtp_account.host = "localhost"
smtp_account.port = 1025
smtp_account.login = "user"
smtp_account.password = "pass"
status_msg = smtp_account.get_default_status_msg(Lang.en)
self.assertEquals(status_msg, "smtp://user@localhost:1025")
def test_get_default_status_msg_ssl(self):
"""
        Get default status message for SMTPAccount with TLS.
"""
smtp_account = SMTPAccount(user=User(jid="user1@test.com"),
name="account11",
jid="account11@jmc.test.com")
smtp_account.host = "localhost"
smtp_account.port = 1025
smtp_account.login = "user"
smtp_account.password = "pass"
smtp_account.tls = True
status_msg = smtp_account.get_default_status_msg(Lang.en)
self.assertEquals(status_msg, "smtps://user@localhost:1025")
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(AccountModule_TestCase, 'test'))
suite.addTest(unittest.makeSuite(MailAccount_TestCase, 'test'))
suite.addTest(unittest.makeSuite(POP3Account_TestCase, 'test'))
suite.addTest(unittest.makeSuite(IMAPAccount_TestCase, 'test'))
suite.addTest(unittest.makeSuite(AbstractSMTPAccount_TestCase, 'test'))
suite.addTest(unittest.makeSuite(SMTPAccount_TestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
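# The make_test factories above all follow one scripted-server pattern. A
# minimal sketch of that pattern in isolation (server.DummyServer is the test
# helper imported above; "account" stands for any connectable account object):
#
#   srv = server.DummyServer("localhost", 1110)
#   thread.start_new_thread(srv.serve, ())
#   srv.responses = ["+OK connected\r\n"]   # lines the fake server replies with
#   srv.queries = ["USER login\r\n"]        # patterns the client must send
#   account.connect()
#   account.disconnect()
#   assert srv.verify_queries()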
|
dax/jmc
|
src/jmc/model/tests/account.py
|
Python
|
gpl-2.0
| 54,705
|
"""
This file re-creates the major DFXML classes with an emphasis on type safety, serializability, and de-serializability.
With this module, reading disk images or DFXML files is done with the parse or iterparse functions. Writing DFXML files can be done with the DFXMLObject.print_dfxml function.
"""
__version__ = "0.4.5"
#Remaining roadmap to 1.0.0:
# * Documentation.
# * User testing.
# * Compatibility with the DFXML schema, version >1.1.1.
import logging
import re
import copy
import xml.etree.ElementTree as ET
import subprocess
import dfxml
import os
import sys
import struct
_logger = logging.getLogger(os.path.basename(__file__))
#Contains: (namespace, local name) qualified XML element name pairs
_warned_elements = set([])
_warned_byterun_attribs = set([])
#Contains: Unexpected 'facet' values on byte_runs elements.
_warned_byterun_facets = set([])
#Issue some log statements only once per program invocation.
_nagged_alloc = False
_warned_byterun_badtypecomp = False
XMLNS_REGXML = "http://www.forensicswiki.org/wiki/RegXML"
def _ET_tostring(e):
"""Between Python 2 and 3, there are some differences in the ElementTree library's tostring() behavior. One, the method balks at the "unicode" encoding in 2. Two, in 2, the XML prototype's output with every invocation. This method serves as a wrapper to deal with those issues."""
if sys.version_info[0] < 3:
tmp = ET.tostring(e, encoding="UTF-8")
if tmp[0:2] == "<?":
#Trim away first line; it's an XML prototype. This only appears in Python 2's ElementTree output.
return tmp[ tmp.find("?>\n")+3 : ]
else:
return tmp
else:
return ET.tostring(e, encoding="unicode")
def _boolcast(val):
"""Takes Boolean values, and 0 or 1 in string or integer form, and casts them all to Boolean. Preserves nulls. Balks at everything else."""
if val is None:
return None
if val in [True, False]:
return val
_val = val
if val in ["0", "1"]:
_val = int(val)
if _val in [0, 1]:
return _val == 1
_logger.debug("val = " + repr(val))
raise ValueError("Received a not-straightforwardly-Boolean value. Expected some form of 0, 1, True, or False.")
def _bytecast(val):
"""Casts a value as a byte string. If a character string, assumes a UTF-8 encoding."""
if val is None:
return None
if isinstance(val, bytes):
return val
return _strcast(val).encode("utf-8")
def _intcast(val):
"""Casts input integer or string to integer. Preserves nulls. Balks at everything else."""
if val is None:
return None
if isinstance(val, int):
return val
if isinstance(val, str):
if val[0] == "-":
if val[1:].isdigit():
return int(val)
else:
if val.isdigit():
return int(val)
_logger.debug("val = " + repr(val))
raise ValueError("Received a non-int-castable value. Expected an integer or an integer as a string.")
def _read_differential_annotations(annodict, element, annoset):
"""
Uses the shorthand-to-attribute mappings of annodict to translate attributes of element into annoset.
"""
#_logger.debug("annoset, before: %r." % annoset)
#Start with inverting the dictionary
_d = { annodict[k].replace("delta:",""):k for k in annodict }
#_logger.debug("Inverted dictionary: _d = %r" % _d)
for attr in element.attrib:
#_logger.debug("Looking for differential annotations: %r" % element.attrib)
(ns, an) = _qsplit(attr)
if an in _d and ns == dfxml.XMLNS_DELTA:
#_logger.debug("Found; adding %r." % _d[an])
annoset.add(_d[an])
#_logger.debug("annoset, after: %r." % annoset)
def _qsplit(tagname):
"""Requires string input. Returns namespace and local tag name as a pair. I could've sworn this was a basic implementation gimme, but ET.QName ain't it."""
_typecheck(tagname, str)
if tagname[0] == "{":
i = tagname.rfind("}")
return ( tagname[1:i], tagname[i+1:] )
else:
return (None, tagname)
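# For example (a sketch):
#   _qsplit("{http://www.forensicswiki.org/wiki/RegXML}hive")
#       ->  ("http://www.forensicswiki.org/wiki/RegXML", "hive")
#   _qsplit("hive")
#       ->  (None, "hive")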
def _strcast(val):
if val is None:
return None
return str(val)
def _typecheck(obj, classinfo):
if not isinstance(obj, classinfo):
_logger.info("obj = " + repr(obj))
if isinstance(classinfo, tuple):
raise TypeError("Expecting object to be one of the types %r." % (classinfo,))
else:
raise TypeError("Expecting object to be of type %r." % classinfo)
class DFXMLObject(object):
def __init__(self, *args, **kwargs):
self.command_line = kwargs.get("command_line")
self.version = kwargs.get("version")
self.sources = kwargs.get("sources", [])
self.dc = kwargs.get("dc", dict())
self.externals = kwargs.get("externals", OtherNSElementList())
self._namespaces = dict()
self._volumes = []
self._files = []
input_volumes = kwargs.get("volumes") or []
input_files = kwargs.get("files") or []
for v in input_volumes:
self.append(v)
for f in input_files:
self.append(f)
#Add default namespaces
self.add_namespace("", dfxml.XMLNS_DFXML)
self.add_namespace("dc", dfxml.XMLNS_DC)
def __iter__(self):
"""Yields all VolumeObjects, recursively their FileObjects, and the FileObjects directly attached to this DFXMLObject, in that order."""
for v in self._volumes:
yield v
for f in v:
yield f
for f in self._files:
yield f
def add_namespace(self, prefix, url):
self._namespaces[prefix] = url
ET.register_namespace(prefix, url)
def append(self, value):
if isinstance(value, VolumeObject):
self._volumes.append(value)
elif isinstance(value, FileObject):
self._files.append(value)
else:
_logger.debug("value = %r" % value)
raise TypeError("Expecting a VolumeObject or a FileObject. Got instead this type: %r." % type(value))
def iter_namespaces(self):
"""Yields (prefix, url) pairs of each namespace registered in this DFXMLObject."""
for prefix in self._namespaces:
yield (prefix, self._namespaces[prefix])
def populate_from_Element(self, e):
if "version" in e.attrib:
self.version = e.attrib["version"]
for ce in e.findall(".//*"):
(cns, cln) = _qsplit(ce.tag)
if cln == "command_line":
self.command_line = ce.text
elif cln == "image_filename":
self.sources.append(ce.text)
elif cns not in [dfxml.XMLNS_DFXML, ""]:
#Put all non-DFXML-namespace elements into the externals list.
self.externals.append(ce)
def print_dfxml(self, output_fh=sys.stdout):
"""Memory-efficient DFXML document printer. However, it assumes the whole element tree is already constructed."""
pe = self.to_partial_Element()
dfxml_wrapper = _ET_tostring(pe)
dfxml_foot = "</dfxml>"
#Check for an empty element
if dfxml_wrapper.strip()[-3:] == " />":
dfxml_head = dfxml_wrapper.strip()[:-3] + ">"
elif dfxml_wrapper.strip()[-2:] == "/>":
dfxml_head = dfxml_wrapper.strip()[:-2] + ">"
else:
dfxml_head = dfxml_wrapper.strip()[:-len(dfxml_foot)]
output_fh.write("""<?xml version="1.0"?>\n""")
output_fh.write(dfxml_head)
output_fh.write("\n")
_logger.debug("Writing %d volume objects." % len(self._volumes))
for v in self._volumes:
v.print_dfxml(output_fh)
output_fh.write("\n")
_logger.debug("Writing %d file objects." % len(self._files))
for f in self._files:
e = f.to_Element()
output_fh.write(_ET_tostring(e))
output_fh.write("\n")
output_fh.write(dfxml_foot)
output_fh.write("\n")
def to_Element(self):
outel = self.to_partial_Element()
for e in self.externals:
outel.append(e)
for v in self._volumes:
tmpel = v.to_Element()
outel.append(tmpel)
for f in self._files:
tmpel = f.to_Element()
outel.append(tmpel)
return outel
def to_dfxml(self):
"""Serializes the entire DFXML document tree into a string. Then returns that string. RAM-intensive. Most will want to use print_dfxml() instead"""
return _ET_tostring(self.to_Element())
def to_partial_Element(self):
outel = ET.Element("dfxml")
tmpel0 = ET.Element("metadata")
for key in sorted(self.dc):
_typecheck(key, str)
if ":" in key:
raise ValueError("Dublin Core key-value entries should have keys without the colon character. If this causes an interesting namespace issue for you, please report it as a bug.")
tmpel1 = ET.Element("dc:" + key)
tmpel1.text = self.dc[key]
tmpel0.append(tmpel1)
outel.append(tmpel0)
if self.command_line:
tmpel0 = ET.Element("creator")
tmpel1 = ET.Element("execution_environment")
tmpel2 = ET.Element("command_line")
tmpel2.text = self.command_line
tmpel1.append(tmpel2)
tmpel0.append(tmpel1)
outel.append(tmpel0)
if len(self.sources) > 0:
tmpel0 = ET.Element("source")
for source in self.sources:
tmpel1 = ET.Element("image_filename")
tmpel1.text = source
tmpel0.append(tmpel1)
outel.append(tmpel0)
if self.version:
outel.attrib["version"] = self.version
#Apparently, namespace setting is only available with the write() function, which is memory-impractical for significant uses of DFXML.
#Ref: http://docs.python.org/3.3/library/xml.etree.elementtree.html#xml.etree.ElementTree.ElementTree.write
for prefix in self._namespaces:
attrib_name = "xmlns"
if prefix != "":
attrib_name += ":" + prefix
outel.attrib[attrib_name] = self._namespaces[prefix]
return outel
@property
def command_line(self):
return self._command_line
@command_line.setter
def command_line(self, value):
self._command_line = _strcast(value)
@property
def dc(self):
"""The Dublin Core dictionary of key-value pairs for this document. Typically, "type" is "Hash List", or "Disk Image". Keys should be strings not containing colons, values should be strings. If this causes an issue for you, please report it as a bug."""
return self._dc
@dc.setter
def dc(self, value):
_typecheck(value, dict)
self._dc = value
@property
def externals(self):
"""(This property behaves the same as FileObject.externals.)"""
return self._externals
@externals.setter
def externals(self, val):
_typecheck(val, OtherNSElementList)
self._externals = val
@property
def files(self):
"""List of file objects directly attached to this DFXMLObject. No setter for now."""
return self._files
@property
def namespaces(self):
raise AttributeError("The namespaces dictionary should not be directly accessed; instead, use .iter_namespaces().")
@property
def sources(self):
return self._sources
@sources.setter
def sources(self, value):
if not value is None:
_typecheck(value, list)
self._sources = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = _strcast(value)
@property
def volumes(self):
"""List of volume objects directly attached to this DFXMLObject. No setter for now."""
return self._volumes
class RegXMLObject(object):
def __init__(self, *args, **kwargs):
self.command_line = kwargs.get("command_line")
self.interpreter = kwargs.get("interpreter")
self.metadata = kwargs.get("metadata")
self.program = kwargs.get("program")
self.program_version = kwargs.get("program_version")
self.sources = kwargs.get("sources", [])
self.version = kwargs.get("version")
self._hives = []
self._cells = []
self._namespaces = dict()
input_hives = kwargs.get("hives") or [] # In case kwargs["hives"] = None.
input_cells = kwargs.get("cells") or []
for hive in input_hives:
self.append(hive)
for cell in input_cells:
            self.append(cell)
#Add default namespaces
#TODO This will cause a problem when the Objects bindings are used for a DFXML document and RegXML document in the same program.
self.add_namespace("", XMLNS_REGXML)
def __iter__(self):
"""Yields all HiveObjects, recursively their CellObjects, and the CellObjects directly attached to this RegXMLObject, in that order."""
for h in self._hives:
yield h
for c in h:
yield c
for c in self._cells:
yield c
def add_namespace(self, prefix, url):
self._namespaces[prefix] = url
ET.register_namespace(prefix, url)
def append(self, value):
if isinstance(value, HiveObject):
self._hives.append(value)
elif isinstance(value, CellObject):
self._cells.append(value)
else:
_logger.debug("value = %r" % value)
raise TypeError("Expecting a HiveObject or a CellObject. Got instead this type: %r." % type(value))
def print_regxml(self, output_fh=sys.stdout):
"""Serializes and prints the entire object, without constructing the whole tree."""
regxml_wrapper = _ET_tostring(self.to_partial_Element())
#_logger.debug("regxml_wrapper = %r." % regxml_wrapper)
regxml_foot = "</regxml>"
#Check for an empty element
if regxml_wrapper.strip()[-3:] == " />":
regxml_head = regxml_wrapper.strip()[:-3] + ">"
elif regxml_wrapper.strip()[-2:] == "/>":
regxml_head = regxml_wrapper.strip()[:-2] + ">"
else:
regxml_head = regxml_wrapper.strip()[:-len(regxml_foot)]
output_fh.write(regxml_head)
output_fh.write("\n")
for hive in self._hives:
hive.print_regxml(output_fh)
output_fh.write(regxml_foot)
output_fh.write("\n")
def to_Element(self):
outel = self.to_partial_Element()
for hive in self._hives:
tmpel = hive.to_Element()
outel.append(tmpel)
for cell in self._cells:
tmpel = cell.to_Element()
outel.append(tmpel)
return outel
def to_partial_Element(self):
"""
Creates the wrapping RegXML element. No hives, no cells. Saves on creating an entire Element tree in memory.
"""
outel = ET.Element("regxml")
if self.version:
outel.attrib["version"] = self.version
if self.program or self.program_version:
tmpel0 = ET.Element("creator")
if self.program:
tmpel1 = ET.Element("program")
tmpel1.text = self.program
tmpel0.append(tmpel1)
if self.program_version:
tmpel1 = ET.Element("version")
tmpel1.text = self.program_version
tmpel0.append(tmpel1)
outel.append(tmpel0)
if self.command_line:
tmpel0 = ET.Element("execution_environment")
            if self.interpreter:
                tmpel1 = ET.Element("interpreter")
                tmpel1.text = self.interpreter
                tmpel0.append(tmpel1)
tmpel1 = ET.Element("command_line")
tmpel1.text = self.command_line
tmpel0.append(tmpel1)
#TODO Note libraries used at run-time
outel.append(tmpel0)
if len(self.sources) > 0:
tmpel0 = ET.Element("source")
for source in self.sources:
tmpel1 = ET.Element("image_filename")
tmpel1.text = source
tmpel0.append(tmpel1)
outel.append(tmpel0)
#Apparently, namespace setting is only available with the write() function, which is memory-impractical for significant uses of RegXML.
#Ref: http://docs.python.org/3.3/library/xml.etree.elementtree.html#xml.etree.ElementTree.ElementTree.write
for prefix in self._namespaces:
attrib_name = "xmlns"
if prefix != "":
attrib_name += ":" + prefix
outel.attrib[attrib_name] = self._namespaces[prefix]
return outel
def to_regxml(self):
"""Serializes the entire RegXML document tree into a string. Returns that string. RAM-intensive. Most will want to use print_regxml() instead."""
return _ET_tostring(self.to_Element())
class VolumeObject(object):
_all_properties = set([
"annos",
"allocated_only",
"block_count",
"block_size",
"byte_runs",
"externals",
"first_block",
"ftype",
"ftype_str",
"last_block",
"partition_offset",
"original_volume",
"sector_size"
])
_diff_attr_names = {
"new":"delta:new_volume",
"deleted":"delta:deleted_volume",
"modified":"delta:modified_volume",
"matched":"delta:matched"
}
#TODO There may be need in the future to compare the annotations as well. It complicates make_differential_dfxml too much for now.
_incomparable_properties = set([
"annos"
])
def __init__(self, *args, **kwargs):
self._files = []
self._annos = set()
self._diffs = set()
for prop in VolumeObject._all_properties:
if prop in ["annos", "files"]:
continue
elif prop == "externals":
setattr(self, prop, kwargs.get(prop, OtherNSElementList()))
else:
setattr(self, prop, kwargs.get(prop))
def __iter__(self):
"""Yields all FileObjects directly attached to this VolumeObject."""
for f in self._files:
yield f
def __repr__(self):
parts = []
for prop in VolumeObject._all_properties:
#Skip outputting the files list.
if prop == "files":
continue
val = getattr(self, prop)
if not val is None:
parts.append("%s=%r" % (prop, val))
return "VolumeObject(" + ", ".join(parts) + ")"
def append(self, value):
_typecheck(value, FileObject)
self._files.append(value)
def compare_to_original(self):
self._diffs = self.compare_to_other(self.original_volume, True)
def compare_to_other(self, other, ignore_original=False):
"""Returns a set of all the properties found to differ."""
_typecheck(other, VolumeObject)
diffs = set()
for prop in VolumeObject._all_properties:
if prop in VolumeObject._incomparable_properties:
continue
if ignore_original and prop == "original_volume":
continue
#_logger.debug("getattr(self, %r) = %r" % (prop, getattr(self, prop)))
#_logger.debug("getattr(other, %r) = %r" % (prop, getattr(other, prop)))
#Allow file system type to be case-insensitive
if prop == "ftype_str":
o = getattr(other, prop)
if o: o = o.lower()
s = getattr(self, prop)
if s: s = s.lower()
if s != o:
diffs.add(prop)
else:
if getattr(self, prop) != getattr(other, prop):
diffs.add(prop)
return diffs
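    # For example (a sketch; note ftype_str is compared case-insensitively):
    #   v1 = VolumeObject(ftype_str="NTFS")
    #   v2 = VolumeObject(ftype_str="ntfs")
    #   v1.compare_to_other(v2)   ->  set()
    #   v2.block_size = 4096
    #   v1.compare_to_other(v2)   ->  {"block_size"}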
def populate_from_Element(self, e):
global _warned_elements
_typecheck(e, (ET.Element, ET.ElementTree))
#_logger.debug("e = %r" % e)
#Read differential annotations
_read_differential_annotations(VolumeObject._diff_attr_names, e, self.annos)
#Split into namespace and tagname
(ns, tn) = _qsplit(e.tag)
assert tn in ["volume", "original_volume"]
#Look through direct-child elements to populate run array
for ce in e.findall("./*"):
#_logger.debug("ce = %r" % ce)
(cns, ctn) = _qsplit(ce.tag)
#_logger.debug("cns = %r" % cns)
#_logger.debug("ctn = %r" % ctn)
if ctn == "byte_runs":
self.byte_runs = ByteRuns()
self.byte_runs.populate_from_Element(ce)
elif ctn == "original_volume":
self.original_volume = VolumeObject()
self.original_volume.populate_from_Element(ce)
elif ctn in VolumeObject._all_properties:
#_logger.debug("ce.text = %r" % ce.text)
setattr(self, ctn, ce.text)
#_logger.debug("getattr(self, %r) = %r" % (ctn, getattr(self, ctn)))
elif cns not in [dfxml.XMLNS_DFXML, ""]:
#Put all non-DFXML-namespace elements into the externals list.
self.externals.append(ce)
else:
if (cns, ctn) not in _warned_elements:
_warned_elements.add((cns, ctn))
_logger.warning("Unsure what to do with this element in a VolumeObject: %r" % ce)
def print_dfxml(self, output_fh=sys.stdout):
pe = self.to_partial_Element()
dfxml_wrapper = _ET_tostring(pe)
if len(pe) == 0 and len(self._files) == 0:
output_fh.write(dfxml_wrapper)
return
dfxml_foot = "</volume>"
#Deal with an empty element being printed as <elem/>
if len(pe) == 0:
replaced_dfxml_wrapper = dfxml_wrapper.replace(" />", ">")
dfxml_head = replaced_dfxml_wrapper
else:
dfxml_head = dfxml_wrapper.strip()[:-len(dfxml_foot)]
output_fh.write(dfxml_head)
output_fh.write("\n")
_logger.debug("Writing %d file objects for this volume." % len(self._files))
for f in self._files:
e = f.to_Element()
output_fh.write(_ET_tostring(e))
output_fh.write("\n")
output_fh.write(dfxml_foot)
output_fh.write("\n")
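    # Example: a minimal sketch of building a volume by hand and streaming it
    # as DFXML; the property values here are hypothetical.
    #
    #   v = VolumeObject()
    #   v.partition_offset = 32256
    #   v.ftype_str = "ntfs"
    #   f = FileObject()
    #   f.filename = "test.txt"
    #   v.append(f)
    #   v.print_dfxml(sys.stdout)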
def to_Element(self):
outel = self.to_partial_Element()
for e in self.externals:
outel.append(e)
for f in self._files:
tmpel = f.to_Element()
outel.append(tmpel)
return outel
def to_partial_Element(self):
"""Returns the volume element with its properties, except for the child fileobjects. Properties are appended in DFXML schema order."""
outel = ET.Element("volume")
annos_whittle_set = copy.deepcopy(self.annos)
diffs_whittle_set = copy.deepcopy(self.diffs)
#Add differential annotations
for annodiff in VolumeObject._diff_attr_names:
if annodiff in annos_whittle_set:
outel.attrib[VolumeObject._diff_attr_names[annodiff]] = "1"
annos_whittle_set.remove(annodiff)
if len(annos_whittle_set) > 0:
_logger.warning("Failed to export some differential annotations: %r." % annos_whittle_set)
if self.byte_runs:
outel.append(self.byte_runs.to_Element())
def _append_el(prop, value):
tmpel = ET.Element(prop)
_keep = False
if not value is None:
tmpel.text = str(value)
_keep = True
if prop in self.diffs:
tmpel.attrib["delta:changed_property"] = "1"
diffs_whittle_set.remove(prop)
_keep = True
if _keep:
outel.append(tmpel)
def _append_str(prop):
value = getattr(self, prop)
_append_el(prop, value)
def _append_bool(prop):
value = getattr(self, prop)
if not value is None:
value = "1" if value else "0"
_append_el(prop, value)
for prop in [
"partition_offset",
"sector_size",
"block_size",
"ftype",
"ftype_str",
"block_count",
"first_block",
"last_block"
]:
_append_str(prop)
#Output the one Boolean property
_append_bool("allocated_only")
#Output the original volume's properties
if not self.original_volume is None or "original_volume" in diffs_whittle_set:
#Skip FileObject list, if any
if self.original_volume is None:
tmpel = ET.Element("delta:original_volume")
else:
tmpel = self.original_volume.to_partial_Element()
tmpel.tag = "delta:original_volume"
if "original_volume" in diffs_whittle_set:
tmpel.attrib["delta:changed_property"] = "1"
outel.append(tmpel)
if len(diffs_whittle_set) > 0:
_logger.warning("Did not annotate all of the differing properties of this volume. Remaining properties: %r." % diffs_whittle_set)
return outel
@property
def allocated_only(self):
return self._allocated_only
@allocated_only.setter
def allocated_only(self, val):
self._allocated_only = _boolcast(val)
@property
def annos(self):
"""Set of differential annotations. Expected members are the keys of this class's _diff_attr_names dictionary."""
return self._annos
@annos.setter
def annos(self, val):
_typecheck(val, set)
self._annos = val
@property
def block_count(self):
return self._block_count
@block_count.setter
def block_count(self, val):
self._block_count = _intcast(val)
@property
def block_size(self):
return self._block_size
@block_size.setter
def block_size(self, val):
self._block_size = _intcast(val)
@property
def diffs(self):
return self._diffs
@property
def externals(self):
"""(This property behaves the same as FileObject.externals.)"""
return self._externals
@externals.setter
def externals(self, val):
_typecheck(val, OtherNSElementList)
self._externals = val
@property
def first_block(self):
return self._first_block
@first_block.setter
def first_block(self, val):
self._first_block = _intcast(val)
@property
def ftype(self):
return self._ftype
@ftype.setter
def ftype(self, val):
self._ftype = _intcast(val)
@property
def ftype_str(self):
return self._ftype_str
@ftype_str.setter
def ftype_str(self, val):
self._ftype_str = _strcast(val)
@property
def last_block(self):
return self._last_block
@last_block.setter
def last_block(self, val):
self._last_block = _intcast(val)
@property
def original_volume(self):
return self._original_volume
@original_volume.setter
def original_volume(self, val):
if not val is None:
_typecheck(val, VolumeObject)
        self._original_volume = val
@property
def partition_offset(self):
return self._partition_offset
@partition_offset.setter
def partition_offset(self, val):
self._partition_offset = _intcast(val)
@property
def sector_size(self):
return self._sector_size
@sector_size.setter
def sector_size(self, val):
self._sector_size = _intcast(val)
class HiveObject(object):
_all_properties = set([
"annos",
"mtime",
"filename",
"original_fileobject",
"original_hive"
])
_diff_attr_names = {
"new":"delta:new_hive",
"deleted":"delta:deleted_hive",
"modified":"delta:modified_hive",
"matched":"delta:matched"
}
_incomparable_properties = set([
"annos"
])
def __init__(self, *args, **kwargs):
self._cells = []
self._annos = set()
self._diffs = set()
for prop in HiveObject._all_properties:
if prop in ["annos", "cells"]:
continue
setattr(self, prop, kwargs.get(prop))
def __iter__(self):
"""Yields all CellObjects directly attached to this HiveObject."""
for c in self._cells:
yield c
def append(self, value):
_typecheck(value, CellObject)
self._cells.append(value)
def compare_to_original(self):
self._diffs = self.compare_to_other(self.original_hive, True)
def compare_to_other(self, other, ignore_original=False):
"""Returns a set of all the properties found to differ."""
_typecheck(other, HiveObject)
diffs = set()
for prop in HiveObject._all_properties:
if prop in HiveObject._incomparable_properties:
continue
if ignore_original and prop == "original_hive":
continue
            if getattr(self, prop) != getattr(other, prop):
                diffs.add(prop)
return diffs
def print_regxml(self, output_fh=sys.stdout):
pe = self.to_partial_Element()
xml_wrapper = _ET_tostring(pe)
xml_foot = "</hive>"
#Check for an empty element
if xml_wrapper.strip()[-3:] == " />":
xml_head = xml_wrapper.strip()[:-3] + ">"
elif xml_wrapper.strip()[-2:] == "/>":
xml_head = xml_wrapper.strip()[:-2] + ">"
else:
xml_head = xml_wrapper.strip()[:-len(xml_foot)]
output_fh.write(xml_head)
output_fh.write("\n")
for cell in self._cells:
output_fh.write(cell.to_regxml())
output_fh.write("\n")
output_fh.write(xml_foot)
output_fh.write("\n")
def to_Element(self):
outel = self.to_partial_Element()
for cell in self._cells:
tmpel = cell.to_Element()
outel.append(tmpel)
return outel
def to_partial_Element(self):
outel = ET.Element("hive")
if self.filename:
tmpel = ET.Element("filename")
tmpel.text = self.filename
outel.append(tmpel)
if self.mtime:
tmpel = self.mtime.to_Element()
outel.append(tmpel)
if self.original_fileobject:
tmpel = self.original_fileobject.to_Element()
#NOTE: "delta" namespace intentionally omitted.
tmpel.tag = "original_fileobject"
outel.append(tmpel)
return outel
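    # Example: a minimal sketch of assembling a hive and serializing it as
    # RegXML; the hive path and key path are hypothetical.
    #
    #   h = HiveObject()
    #   h.filename = "/Windows/System32/config/SOFTWARE"
    #   c = CellObject()
    #   c.name_type = "k"
    #   c.cellpath = "SOFTWARE\\Microsoft"
    #   h.append(c)
    #   h.print_regxml(sys.stdout)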
@property
def annos(self):
"""Set of differential annotations. Expected members are the keys of this class's _diff_attr_names dictionary."""
return self._annos
@annos.setter
def annos(self, val):
_typecheck(val, set)
self._annos = val
@property
def filename(self):
"""Path of the hive file within the parent file system."""
return self._filename
@filename.setter
def filename(self, val):
self._filename = _strcast(val)
@property
def mtime(self):
return self._mtime
@mtime.setter
def mtime(self, val):
if val is None:
self._mtime = None
elif isinstance(val, TimestampObject):
self._mtime = val
else:
checked_val = TimestampObject(val, name="mtime")
self._mtime = checked_val
@property
def original_fileobject(self):
return self._original_fileobject
@original_fileobject.setter
def original_fileobject(self, val):
if not val is None:
_typecheck(val, FileObject)
self._original_fileobject = val
@property
def original_hive(self):
return self._original_hive
@original_hive.setter
def original_hive(self, val):
if not val is None:
_typecheck(val, HiveObject)
self._original_hive = val
class ByteRun(object):
_all_properties = set([
"img_offset",
"fs_offset",
"file_offset",
"fill",
"len",
"type",
"uncompressed_len",
"sha1", # TL: Added sha1 property
"md5", # TL: Added md5 property
"entropy" # TL: Added entropy property
])
def __init__(self, *args, **kwargs):
for prop in ByteRun._all_properties:
setattr(self, prop, kwargs.get(prop))
def __add__(self, other):
"""
Joins two ByteRun objects into a single run if possible. Returns a new object of the concatenation if successful, None if not.
"""
_typecheck(other, ByteRun)
#Don't glom fills of different values
if self.fill != other.fill:
return None
#Don't glom typed byte runs (particularly since type has been observed to be 'resident')
if self.type != other.type:
return None
#Don't glom compressed runs
if not self.uncompressed_len is None or not other.uncompressed_len is None:
return None
if None in [self.len, other.len]:
return None
for prop in ["img_offset", "fs_offset", "file_offset"]:
if None in [getattr(self, prop), getattr(other, prop)]:
continue
if getattr(self, prop) + self.len == getattr(other, prop):
retval = copy.deepcopy(self)
retval.len += other.len
return retval
return None
    def __eq__(self, other):
        global _warned_byterun_badtypecomp
        #Check type
        if other is None:
            return False
        if not isinstance(other, ByteRun):
            if not _warned_byterun_badtypecomp:
                _logger.warning("A ByteRun comparison was called against a non-ByteRun object: " + repr(other) + ".")
                _warned_byterun_badtypecomp = True
            return False
#Check values
return \
self.img_offset == other.img_offset and \
self.fs_offset == other.fs_offset and \
self.file_offset == other.file_offset and \
self.fill == other.fill and \
self.len == other.len and \
self.type == other.type and \
self.uncompressed_len == other.uncompressed_len
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
parts = []
for prop in ByteRun._all_properties:
val = getattr(self, prop)
if not val is None:
parts.append("%s=%r" % (prop, val))
return "ByteRun(" + ", ".join(parts) + ")"
def populate_from_Element(self, e):
_typecheck(e, (ET.Element, ET.ElementTree))
#Split into namespace and tagname
(ns, tn) = _qsplit(e.tag)
# TL: Added "run" to check (for fiwalk-0.6.3.exe)
assert (tn == "byte_run" or tn == "run")
copied_attrib = copy.deepcopy(e.attrib)
#Populate run properties from element attributes
for prop in ByteRun._all_properties:
if prop in copied_attrib:
val = copied_attrib.get(prop)
if not val is None:
setattr(self, prop, val)
del copied_attrib[prop]
#Note remaining properties
for prop in copied_attrib:
if prop not in _warned_byterun_attribs:
_warned_byterun_attribs.add(prop)
_logger.warning("No instructions present for processing this attribute found on a byte run: %r." % prop)
        # TL: Quick fix to read in block hashes for analysis
        # Need to revisit in future for better error checking
        for ce in e.findall("./*"):
            (cns, ctn) = _qsplit(ce.tag)
            if ctn == "hashdigest":
                setattr(self, "type", ce.attrib["type"])
                if ce.attrib["type"].lower() == "md5":
                    setattr(self, "md5", ce.text)
                elif ce.attrib["type"].lower() == "sha1":
                    setattr(self, "sha1", ce.text)
def to_Element(self):
outel = ET.Element("byte_run")
        # TL: Added support to append a child hashdigest element
        def _append_hash(name, value):
            #ByteRun objects carry no differential annotations, so only append when a value is present.
            if not value is None:
                tmpel = ET.Element("hashdigest")
                tmpel.attrib["type"] = name
                tmpel.text = value
                outel.append(tmpel)
        for prop in ByteRun._all_properties:
            val = getattr(self, prop)
            #Skip null properties
            if val is None:
                continue
            # TL: Added support to populate child hashdigest elements
            if prop == "md5":
                _append_hash("MD5", self.md5)
                continue
            elif prop == "sha1":
                _append_hash("SHA1", self.sha1)
                continue
            elif prop == "type":
                #The type property is repurposed by the hashdigest quick fix in populate_from_Element; don't serialize it as a byte_run attribute.
                continue
elif isinstance(val, bytes):
outel.attrib[prop] = str(struct.unpack("b", val)[0])
else:
outel.attrib[prop] = str(val)
return outel
# TL: Added sha1 property setter and getter
@property
def sha1(self):
return self._sha1
@sha1.setter
def sha1(self, val):
self._sha1 = _strcast(val)
# TL: Added md5 property setter and getter
@property
def md5(self):
return self._md5
@md5.setter
def md5(self, val):
self._md5 = _strcast(val)
# TL: Added entropy property setter and getter
@property
def entropy(self):
return self._entropy
@entropy.setter
def entropy(self, val):
self._entropy = _strcast(val)
@property
def file_offset(self):
return self._file_offset
@file_offset.setter
def file_offset(self, val):
self._file_offset = _intcast(val)
@property
def fill(self):
"""
At the moment, the fill value is assumed to be a single byte. The value you receive from this property wll be None or a byte. Setting fill to the string "0" will return the null byte when retrieved later.
For now, setting to any digital string (e.g. "41") will return a byte representing the integer casting string (e.g. the number 41), but this is subject to change pending some discussion.
"""
return self._fill
@fill.setter
def fill(self, val):
if val is None:
self._fill = val
elif val == "0":
self._fill = b'\x00'
elif isinstance(val, bytes):
if len(val) != 1:
                raise NotImplementedError("Received a %d-length fill byte string for a byte run. Only 1-byte fill strings are accepted for now, pending further discussion." % len(val))
self._fill = val
elif isinstance(val, int):
#This is the easiest way between Python 2 and 3. int.to_bytes would be better, but that is only in >=3.2.
self._fill = struct.pack("b", val)
        elif isinstance(val, str) and val.isdigit():
            #Recurse, changing type
            self.fill = int(val)
        else:
            raise ValueError("Unexpected fill value: %r." % (val,))
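    # Example: fill value coercion as described in the property docstring.
    #
    #   br = ByteRun()
    #   br.fill = "0"   # Stored as the null byte, b'\x00'.
    #   br.fill = 65    # Stored as struct.pack("b", 65) == b'A'.
    #   br.fill = "65"  # Digit strings recurse through the integer path, also b'A'.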
@property
def fs_offset(self):
return self._fs_offset
@fs_offset.setter
def fs_offset(self, val):
self._fs_offset = _intcast(val)
@property
def img_offset(self):
return self._img_offset
@img_offset.setter
def img_offset(self, val):
self._img_offset = _intcast(val)
@property
def len(self):
return self._len
@len.setter
def len(self, val):
self._len = _intcast(val)
@property
def type(self):
return self._type
@type.setter
def type(self, val):
self._type = _strcast(val)
@property
def uncompressed_len(self):
return self._uncompressed_len
@uncompressed_len.setter
def uncompressed_len(self, val):
self._uncompressed_len = _intcast(val)
class ByteRuns(object):
"""
A list-like object for ByteRun objects.
"""
#Must define these methods to adhere to the list protocol:
#__len__
#__getitem__
#__setitem__
#__delitem__
#__iter__
#append
#
#Refs:
#http://www.rafekettler.com/magicmethods.html
#http://stackoverflow.com/a/8841520
_facet_values = [None, "data", "inode", "name"]
def __init__(self, run_list=None, **kwargs):
self._facet = kwargs.get("facet")
self._listdata = []
if isinstance(run_list, list):
for run in run_list:
self.append(run)
def __delitem__(self, key):
del self._listdata[key]
def __eq__(self, other):
"""Compares the byte run lists and the facet (allowing a null facet to match "data")."""
#Check type
if other is None:
return False
_typecheck(other, ByteRuns)
if self.facet != other.facet:
if set([self.facet, other.facet]) != set([None, "data"]):
return False
if len(self) != len(other):
#_logger.debug("len(self) = %d" % len(self))
#_logger.debug("len(other) = %d" % len(other))
return False
for (sbr_index, sbr) in enumerate(self):
obr = other[sbr_index]
#_logger.debug("sbr_index = %d" % sbr_index)
#_logger.debug("sbr = %r" % sbr)
#_logger.debug("obr = %r" % obr)
if sbr != obr:
return False
return True
def __getitem__(self, key):
return self._listdata.__getitem__(key)
def __iter__(self):
return iter(self._listdata)
def __len__(self):
return self._listdata.__len__()
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
parts = []
for run in self:
parts.append(repr(run))
maybe_facet = ""
if self.facet:
maybe_facet = "facet=%r, " % self.facet
return "ByteRuns(" + maybe_facet + "run_list=[" + ", ".join(parts) + "])"
def __setitem__(self, key, value):
_typecheck(value, ByteRun)
self._listdata[key] = value
def append(self, value):
"""
Appends a ByteRun object to this container's list.
"""
_typecheck(value, ByteRun)
self._listdata.append(value)
def glom(self, value):
"""
Appends a ByteRun object to this container's list, after attempting to join the run with the last run already stored.
"""
_typecheck(value, ByteRun)
if len(self._listdata) == 0:
self.append(value)
else:
last_run = self._listdata[-1]
maybe_new_run = last_run + value
if maybe_new_run is None:
self.append(value)
else:
self._listdata[-1] = maybe_new_run
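    # Example: glom() coalesces a run that continues the previous one; the
    # offsets below are hypothetical.
    #
    #   brs = ByteRuns()
    #   brs.glom(ByteRun(img_offset=0, len=512))
    #   brs.glom(ByteRun(img_offset=512, len=512))
    #   assert len(brs) == 1
    #   assert brs[0].len == 1024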
def iter_contents(self, raw_image, buffer_size=1048576, sector_size=512, errlog=None, statlog=None):
"""
Generator. Yields contents, as byte strings one block at a time, given a backing raw image path. Relies on The SleuthKit's img_cat, so contents can be extracted from any disk image type that TSK supports.
@param buffer_size The maximum size of the byte strings yielded.
@param sector_size The size of a disk sector in the raw image. Required by img_cat.
"""
if not isinstance(raw_image, str):
raise TypeError("iter_contents needs the string path to the image file. Received: %r." % raw_image)
stderr_fh = None
if not errlog is None:
stderr_fh = open(errlog, "wb")
        status_fh = None
        if not statlog is None:
            status_fh = open(statlog, "w")
#The exit status of the last img_cat.
last_status = None
try:
for run in self:
if run.len is None:
raise AttributeError("Byte runs can't be extracted if a run length is undefined.")
len_to_read = run.len
#If we have a fill character, just pump out that character
if not run.fill is None and len(run.fill) > 0:
while len_to_read > 0:
#This multiplication and slice should handle multi-byte fill characters, in case that ever comes up.
yield (run.fill * buffer_size)[ : min(len_to_read, buffer_size)]
len_to_read -= buffer_size
#Next byte run
continue
if run.img_offset is None:
raise AttributeError("Byte runs can't be extracted if missing a fill character and image offset.")
import platform
if platform.system() == "Windows":
cwd = "sleuthkit-4.1.3-win32" + os.sep + "bin" + os.sep
cmd = [cwd + "img_cat.exe"]
else:
cmd = ["img_cat"]
cmd.append("-b")
cmd.append(str(sector_size))
cmd.append("-s")
cmd.append(str(run.img_offset//sector_size))
cmd.append("-e")
cmd.append(str( (run.img_offset + run.len)//sector_size))
cmd.append(raw_image)
if platform.system() == "Windows":
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=stderr_fh, cwd=cwd)
else:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=stderr_fh)
#Do the buffered read
while len_to_read > 0:
buffer_data = p.stdout.read(buffer_size)
yield_data = buffer_data[ : min(len_to_read, buffer_size)]
if len(yield_data) > 0:
yield yield_data
else:
#Let the subprocess terminate so we can see the exit status
p.wait()
last_status = p.returncode
if last_status != 0:
raise subprocess.CalledProcessError(last_status, " ".join(cmd), "img_cat failed.")
len_to_read -= buffer_size
except Exception as e:
#Cleanup in an exception
if not stderr_fh is None:
stderr_fh.close()
            if not status_fh is None:
                if isinstance(e, subprocess.CalledProcessError):
                    status_fh.write(str(e.returncode))
                else:
                    status_fh.write("1")
                status_fh.close()
raise e
#Cleanup when all's gone well.
if not status_fh is None:
if not last_status is None:
                status_fh.write(str(last_status))
status_fh.close()
if not stderr_fh is None:
stderr_fh.close()
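    # Example: a sketch of streaming run contents out of a raw image. This
    # assumes The SleuthKit's img_cat is on the PATH; "disk.raw" and the
    # offsets are hypothetical.
    #
    #   brs = ByteRuns()
    #   brs.append(ByteRun(img_offset=512, len=1024))
    #   with open("extracted.bin", "wb") as out_fh:
    #       for chunk in brs.iter_contents("disk.raw"):
    #           out_fh.write(chunk)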
def populate_from_Element(self, e):
_typecheck(e, (ET.Element, ET.ElementTree))
#Split into namespace and tagname
(ns, tn) = _qsplit(e.tag)
# TL: Added "byte runs" (with space) to check (for fiwalk-0.6.3.exe)
assert (tn == "byte_runs" or tn == "byte runs")
if "facet" in e.attrib:
self.facet = e.attrib["facet"]
#Look through direct-child elements to populate run array
for ce in e.findall("./*"):
(cns, ctn) = _qsplit(ce.tag)
if ctn == "byte_run" or ctn == "run":
nbr = ByteRun()
nbr.populate_from_Element(ce)
self.append(nbr)
def to_Element(self):
outel = ET.Element("byte_runs")
for run in self:
tmpel = run.to_Element()
outel.append(tmpel)
if self.facet:
outel.attrib["facet"] = self.facet
return outel
@property
def facet(self):
"""Expected to be null, "data", "inode", or "name". See FileObject.data_brs, FileObject.inode_brs, and FileObject.name_brs."""
return self._facet
@facet.setter
def facet(self, val):
if not val is None:
_typecheck(val, str)
if val not in ByteRuns._facet_values:
raise ValueError("A ByteRuns facet must be one of these: %r. Received: %r." % (ByteRuns._facet_values, val))
self._facet = val
re_precision = re.compile(r"(?P<num>\d+)(?P<unit>(|m|n)s|d)?")
class TimestampObject(object):
"""
Encodes the "dftime" type. Wraps around dfxml.dftime, closely enough that this might just get folded into that class.
TimestampObjects implement a vs-null comparison workaround as in the SAS family of products: Null, for ordering purposes, is considered to be a value less than negative infinity.
"""
timestamp_name_list = ["mtime", "atime", "ctime", "crtime", "dtime", "bkup_time"]
def __init__(self, *args, **kwargs):
self.name = kwargs.get("name")
self.prec = kwargs.get("prec")
#_logger.debug("type(args) = %r" % type(args))
#_logger.debug("args = %r" % (args,))
if len(args) == 0:
self.time = None
elif len(args) == 1:
self.time = args[0]
else:
raise ValueError("Unexpected arguments. Whole args tuple: %r." % (args,))
self._timestamp = None
def __eq__(self, other):
#Check type
if other is None:
return False
_typecheck(other, TimestampObject)
if self.name != other.name:
return False
if self.prec != other.prec:
return False
if self.time != other.time:
return False
return True
    def __ge__(self, other):
        """Note: The semantics here and in other ordering functions are that "Null" is a value less than negative infinity."""
        if other is None:
            return True
        else:
            self._comparison_sanity_check(other)
            return self.time.__ge__(other.time)
    def __gt__(self, other):
        """Note: The semantics here and in other ordering functions are that "Null" is a value less than negative infinity."""
        if other is None:
            return True
        else:
            self._comparison_sanity_check(other)
            return self.time.__gt__(other.time)
    def __le__(self, other):
        """Note: The semantics here and in other ordering functions are that "Null" is a value less than negative infinity."""
        if other is None:
            return False
        else:
            self._comparison_sanity_check(other)
            return self.time.__le__(other.time)
    def __lt__(self, other):
        """Note: The semantics here and in other ordering functions are that "Null" is a value less than negative infinity."""
        if other is None:
            return False
        else:
            self._comparison_sanity_check(other)
            return self.time.__lt__(other.time)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
parts = []
if self.name:
parts.append("name=%r" % self.name)
if self.prec:
parts.append("prec=%r" % (self.prec,))
if self.time:
parts.append("%r" % self.time)
return "TimestampObject(" + ", ".join(parts) + ")"
def __str__(self):
if self.time:
return str(self.time)
else:
return self.__repr__()
    def _comparison_sanity_check(self, other):
        if None in (self.time, other.time):
            raise ValueError("Can't compare TimestampObjects: %r, %r." % (self, other))
def populate_from_Element(self, e):
_typecheck(e, (ET.Element, ET.ElementTree))
if "prec" in e.attrib:
self.prec = e.attrib["prec"]
self.time = e.text
(ns, tn) = _qsplit(e.tag)
self.name = tn
def to_Element(self):
_typecheck(self.name, str)
outel = ET.Element(self.name)
if self.prec:
outel.attrib["prec"] = "%d%s" % self.prec
if self.time:
outel.text = str(self.time)
return outel
@property
def name(self):
"""The type of timestamp - modified (mtime), accessed (atime), etc."""
return self._name
@name.setter
def name(self, value):
if not value is None:
if not value in TimestampObject.timestamp_name_list:
raise ValueError("The timestamp name must be in this list: %r. Received: %r." % (TimestampObject.timestamp_name_list, value))
self._name = value
@property
def prec(self):
"""
A pair, (resolution, unit); unit is a second (s), millisecond, nanosecond, or day (d). The default unit is "s". Can be passed as a string or a duple.
"""
return self._prec
    @prec.setter
    def prec(self, value):
        if value is None:
            self._prec = None
            return
        elif isinstance(value, tuple) and \
          len(value) == 2 and \
          isinstance(value[0], int) and \
          isinstance(value[1], str):
            self._prec = value
            return
        m = re_precision.match(value)
        if m is None:
            raise ValueError("Could not parse the timestamp precision: %r." % (value,))
        md = m.groupdict()
        tup = (int(md["num"]), md.get("unit") or "s")
        #_logger.debug("tup = %r" % (tup,))
        self._prec = tup
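    # Example: the precision forms the setter accepts.
    #
    #   ts = TimestampObject()
    #   ts.prec = "100ns"
    #   assert ts.prec == (100, "ns")
    #   ts.prec = (2, "s")
    #   assert ts.prec == (2, "s")
    #   ts.prec = "1"   # The unit defaults to seconds.
    #   assert ts.prec == (1, "s")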
@property
def time(self):
"""
The actual timestamp. A dfxml.dftime object. This class might be superfluous and end up collapsing into that...
"""
return self._time
@time.setter
def time(self, value):
if value is None:
self._time = None
else:
checked_value = dfxml.dftime(value)
#_logger.debug("checked_value.timestamp() = %r" % checked_value.timestamp())
self._time = checked_value
#Propagate timestamp value to other formats
self._timestamp = self._time.timestamp()
@property
def timestamp(self):
"""A Unix floating-point timestamp, as time.mktime returns. Currently, there is no setter for this property."""
return self._timestamp
class FileObject(object):
"""
This class provides property accesses, an XML serializer (ElementTree-based), and a deserializer.
The properties interface is NOT function calls, but simple accesses. That is, the old _fileobject_ style:
assert isinstance(fi, dfxml.fileobject)
fi.mtime()
is now replaced with:
assert isinstance(fi, Objects.FileObject)
fi.mtime
"""
_all_properties = set([
"alloc",
"alloc_inode",
"alloc_name",
"annos",
"app_name", # TL: Added app_name property
"app_state", # TL: Added app_state property
"atime",
"basename", # TL: Added basename property
"basename_norm", # TL: Added basename_norm property
"bkup_time",
"byte_runs",
"compressed",
"crtime",
"ctime",
"data_brs",
"dtime",
"error",
"externals",
"filename",
"filename_norm", # TL: Added filename_norm property
"filesize",
"gid",
"id",
"inode",
"inode_brs",
"link_target",
"libmagic",
"md5",
"meta_type",
"mode",
"mtime",
"name_brs",
"name_type",
"nlink",
"original_fileobject",
"orphan",
"orphan_name", # TL: Added orphan_name property
"parent_object",
"partition",
"seq",
"sha1",
"uid",
"unalloc",
"unused",
"used"
])
_br_facet_to_property = {
"data":"data_brs",
"inode":"inode_brs",
"name":"name_brs"
}
#TODO There may be need in the future to compare the annotations as well. It complicates make_differential_dfxml too much for now.
_incomparable_properties = set([
"annos",
"byte_runs",
"id",
#"unalloc", TL: Removed this property
"unused"
])
_diff_attr_names = {
"new":"delta:new_file",
"deleted":"delta:deleted_file",
"renamed":"delta:renamed_file",
"changed":"delta:changed_file",
"modified":"delta:modified_file",
"matched":"delta:matched",
"matched_soft":"delta:matched_soft" # TL: Added soft match delta
}
    def __init__(self, *args, **kwargs):
        #Prime all the properties
        for prop in FileObject._all_properties:
            if prop == "annos":
                continue
            elif prop == "externals":
                setattr(self, prop, kwargs.get(prop, OtherNSElementList()))
            else:
                setattr(self, prop, kwargs.get(prop))
        #volume_object is not among the serialized properties; prime it so the accessor is safe before a reader assigns it.
        self._volume_object = None
        self._annos = set()
        self._diffs = set()
def __eq__(self, other):
if other is None:
return False
_typecheck(other, FileObject)
for prop in FileObject._all_properties:
if prop in FileObject._incomparable_properties:
continue
if getattr(self, prop) != getattr(other, prop):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
parts = []
for prop in sorted(FileObject._all_properties):
            #Save data byte runs for the end, as their lists can get really long.
            if prop not in ["byte_runs", "data_brs"]:
                value = getattr(self, prop)
                if not value is None:
                    parts.append("%s=%r" % (prop, value))
        if self.data_brs:
            parts.append("data_brs=%r" % self.data_brs)
return "FileObject(" + ", ".join(parts) + ")"
def compare_to_original(self):
self._diffs = self.compare_to_other(self.original_fileobject, True)
    def compare_to_other(self, other, ignore_original=False, ignore_properties=set(), check_properties=set()):
        """Returns a set of all the properties found to differ."""
        _typecheck(other, FileObject)
        diffs = set()
        # TL: Added support to specify a set of properties to compare
        # (check_properties); when empty, all properties are compared.
        # TL: Added ignore_properties check
        # Can pass a set() of properties to ignore
        # e.g., {"filename", "sha1"}
        propnames = check_properties if check_properties else FileObject._all_properties
        for propname in propnames:
            if propname in FileObject._incomparable_properties:
                continue
            if ignore_original and propname == "original_fileobject":
                continue
            if propname in ignore_properties:
                continue
            oval = getattr(other, propname)
            sval = getattr(self, propname)
            if oval is None and sval is None:
                continue
            if oval != sval:
                #_logger.debug("propname, oval, sval: %r, %r, %r" % (propname, oval, sval))
                diffs.add(propname)
        return diffs
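    # Example: comparing two hand-built FileObjects; only the differing,
    # comparable properties are reported.
    #
    #   fo_a = FileObject(filename="a.txt", filesize=100)
    #   fo_b = FileObject(filename="b.txt", filesize=100)
    #   assert fo_a.compare_to_other(fo_b) == {"filename"}
    #   assert fo_a.compare_to_other(fo_b, ignore_properties={"filename"}) == set()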
def extract_facet(self, facet, image_path=None, buffer_size=1048576, partition_offset=None, sector_size=512, errlog=None, statlog=None, icat_threshold = 268435456):
"""
Generator. Extracts the facet with a SleuthKit tool, yielding chunks of the data.
@param buffer_size The facet data is yielded in chunks of at most this parameter's size. Default 1MiB.
@param partition_offset The offset of the file's containing partition, in bytes. Needed for icat. If not given, the FileObject's VolumeObject will be used. If that's also absent, icat can't be used, and img_cat will instead be tried as a fallback (which means byte runs must be in the DFXML).
@param icat_threshold icat incurs extensive, non-sequential IO overhead to walk the filesystem to reach the facet's byte runs. img_cat can be called on each byte run reported in the DFXML file, but on fragmented files this incurs overhead in process spawning. Facets larger than this threshold are extracted with icat. Default 256MiB. Force icat by setting this to -1; force img_cat with infinity (float("inf")).
"""
_image_path = image_path
if _image_path is None:
raise ValueError("The backing image path must be supplied.")
_partition_offset = partition_offset
if _partition_offset is None:
if self.volume_object:
_partition_offset = self.volume_object.partition_offset
#Try using icat; needs inode number and volume offset. We're additionally requiring the filesize be known.
#TODO The icat needs a little more experimentation.
        #NOTE The leading False deliberately disables this icat branch (see the TODO above); extraction currently falls through to the img_cat path below.
        if False and facet == "content" and \
not self.filesize is None and \
self.filesize >= icat_threshold and \
not self.inode is None and \
not _partition_offset is None:
_logger.debug("Extracting with icat: %r." % self)
#Set up logging if desired
stderr_fh = sys.stderr
if not errlog is None:
stderr_fh = open(errlog, "wb")
            status_fh = None
            if not statlog is None:
                status_fh = open(statlog, "w")
#Set up icat process
cmd = ["icat"]
cmd.append("-b")
cmd.append(str(sector_size))
cmd.append("-o")
cmd.append(str(self.volume_object.partition_offset//sector_size))
if not self.volume_object.ftype_str is None:
cmd.append("-f")
cmd.append(self.volume_object.ftype_str)
cmd.append(image_path)
cmd.append(str(self.inode))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=stderr_fh)
#Do a buffered read
len_to_read = self.filesize
while len_to_read > 0:
buffer_data = p.stdout.read(buffer_size)
yield_data = buffer_data[ : min(len_to_read, buffer_size)]
if len(yield_data) > 0:
yield yield_data
else:
#Let the subprocess terminate so we can see the exit status
p.wait()
last_status = p.returncode
#Log the status if requested
if not status_fh is None:
                        status_fh.write(str(last_status))
#Act on a bad status
if last_status != 0:
raise subprocess.CalledProcessError(last_status, " ".join(cmd), "icat failed.")
len_to_read -= buffer_size
            #Clean up file handles; don't close sys.stderr, which is only borrowed as the default.
            if status_fh: status_fh.close()
            if stderr_fh and stderr_fh is not sys.stderr: stderr_fh.close()
elif not self.byte_runs is None:
for chunk in self.byte_runs.iter_contents(_image_path, buffer_size, sector_size, errlog, statlog):
yield chunk
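    # Example: a sketch of carving file content. Assumes fo is a FileObject
    # whose data byte runs are populated (e.g. parsed from a DFXML file) and
    # that "disk.raw" (hypothetical) backs them; with the icat branch disabled
    # above, extraction runs through ByteRuns.iter_contents and img_cat.
    #
    #   with open("carved.bin", "wb") as out_fh:
    #       for chunk in fo.extract_facet("content", image_path="disk.raw"):
    #           out_fh.write(chunk)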
def is_allocated(self):
"""Collapse potentially-partial allocation information into a yes, no, or unknown answer."""
if self.alloc_inode == True and self.alloc_name == True:
return True
if self.alloc_inode is None and self.alloc_name is None:
if self.alloc is None:
return None
else:
return self.alloc
#Partial allocation information at this point is assumed False. In some file systems, like FAT, we only need one of alloc_inode and alloc_name for allocation status. Guidelines on which should win out haven't been set yet, though, so wait on this.
return False
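    # Example: the three possible is_allocated() answers.
    #
    #   fo = FileObject()
    #   assert fo.is_allocated() is None   # No allocation information at all.
    #   fo.alloc_inode = True
    #   fo.alloc_name = True
    #   assert fo.is_allocated() == True
    #   fo.alloc_name = False
    #   assert fo.is_allocated() == False  # Partial allocation counts as False.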
def populate_from_Element(self, e):
"""Populates this FileObject's properties from an ElementTree Element. The Element need not be retained."""
global _warned_elements
_typecheck(e, (ET.Element, ET.ElementTree))
#_logger.debug("FileObject.populate_from_Element(%r)" % e)
#Split into namespace and tagname
(ns, tn) = _qsplit(e.tag)
assert tn in ["fileobject", "original_fileobject", "parent_object"]
#Map "delta:" attributes of <fileobject>s into the self.annos set
#_logger.debug("self.annos, before: %r." % self.annos)
_read_differential_annotations(FileObject._diff_attr_names, e, self.annos)
#_logger.debug("self.annos, after: %r." % self.annos)
#Look through direct-child elements for other properties
for ce in e.findall("./*"):
(cns, ctn) = _qsplit(ce.tag)
#_logger.debug("Populating from child element: %r." % ce.tag)
#Inherit any marked changes
for attr in ce.attrib:
#_logger.debug("Inspecting attr for diff. annos: %r." % attr)
(ns, an) = _qsplit(attr)
if an == "changed_property" and ns == dfxml.XMLNS_DELTA:
#_logger.debug("Identified changed property: %r." % ctn)
#TODO There may be a more elegant way of handling the hashes and any other attribute-dependent element-to-property mapping. Probably involving XPath.
if ctn == "hashdigest":
if "type" not in ce.attrib:
raise AttributeError("Attribute 'type' not found. Every hashdigest element should have a 'type' attribute to identify the hash type.")
self.diffs.add(ce.attrib["type"].lower())
elif ctn == "byte_runs":
facet = ce.attrib.get("facet")
prop = FileObject._br_facet_to_property.get(facet, "data_brs")
self.diffs.add(prop)
else:
self.diffs.add(ctn)
# TL: Added "byte runs" to check (for old fiwalk-0.6.3.exe)
if ctn == "byte_runs" or ctn == "byte runs":
#byte_runs might be for file contents, the inode/MFT entry, or the directory entry naming the file. Use the facet attribute to determine which. If facet is absent, assume they're data byte runs.
if "facet" in ce.attrib:
if ce.attrib["facet"] not in FileObject._br_facet_to_property:
if not ce.attrib["facet"] in _warned_byterun_facets:
_warned_byterun_facets.add(ce.attrib["facet"])
_logger.warning("byte_runs facet %r was unexpected. Will not interpret this element.")
else:
brs = ByteRuns()
brs.populate_from_Element(ce)
brs.facet = ce.attrib["facet"]
setattr(self, FileObject._br_facet_to_property[brs.facet], brs)
else:
self.byte_runs = ByteRuns()
self.byte_runs.populate_from_Element(ce)
elif ctn == "hashdigest":
if ce.attrib["type"].lower() == "md5":
self.md5 = ce.text
elif ce.attrib["type"].lower() == "sha1":
self.sha1 = ce.text
elif ctn == "original_fileobject":
self.original_fileobject = FileObject()
self.original_fileobject.populate_from_Element(ce)
elif ctn == "parent_object":
self.parent_object = FileObject()
self.parent_object.populate_from_Element(ce)
elif ctn in ["atime", "bkup_time", "crtime", "ctime", "dtime", "mtime"]:
setattr(self, ctn, TimestampObject())
getattr(self, ctn).populate_from_Element(ce)
elif ctn in FileObject._all_properties:
setattr(self, ctn, ce.text)
elif cns not in [dfxml.XMLNS_DFXML, ""]:
#Put all non-DFXML-namespace elements into the externals list.
self.externals.append(ce)
else:
if (cns, ctn) not in _warned_elements:
_warned_elements.add((cns, ctn))
_logger.warning("Uncertain what to do with this element: %r" % ce)
    def populate_from_stat(self, s):
        """Populates FileObject fields from a stat() call."""
        _typecheck(s, os.stat_result)
self.mode = s.st_mode
self.inode = s.st_ino
self.nlink = s.st_nlink
self.uid = s.st_uid
self.gid = s.st_gid
self.filesize = s.st_size
#s.st_dev is ignored for now.
if "st_mtime" in dir(s):
self.mtime = s.st_mtime
if "st_atime" in dir(s):
self.atime = s.st_atime
if "st_ctime" in dir(s):
self.ctime = s.st_ctime
if "st_birthtime" in dir(s):
self.crtime = s.st_birthtime
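    # Example: populating a FileObject from the local file system; the path is
    # hypothetical.
    #
    #   fo = FileObject()
    #   fo.populate_from_stat(os.stat("/etc/hosts"))
    #   print(fo.filesize, fo.mtime)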
def to_Element(self):
"""Creates an ElementTree Element with elements in DFXML schema order."""
outel = ET.Element("fileobject")
annos_whittle_set = copy.deepcopy(self.annos)
diffs_whittle_set = copy.deepcopy(self.diffs)
for annodiff in FileObject._diff_attr_names:
if annodiff in annos_whittle_set:
outel.attrib[FileObject._diff_attr_names[annodiff]] = "1"
annos_whittle_set.remove(annodiff)
if len(annos_whittle_set) > 0:
_logger.warning("Failed to export some differential annotations: %r." % annos_whittle_set)
def _anno_change(el):
if el.tag in self.diffs:
el.attrib["delta:changed_property"] = "1"
diffs_whittle_set.remove(el.tag)
def _anno_hash(el):
if el.attrib["type"] in self.diffs:
el.attrib["delta:changed_property"] = "1"
diffs_whittle_set.remove(el.attrib["type"])
def _anno_byte_runs(el):
if "facet" in el.attrib:
prop = FileObject._br_facet_to_property[el.attrib["facet"]]
else:
prop = "data_brs"
if prop in self.diffs:
el.attrib["delta:changed_property"] = "1"
#_logger.debug("diffs_whittle_set = %r." % diffs_whittle_set)
diffs_whittle_set.remove(prop)
#Recall that Element text must be a string
def _append_str(name, value):
"""Note that empty elements should be created if the element was removed."""
if not value is None or name in diffs_whittle_set:
tmpel = ET.Element(name)
if not value is None:
tmpel.text = str(value)
_anno_change(tmpel)
outel.append(tmpel)
def _append_time(name, value):
"""Note that empty elements should be created if the element was removed."""
if not value is None or name in diffs_whittle_set:
if not value is None and value.time:
tmpel = value.to_Element()
else:
tmpel = ET.Element(name)
_anno_change(tmpel)
outel.append(tmpel)
def _append_bool(name, value):
"""Note that empty elements should be created if the element was removed."""
if not value is None or name in diffs_whittle_set:
tmpel = ET.Element(name)
if not value is None:
tmpel.text = str(1 if value else 0)
_anno_change(tmpel)
outel.append(tmpel)
_using_facets = False
def _append_byte_runs(name, value):
"""The complicated part here is setting the "data" facet on the byte runs, because we assume that no facet definitions means that for this file, there's only the one byte_runs list for data."""
#_logger.debug("_append_byte_runs(%r, %r)" % (name, value))
if value or name in diffs_whittle_set:
if value:
tmpel = value.to_Element()
if "facet" in tmpel.attrib:
_using_facets = True
else:
tmpel = ET.Element("byte_runs")
propname_to_facet = {
"data_brs": "data",
"inode_brs": "inode",
"name_brs": "name"
}
if name in propname_to_facet:
_using_facets = True
tmpel.attrib["facet"] = propname_to_facet[name]
elif _using_facets:
tmpel.attrib["facet"] = propname_to_facet["data_brs"]
_anno_byte_runs(tmpel)
outel.append(tmpel)
def _append_externals():
for e in self.externals:
outel.append(e)
def _append_object(name, value, namespace_prefix=None):
"""name must be the name of a property that has a to_Element() method. namespace_prefix will be prepended as-is to the element tag."""
obj = value
if obj or name in diffs_whittle_set:
if obj:
tmpel = obj.to_Element()
else:
tmpel = ET.Element(name)
                #Set the tag name here for properties like parent_object, which is a FileObject but must not be serialized under the "fileobject" tag.
if namespace_prefix:
tmpel.tag = namespace_prefix + name
else:
tmpel.tag = name
_anno_change(tmpel)
outel.append(tmpel)
def _append_hash(name, value):
if not value is None or name in diffs_whittle_set:
tmpel = ET.Element("hashdigest")
tmpel.attrib["type"] = name
if not value is None:
tmpel.text = value
_anno_hash(tmpel)
outel.append(tmpel)
#The parent object is a one-off. Duplicating the whole parent is wasteful, so create a shadow object that just outputs the important bits.
if not self.parent_object is None:
parent_object_shadow = FileObject()
parent_object_shadow.inode = self.parent_object.inode
_append_object("parent_object", parent_object_shadow)
_append_str("filename", self.filename)
_append_str("filename_norm", self.filename_norm) # TL: Added filename_norm to XML out
_append_str("basename", self.basename) # TL: Added basename to XML out
_append_str("basename_norm", self.basename_norm) # TL: Added basename_norm to XML out
_append_str("error", self.error)
_append_str("partition", self.partition)
_append_str("id", self.id)
_append_str("name_type", self.name_type)
_append_str("filesize", self.filesize)
#TODO Define a better flag for if we're going to output <alloc> elements.
if self.alloc_name is None and self.alloc_inode is None:
_append_bool("alloc", self.alloc)
else:
_append_bool("alloc_inode", self.alloc_inode)
_append_bool("alloc_name", self.alloc_name)
_append_bool("used", self.used)
_append_bool("orphan", self.orphan)
_append_str("orphan_name", self.orphan_name) # TL: Added orphan_name to XML out
_append_bool("compressed", self.compressed)
_append_str("inode", self.inode)
_append_str("meta_type", self.meta_type)
_append_str("mode", self.mode)
_append_str("nlink", self.nlink)
_append_str("uid", self.uid)
_append_str("gid", self.gid)
_append_time("mtime", self.mtime)
_append_time("ctime", self.ctime)
_append_time("atime", self.atime)
_append_time("crtime", self.crtime)
_append_str("seq", self.seq)
_append_time("dtime", self.dtime)
_append_time("bkup_time", self.bkup_time)
_append_str("link_target", self.link_target)
_append_str("libmagic", self.libmagic)
_append_externals()
_append_byte_runs("inode_brs", self.inode_brs)
_append_byte_runs("name_brs", self.name_brs)
_append_byte_runs("data_brs", self.data_brs)
_append_hash("md5", self.md5)
_append_hash("sha1", self.sha1)
_append_object("original_fileobject", self.original_fileobject, "delta:")
# TL: Added the following object to print XML elements
_append_str("app_name", self.app_name) # TL: Added app_name to XML out
_append_str("app_state", self.app_state) # TL: Added app_state to XML out
if len(diffs_whittle_set) > 0:
_logger.warning("Did not annotate all of the differing properties of this file. Remaining properties: %r." % diffs_whittle_set)
return outel
def to_dfxml(self):
return _ET_tostring(self.to_Element())
@property
def alloc(self):
"""Note that setting .alloc will affect the value of .unalloc, and vice versa. The last one to set wins."""
global _nagged_alloc
if not _nagged_alloc:
#alloc isn't deprecated yet.
#_logger.warning("The FileObject.alloc property is deprecated. Use .alloc_inode and/or .alloc_name instead. .alloc is proxied as True if alloc_inode and alloc_name are both True.")
_nagged_alloc = True
if self.alloc_inode and self.alloc_name:
return True
else:
return self._alloc
@alloc.setter
def alloc(self, val):
self._alloc = _boolcast(val)
if not self._alloc is None:
self._unalloc = not self._alloc
@property
def alloc_inode(self):
return self._alloc_inode
@alloc_inode.setter
def alloc_inode(self, val):
self._alloc_inode = _boolcast(val)
@property
def alloc_name(self):
return self._alloc_name
@alloc_name.setter
def alloc_name(self, val):
self._alloc_name = _boolcast(val)
@property
def annos(self):
"""Set of differential annotations. Expected members are the keys of this class's _diff_attr_names dictionary."""
return self._annos
@annos.setter
def annos(self, val):
_typecheck(val, set)
self._annos = val
# TL: Added app_name property getter
@property
def app_name(self):
return self._app_name
# TL: Added app_name property setter
@app_name.setter
def app_name(self, val):
self._app_name = _strcast(val)
# TL: Added app_state property getter
@property
def app_state(self):
return self._app_state
# TL: Added app_state property setter
@app_state.setter
def app_state(self, val):
self._app_state = _strcast(val)
@property
def atime(self):
return self._atime
@atime.setter
def atime(self, val):
if val is None:
self._atime = None
elif isinstance(val, TimestampObject):
self._atime = val
else:
checked_val = TimestampObject(val, name="atime")
self._atime = checked_val
# TL: Added basename property getter
@property
def basename(self):
return self._basename
# TL: Added basename property setter
@basename.setter
def basename(self, val):
self._basename = _strcast(val)
# TL: Added basename_norm property getter
@property
def basename_norm(self):
return self._basename_norm
# TL: Added basename_norm property setter
@basename_norm.setter
def basename_norm(self, val):
self._basename_norm = _strcast(val)
@property
def bkup_time(self):
return self._bkup_time
@bkup_time.setter
def bkup_time(self, val):
if val is None:
self._bkup_time = None
elif isinstance(val, TimestampObject):
self._bkup_time = val
else:
checked_val = TimestampObject(val, name="bkup_time")
self._bkup_time = checked_val
@property
def byte_runs(self):
"""This property is now a synonym for the data byte runs (.data_brs)."""
return self.data_brs
@byte_runs.setter
def byte_runs(self, val):
self.data_brs = val
@property
def compressed(self):
return self._compressed
@compressed.setter
def compressed(self, val):
self._compressed = _boolcast(val)
@property
def ctime(self):
return self._ctime
@ctime.setter
def ctime(self, val):
if val is None:
self._ctime = None
elif isinstance(val, TimestampObject):
self._ctime = val
else:
checked_val = TimestampObject(val, name="ctime")
self._ctime = checked_val
@property
def crtime(self):
return self._crtime
@crtime.setter
def crtime(self, val):
if val is None:
self._crtime = None
elif isinstance(val, TimestampObject):
self._crtime = val
else:
checked_val = TimestampObject(val, name="crtime")
self._crtime = checked_val
@property
def data_brs(self):
"""The byte runs that store the file's content."""
return self._data_brs
@data_brs.setter
def data_brs(self, val):
if not val is None:
_typecheck(val, ByteRuns)
self._data_brs = val
@property
def diffs(self):
"""This property intentionally has no setter. To populate, call compare_to_original() after assigning an original_fileobject."""
return self._diffs
@property
def dtime(self):
return self._dtime
@dtime.setter
def dtime(self, val):
if val is None:
self._dtime = None
elif isinstance(val, TimestampObject):
self._dtime = val
else:
checked_val = TimestampObject(val, name="dtime")
self._dtime = checked_val
@property
def error(self):
return self._error
@error.setter
def error(self, val):
self._error = _strcast(val)
@property
def filename(self):
return self._filename
@filename.setter
def filename(self, val):
self._filename = _strcast(val)
# TL: Added filename_norm property getter
@property
def filename_norm(self):
return self._filename_norm
# TL: Added filename_norm property setter
@filename_norm.setter
def filename_norm(self, val):
self._filename_norm = _strcast(val)
@property
def externals(self):
"""
This property exposes XML elements of other namespaces. Since these elements can be of arbitrary complexity, this list is solely comprised ofxml.etree.ElementTree.Element objects. The tags must be a fully-qualified namespace (of the pattern {URI}localname). If generating the Elements with a script instead of de-serializing from XML, you should issue an ElementTree register_namespace call with your namespace abbreviation prefix.
NOTE: Diffs are currently NOT computed for external elements.
NOTE: This property should be considered unstable, as the interface is in an early design phase. Please notify the maintainers of this library (see the Git history for the Objects.py file) if you are using this interface and wish to be notified of updates."""
return self._externals
@externals.setter
def externals(self, val):
_typecheck(val, OtherNSElementList)
self._externals = val
@property
def filesize(self):
return self._filesize
@filesize.setter
def filesize(self, val):
self._filesize = _intcast(val)
@property
def gid(self):
return self._gid
@gid.setter
def gid(self, val):
self._gid = _strcast(val)
@property
def id(self):
return self._id
@id.setter
def id(self, val):
self._id = _intcast(val)
@property
def inode(self):
return self._inode
@inode.setter
def inode(self, val):
self._inode = _intcast(val)
@property
def libmagic(self):
return self._libmagic
@libmagic.setter
def libmagic(self, val):
self._libmagic = _strcast(val)
@property
def inode_brs(self):
"""The byte run(s) that represents the file's metadata object (the inode or the MFT entry). In file systems that do not distinguish between inode and directory entry, e.g. FAT, .inode_brs should be equivalent to .name_brs, if both fields are present."""
return self._inode_brs
@inode_brs.setter
def inode_brs(self, val):
if not val is None:
_typecheck(val, ByteRuns)
self._inode_brs = val
@property
def md5(self):
return self._md5
@md5.setter
def md5(self, val):
self._md5 = _strcast(val)
@property
def meta_type(self):
return self._meta_type
@meta_type.setter
def meta_type(self, val):
self._meta_type = _intcast(val)
@property
def mode(self):
"""The security mode is represented in the FileObject as a base-10 integer. It is also serialized as a decimal integer."""
return self._mode
@mode.setter
def mode(self, val):
self._mode = _intcast(val)
@property
def mtime(self):
return self._mtime
@mtime.setter
def mtime(self, val):
if val is None:
self._mtime = None
elif isinstance(val, TimestampObject):
self._mtime = val
else:
checked_val = TimestampObject(val, name="mtime")
self._mtime = checked_val
@property
def name_brs(self):
"""The byte run(s) that represents the file's name object (the directory entry). In file systems that do not distinguish between inode and directory entry, e.g. FAT, .inode_brs should be equivalent to .name_brs, if both fields are present."""
return self._name_brs
@name_brs.setter
def name_brs(self, val):
if not val is None:
_typecheck(val, ByteRuns)
self._name_brs = val
@property
def name_type(self):
return self._name_type
@name_type.setter
def name_type(self, val):
if val is None:
self._name_type = val
else:
cast_val = _strcast(val)
if cast_val not in ["-", "p", "c", "d", "b", "r", "l", "s", "h", "w", "v"]:
raise ValueError("Unexpected name_type received: %r (casted to %r)." % (val, cast_val))
self._name_type = cast_val
@property
def nlink(self):
return self._nlink
@nlink.setter
def nlink(self, val):
self._nlink = _intcast(val)
@property
def orphan(self):
return self._orphan
@orphan.setter
def orphan(self, val):
self._orphan = _boolcast(val)
# TL: Added orphan_name property getter
@property
def orphan_name(self):
return self._orphan_name
# TL: Added orphan_name property setter
@orphan_name.setter
def orphan_name(self, val):
self._orphan_name = _strcast(val)
@property
def original_fileobject(self):
return self._original_fileobject
@original_fileobject.setter
def original_fileobject(self, val):
if not val is None:
_typecheck(val, FileObject)
self._original_fileobject = val
@property
def partition(self):
return self._partition
@partition.setter
def partition(self, val):
self._partition = _intcast(val)
@property
def parent_object(self):
"""This object is an extremely sparse FileObject, containing just identifying information. Alternately, it can be an entire object reference to the parent Object, though uniqueness should be checked."""
return self._parent_object
@parent_object.setter
def parent_object(self, val):
if not val is None:
_typecheck(val, FileObject)
self._parent_object = val
@property
def seq(self):
return self._seq
@seq.setter
def seq(self, val):
self._seq = _intcast(val)
@property
def sha1(self):
return self._sha1
@sha1.setter
def sha1(self, val):
self._sha1 = _strcast(val)
@property
def uid(self):
return self._uid
@uid.setter
def uid(self, val):
self._uid = _strcast(val)
@property
def unalloc(self):
"""Note that setting .unalloc will affect the value of .alloc, and vice versa. The last one to set wins."""
return self._unalloc
@unalloc.setter
def unalloc(self, val):
self._unalloc = _boolcast(val)
if not self._unalloc is None:
self._alloc = not self._unalloc
    @property
    def unused(self):
        return self._unused
@unused.setter
def unused(self, val):
self._unused = _intcast(val)
if not self._unused is None:
self._used = not self._unused
@property
def used(self):
return self._used
@used.setter
def used(self, val):
self._used = _intcast(val)
if not self._used is None:
self._unused = not self._used
@property
def volume_object(self):
"""Reference to the containing volume object. Not meant to be propagated with __repr__ or to_Element()."""
return self._volume_object
@volume_object.setter
def volume_object(self, val):
if not val is None:
_typecheck(val, VolumeObject)
self._volume_object = val
class OtherNSElementList(list):
#Note that super() must be called with arguments to work in Python 2.
@classmethod
def _check_qname(cls, tagname):
(ns, ln) = _qsplit(tagname)
if ns == dfxml.XMLNS_DFXML:
raise ValueError("'External' elements must be a non-DFXML namespace.")
#Register qname for later output
        #TODO Devise a module-level interface for namespace abbreviations.
def __repr__(self):
#Unwrap the string representation of this class's type name (necessary because we don't necessarily know if it'll be Objects.Other... or just Other...).
_typestr = str(type(self))[ len("<class '") : -len("'>") ]
return _typestr + "(" + super(OtherNSElementList, self).__repr__() + ")"
def __setitem__(self, idx, value):
_typecheck(value, ET.Element)
OtherNSElementList._check_qname(value.tag)
super(OtherNSElementList, self).__setitem__(idx, value)
def append(self, value):
_typecheck(value, ET.Element)
OtherNSElementList._check_qname(value.tag)
super(OtherNSElementList, self).append(value)
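    # Example: attaching an element from a foreign namespace to a FileObject's
    # externals list; the URI and prefix are hypothetical.
    #
    #   ET.register_namespace("ex", "http://example.org/ns#")
    #   el = ET.Element("{http://example.org/ns#}note")
    #   el.text = "out-of-band data"
    #   fo = FileObject()
    #   fo.externals.append(el)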
class CellObject(object):
_all_properties = set([
"alloc",
"app_name", # TL: Added app_name property
"app_state", # TL: Added app_state property
"annos",
"basename",
"basename_norm", # TL: Added basename_norm property
"byte_runs",
"cellpath",
"cellpath_norm", # TL: Added cellpath_norm property
"data",
"data_conversions",
"data_encoding", # TL: Added data_encoding element
"data_raw", # TL: Added data_raw element
"data_type",
"error",
"mtime",
"name_type",
"original_cellobject",
"parent_object",
"root",
"rootkey" # TL: Added rootkey element
])
_diff_attr_names = {
"new":"delta:new_cell",
"deleted":"delta:deleted_cell",
"changed":"delta:changed_cell",
"modified":"delta:modified_cell",
"matched":"delta:matched",
"matched_soft":"delta:matched_soft" # TL: Added a soft match delta
}
#TODO There may be need in the future to compare the annotations as well.
_incomparable_properties = set([
"annos"
])
def __init__(self, *args, **kwargs):
#These properties must be assigned first for sanity check dependencies
self.name_type = kwargs.get("name_type")
for prop in CellObject._all_properties:
if prop == "annos":
setattr(self, prop, kwargs.get(prop, set()))
else:
setattr(self, prop, kwargs.get(prop))
self._diffs = set()
def __eq__(self, other):
if other is None:
return False
_typecheck(other, CellObject)
for prop in CellObject._all_properties:
if prop in CellObject._incomparable_properties:
continue
if getattr(self, prop) != getattr(other, prop):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
parts = []
for prop in sorted(list(CellObject._all_properties)):
if not getattr(self, prop) is None:
parts.append("%s=%r" % (prop, getattr(self, prop)))
return "CellObject(" + ", ".join(parts) + ")"
def compare_to_original(self):
self._diffs = self.compare_to_other(self.original_cellobject, True)
def compare_to_other(self, other, ignore_original=False, ignore_properties=set()):
_typecheck(other, CellObject)
diffs = set()
for propname in CellObject._all_properties:
if propname in CellObject._incomparable_properties:
continue
if ignore_original and propname == "original_cellobject":
continue
# TL: Added ignore_properties check
# Can pass a set() of properties to ignore
# e.g., {"cellpath", "basename"}
if propname in ignore_properties:
continue
oval = getattr(other, propname)
sval = getattr(self, propname)
if oval is None and sval is None:
continue
if oval != sval:
#_logger.debug("propname, oval, sval: %r, %r, %r" % (propname, oval, sval))
diffs.add(propname)
return diffs
def populate_from_Element(self, e):
"""Populates this CellObject's properties from an ElementTree Element. The Element need not be retained."""
global _warned_elements
_typecheck(e, (ET.Element, ET.ElementTree))
_read_differential_annotations(CellObject._diff_attr_names, e, self.annos)
#Split into namespace and tagname
(ns, tn) = _qsplit(e.tag)
assert tn in ["cellobject", "original_cellobject", "parent_object"]
if e.attrib.get("root"):
self.root = e.attrib["root"]
#Look through direct-child elements for other properties
for ce in e.findall("./*"):
(cns, ctn) = _qsplit(ce.tag)
if ctn == "alloc":
self.alloc = ce.text
elif ctn == "basename":
self.basename = ce.text
# TL: Added basename_norm to be populated
elif ctn == "basename_norm":
self.basename_norm = ce.text
elif ctn == "byte_runs":
self.byte_runs = ByteRuns()
self.byte_runs.populate_from_Element(ce)
elif ctn == "cellpath":
self.cellpath = ce.text
# TL: Added cellpath_norm to be populated
elif ctn == "cellpath_norm":
self.cellpath_norm = ce.text
elif ctn == "data":
self.data = ce.text
if ce.attrib.get("encoding"):
self.data_encoding = ce.attrib["encoding"]
            # TL: Added data_encoding, set here directly from a child XML
            # element. This differs from the case above, where the encoding
            # arrives as an attribute of the data element itself.
elif ctn == "data_encoding":
self.data_encoding = ce.text
# TL: Added raw data element to be populated
elif ctn == "data_raw":
self.data_raw = ce.text
elif ctn == "data_conversions":
self.data_conversions = dict()
for cce in ce:
if cce.tag == "int":
self.data_conversions["int"] = int()
elif cce.tag == "string":
self.data_conversions["string"] = cce.text
elif cce.tag == "string_list":
self.data_conversions["string_list"] = []
for ccce in cce:
self.data_conversions["string_list"].append(ccce.text)
elif ctn == "data_type":
self.data_type = ce.text
elif ctn == "error":
self.error = ce.text
elif ctn == "mtime":
self.mtime = TimestampObject()
self.mtime.populate_from_Element(ce)
elif ctn == "name_type":
self.name_type = ce.text
elif ctn == "original_cellobject":
self.original_cellobject = CellObject()
self.original_cellobject.populate_from_Element(ce)
elif ctn == "parent_object":
self.parent_object = CellObject()
self.parent_object.populate_from_Element(ce)
# TL: Added app_state to be populated
elif ctn == "app_state":
self.app_state = ce.text
# TL: Added app_name to be populated
elif ctn == "app_name":
self.app_name = ce.text
# TL: Added rootkey to be populated
elif ctn == "rootkey":
self.rootkey = ce.text
else:
if (cns, ctn) not in _warned_elements:
_warned_elements.add((cns, ctn))
_logger.warning("Uncertain what to do with this element: %r" % ce)
self.sanity_check()
def sanity_check(self):
if self.name_type and self.name_type != "k":
if self.mtime:
_logger.info("Error occurred sanity-checking this CellObject: %r." % self)
raise ValueError("A Registry Key (node) is the only kind of CellObject that can have a timestamp.")
if self.root:
_logger.info("Error occurred sanity-checking this CellObject: %r." % self)
raise ValueError("A Registry Key (node) is the only kind of CellObject that can have the 'root' attribute.")
def to_Element(self):
self.sanity_check()
outel = ET.Element("cellobject")
annos_whittle_set = copy.deepcopy(self.annos)
diffs_whittle_set = copy.deepcopy(self.diffs)
for annodiff in CellObject._diff_attr_names:
if annodiff in annos_whittle_set:
outel.attrib[CellObject._diff_attr_names[annodiff]] = "1"
annos_whittle_set.remove(annodiff)
if len(annos_whittle_set) > 0:
_logger.warning("Failed to export some differential annotations: %r." % annos_whittle_set)
def _anno_change(el):
if el.tag in self.diffs:
el.attrib["delta:changed_property"] = "1"
diffs_whittle_set.remove(el.tag)
#Do an additional check for data_encoding, which is serialized as an attribute.
if el.tag == "data" and "data_encoding" in self.diffs:
el.attrib["delta:changed_property"] = "1"
diffs_whittle_set.remove("data_encoding")
def _append_bool(name, value):
if not value is None or name in diffs_whittle_set:
tmpel = ET.Element(name)
if not value is None:
tmpel.text = "1" if value else "0"
_anno_change(tmpel)
outel.append(tmpel)
#Recall that Element text must be a string
def _append_str(name, value):
if not value is None or name in diffs_whittle_set:
tmpel = ET.Element(name)
if not value is None:
tmpel.text = str(value)
_anno_change(tmpel)
if name == "data" and not self.data_encoding is None:
tmpel.attrib["encoding"] = self.data_encoding
outel.append(tmpel)
def _append_object(name, value, namespace_prefix=None): # TL: Added prefix
if not value is None or name in diffs_whittle_set:
if value is None:
tmpel = ET.Element(name)
else:
tmpel = value.to_Element()
# TL: Added for prefix support
#Set the tag name here for properties like parent_object, a FileObject without being wholly a FileObject.
if namespace_prefix:
tmpel.tag = namespace_prefix + name
else:
tmpel.tag = name
_anno_change(tmpel)
outel.append(tmpel)
#TODO root should be an element too. Revise schema.
if self.root:
outel.attrib["root"] = str(self.root)
_append_str("cellpath", self.cellpath)
_append_str("cellpath_norm", self.cellpath_norm) # TL: Added cellpath_norm to XML out
_append_str("basename", self.basename)
_append_str("basename_norm", self.basename_norm) # TL: Added basename_norm to XML out
_append_str("error", self.error)
_append_str("name_type", self.name_type)
_append_bool("alloc", self.alloc)
_append_object("mtime", self.mtime)
_append_str("data_type", self.data_type)
_append_str("data", self.data)
_append_str("data_raw", self.data_raw) # TL: Added data_raw to XML out
_append_str("app_name", self.app_name) # TL: Added app_name to XML out
_append_str("app_state", self.app_state) # TL: Added app_state to XML out
_append_str("rootkey", self.rootkey) # TL: Added rootkey to XML out
#The experimental conversions element needs its own code
if not self.data_conversions is None or "data_conversions" in diffs_whittle_set:
tmpel = ET.Element("data_conversions")
if not self.data_conversions is None:
if "int" in self.data_conversions:
tmpcel = ET.Element("int")
tmpcel.text = str(self.data_conversions["int"])
tmpel.append(tmpcel)
if "string" in self.data_conversions:
tmpcel = ET.Element("string")
tmpcel.text = str(self.data_conversions["string"])
tmpel.append(tmpcel)
if "string_list" in self.data_conversions:
tmpcel = ET.Element("string_list")
                    for s in self.data_conversions["string_list"]:
tmpccel = ET.Element("string")
tmpccel.text = s
tmpcel.append(tmpccel)
tmpel.append(tmpcel)
_anno_change(tmpel)
outel.append(tmpel)
_append_object("byte_runs", self.byte_runs)
#_append_object("original_cellobject", self.original_cellobject)
# TL: Added delta to original cellobject for printing
_append_object("original_cellobject", self.original_cellobject, "delta:")
if len(diffs_whittle_set) > 0:
_logger.warning("Did not annotate all of the differing properties of this file. Remaining properties: %r." % diffs_whittle_set)
return outel
def to_regxml(self):
return _ET_tostring(self.to_Element())
@property
def alloc(self):
return self._alloc
@alloc.setter
def alloc(self, val):
self._alloc = _boolcast(val)
@property
def annos(self):
"""Set of differential annotations. Expected members are the keys of this class's _diff_attr_names dictionary."""
return self._annos
@annos.setter
def annos(self, val):
_typecheck(val, set)
self._annos = val
# TL: Added app_name property getter
@property
def app_name(self):
return self._app_name
# TL: Added app_name property setter
@app_name.setter
def app_name(self, val):
self._app_name = _strcast(val)
# TL: Added app_state property getter
@property
def app_state(self):
return self._app_state
# TL: Added app_state property setter
@app_state.setter
def app_state(self, val):
self._app_state = _strcast(val)
@property
def basename(self):
return self._basename
@basename.setter
def basename(self, val):
if not val is None:
_typecheck(val, str)
self._basename = val
# TL: Added basename_norm property getter
@property
def basename_norm(self):
return self._basename_norm
# TL: Added basename_norm property setter
@basename_norm.setter
def basename_norm(self, val):
self._basename_norm = _strcast(val)
@property
def byte_runs(self):
return self._byte_runs
@byte_runs.setter
def byte_runs(self, val):
if not val is None:
_typecheck(val, ByteRuns)
self._byte_runs = val
@property
def cellpath(self):
return self._cellpath
@cellpath.setter
def cellpath(self, val):
if not val is None:
_typecheck(val, str)
self._cellpath = val
# TL: Added cellpath_norm property getter
@property
def cellpath_norm(self):
return self._cellpath_norm
# TL: Added cellpath_norm property setter
@cellpath_norm.setter
def cellpath_norm(self, val):
self._cellpath_norm = _strcast(val)
@property
def data(self):
"""Expecting a base64-encoded string. See conversions (according to the Hive parser's library) in data_conversions property."""
return self._data
@data.setter
def data(self, val):
if not val is None:
_typecheck(val, str)
self._data = val
@property
def data_conversions(self):
return self._data_conversions
@data_conversions.setter
def data_conversions(self, val):
if not val is None:
_typecheck(val, dict)
self._data_conversions = val
@property
def data_encoding(self):
"""Expecting a string, typically 'base64'."""
return self._data_encoding
@data_encoding.setter
def data_encoding(self, val):
if not val is None:
_typecheck(val, str)
self._data_encoding = val
# TL: Added data_raw getter
@property
def data_raw(self):
return self._data_raw
# TL: Added data_raw setter
@data_raw.setter
def data_raw(self, val):
if not val is None:
_typecheck(val, str)
self._data_raw = val
@property
def data_type(self):
"""Expecting a string, e.g. "REG_MULTI_SZ", or an int, because value type is known to be overloaded as an integer storage field in some cells."""
return self._data_type
@data_type.setter
def data_type(self, val):
        # TL: Added conversion of Registry (Zimmerman) value data-type names
        # to fit the Objects.py naming conventions.
if val == "RegNone": val = "REG_NONE"
elif val == "RegSz": val = "REG_SZ"
elif val == "RegExpandSz": val = "REG_EXPAND_SZ"
elif val == "RegBinary": val = "REG_BINARY"
elif val == "RegDword": val = "REG_DWORD"
elif val == "RegDwordBigEndian": val = "REG_DWORD"
elif val == "RegLink": val = "REG_LINK"
elif val == "RegMultiSz": val = "REG_MULTI_SZ"
elif val == "RegResourceList": val = "REG_RESOURCE_LIST"
elif val == "RegFullResourceDescription": val = "REG_FULL_RESOURCE_DESCRIPTOR"
elif val == "RegResourceRequirementsList": val = "REG_RESOURCE_REQUIREMENTS_LIST"
elif val == "RegQword": val = "REG_QWORD"
# TL: Added RegFileTime, represent as BINARY
elif val == "RegFileTime": val = "REG_BINARY"
# TL: Added 14 + 12, represented as BINARY
elif val == "14": val = "REG_BINARY"
elif val == "12": val = "REG_BINARY"
        # Not 100% sure about the Registry library's RegUnknown type;
        # map it to no type (REG_NONE), just to be safe.
elif val == "RegUnknown": val = "REG_NONE"
# TL: Some recovered cells have incorrect data_type
# If the data_type is an integer, set it to binary
#else:
# val = "REG_BINARY"
if not val in [
None,
"REG_NONE",
"REG_SZ",
"REG_EXPAND_SZ",
"REG_BINARY",
"REG_DWORD",
"REG_DWORD_BIG_ENDIAN",
"REG_DWORD_LITTLE_ENDIAN",
"REG_QWORD_LITTLE_ENDIAN",
"REG_LINK",
"REG_MULTI_SZ",
"REG_RESOURCE_LIST",
"REG_FULL_RESOURCE_DESCRIPTOR",
"REG_RESOURCE_REQUIREMENTS_LIST",
"REG_QWORD"
]:
            if not (isinstance(val, int) or (isinstance(val, str) and val.isdigit())):
raise ValueError("Unexpected value data type received: %r, type %r." % (val, type(val)))
self._data_type = val
@property
def diffs(self):
return self._diffs
@diffs.setter
def diffs(self, value):
_typecheck(value, set)
self._diffs = value
@property
def error(self):
return self._error
@error.setter
def error(self, value):
if not value is None:
_typecheck(value, str)
self._error = value
@property
def hive_object(self):
"""Reference to the containing hive object. Not meant to be propagated with __repr__ or to_Element()."""
return self._hive_object
@hive_object.setter
def hive_object(self, val):
if not val is None:
_typecheck(val, HiveObject)
self._hive_object = val
@property
def mtime(self):
return self._mtime
@mtime.setter
def mtime(self, val):
if val is None:
self._mtime = None
elif isinstance(val, TimestampObject):
self._mtime = val
else:
self._mtime = TimestampObject(val, name="mtime")
self.sanity_check()
@property
def name_type(self):
return self._name_type
@name_type.setter
def name_type(self, val):
if not val is None:
assert val in ["k", "v"]
self._name_type = val
@property
def original_cellobject(self):
return self._original_cellobject
@original_cellobject.setter
def original_cellobject(self, val):
if not val is None:
_typecheck(val, CellObject)
self._original_cellobject = val
@property
def parent_object(self):
"""This object is an extremely sparse CellObject, containing just identifying information. Alternately, it can be an entire object reference to the parent Object, though uniqueness should be checked."""
return self._parent_object
@parent_object.setter
def parent_object(self, val):
if not val is None:
_typecheck(val, CellObject)
self._parent_object = val
@property
def root(self):
return self._root
@root.setter
def root(self, val):
self._root = _boolcast(val)
# TL: Added rootkey property getter
@property
def rootkey(self):
return self._rootkey
# TL: Added rootkey property setter
@rootkey.setter
def rootkey(self, val):
self._rootkey = _strcast(val)
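# A minimal usage sketch for CellObject (illustrative only; the Registry path
# and value data below are hypothetical):
#
#     co = CellObject()
#     co.name_type = "v"                 # "v" = value; "k" = key (node)
#     co.cellpath = "CMI-CreateHive{...}\\Setup\\CmdLine"
#     co.basename = "CmdLine"
#     co.data_type = "RegSz"             # normalized to "REG_SZ" by the setter
#     co.data = "Y21kLmV4ZQ=="           # base64-encoded value data
#     co.data_encoding = "base64"
#     print(co.to_regxml())
#
# compare_to_other() returns the set of differing property names; passing,
# e.g., ignore_properties={"cellpath", "basename"} skips path-related
# differences when matching moved cells.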
def iterparse(filename, events=("start","end"), **kwargs):
"""
Generator. Yields a stream of populated DFXMLObjects, VolumeObjects and FileObjects, paired with an event type ("start" or "end"). The DFXMLObject and VolumeObjects do NOT have their child lists populated with this method - that is left to the calling program.
The event type interface is meant to match the interface of ElementTree's iterparse; this is simply for familiarity's sake. DFXMLObjects and VolumeObjects are yielded with "start" when the stream of VolumeObject or FileObjects begins - that is, they are yielded after being fully constructed up to the potentially-lengthy child object stream. FileObjects are yielded only with "end".
@param filename: A string
@param events: Events. Optional. A tuple of strings, containing "start" and/or "end".
@param dfxmlobject: A DFXMLObject document. Optional. A DFXMLObject is created and yielded in the object stream if this argument is not supplied.
@param fiwalk: Optional. Path to a particular fiwalk build you want to run.
"""
#The DFXML stream file handle.
fh = None
subp = None
import platform
if platform.system() == "Windows":
fiwalk_loc = "fiwalk" + os.sep + "fiwalk-4.2.0.exe"
        fiwalk_path = kwargs.get("fiwalk", fiwalk_loc)
else:
fiwalk_path = kwargs.get("fiwalk", "fiwalk")
#subp_command = [fiwalk_path, "-x", filename]
#subp_command = [fiwalk_path, "-z", "-M", "-x", filename]
subp_command = [fiwalk_path, "-z", "-g", "-b", "-x", filename]
if filename.endswith("xml"):
fh = open(filename, "rb")
else:
subp = subprocess.Popen(subp_command, stdout=subprocess.PIPE)
fh = subp.stdout
_events = set()
for e in events:
if not e in ("start","end"):
raise ValueError("Unexpected event type: %r. Expecting 'start', 'end'." % e)
_events.add(e)
dobj = kwargs.get("dfxmlobject", DFXMLObject())
#The only way to efficiently populate VolumeObjects is to populate the object when the stream has hit its first FileObject.
vobj = None
#It doesn't seem ElementTree allows fetching parents of Elements that are incomplete (just hit the "start" event). So, build a volume Element when we've hit "<volume ... >", glomming all elements until the first fileobject is hit.
#Likewise with the Element for the DFXMLObject.
dfxml_proxy = None
volume_proxy = None
#State machine, used to track when the first fileobject of a volume is encountered.
READING_START = 0
READING_PRESTREAM = 1 #DFXML metadata, pre-Object stream
READING_VOLUMES = 2
READING_FILES = 3
READING_POSTSTREAM = 4 #DFXML metadata, post-Object stream (typically the <rusage> element)
_state = READING_START
for (ETevent, elem) in ET.iterparse(fh, events=("start-ns", "start", "end")):
#View the object event stream in debug mode
#_logger.debug("(event, elem) = (%r, %r)" % (ETevent, elem))
#if ETevent in ("start", "end"):
# _logger.debug("_ET_tostring(elem) = %r" % _ET_tostring(elem))
#Track namespaces
if ETevent == "start-ns":
dobj.add_namespace(*elem)
ET.register_namespace(*elem)
continue
#Split tag name into namespace and local name
(ns, ln) = _qsplit(elem.tag)
if ETevent == "start":
if ln == "dfxml":
if _state != READING_START:
raise ValueError("Encountered a <dfxml> element, but the parser isn't in its start state. Recursive <dfxml> declarations aren't supported at this time.")
dfxml_proxy = ET.Element(elem.tag)
for k in elem.attrib:
#Note that xmlns declarations don't appear in elem.attrib.
dfxml_proxy.attrib[k] = elem.attrib[k]
_state = READING_PRESTREAM
elif ln == "volume":
if _state == READING_PRESTREAM:
#Cut; yield DFXMLObject now.
dobj.populate_from_Element(dfxml_proxy)
if "start" in _events:
yield ("start", dobj)
#Start populating a new Volume proxy.
volume_proxy = ET.Element(elem.tag)
for k in elem.attrib:
volume_proxy.attrib[k] = elem.attrib[k]
_state = READING_VOLUMES
elif ln == "fileobject":
if _state == READING_PRESTREAM:
#Cut; yield DFXMLObject now.
dobj.populate_from_Element(dfxml_proxy)
if "start" in _events:
yield ("start", dobj)
elif _state == READING_VOLUMES:
#_logger.debug("Encountered a fileobject while reading volume properties. Yielding volume now.")
#Cut; yield VolumeObject now.
if volume_proxy is not None:
vobj = VolumeObject()
vobj.populate_from_Element(volume_proxy)
if "start" in _events:
yield ("start", vobj)
#Reset
volume_proxy.clear()
volume_proxy = None
_state = READING_FILES
elif ETevent == "end":
if ln == "fileobject":
if _state in (READING_PRESTREAM, READING_POSTSTREAM):
#This particular branch can be reached if there are trailing fileobject elements after the volume element. This would happen if a tool needed to represent files (likely reassembled fragments) found outside all the partitions.
#More frequently, we hit this point when there are no volume groupings.
vobj = None
fi = FileObject()
fi.populate_from_Element(elem)
fi.volume_object = vobj
#_logger.debug("fi = %r" % fi)
if "end" in _events:
yield ("end", fi)
#Reset
elem.clear()
elif elem.tag == "dfxml":
if "end" in _events:
yield ("end", dobj)
elif elem.tag == "volume":
if "end" in _events:
yield ("end", vobj)
_state = READING_POSTSTREAM
elif _state == READING_VOLUMES:
#This is a volume property; glom onto the proxy.
if volume_proxy is not None:
volume_proxy.append(elem)
elif _state == READING_PRESTREAM:
if ln in ["metadata", "creator", "source"]:
#This is a direct child of the DFXML document property; glom onto the proxy.
if dfxml_proxy is not None:
dfxml_proxy.append(elem)
#If we called Fiwalk, double-check that it exited successfully.
if not subp is None:
_logger.debug("Calling wait() to let the Fiwalk subprocess terminate...") #Just reading from subp.stdout doesn't let the process terminate; it only finishes working.
subp.wait()
if subp.returncode != 0:
            raise subprocess.CalledProcessError(subp.returncode, subp_command)
_logger.debug("...Done.")
def iterparse_CellObjects(filename, events=("start","end"), **kwargs):
""" Iterparse implementation for RegXML stdout from CellXML. """
    #The RegXML stream file handle.
fh = None
subp = None
import platform
import subprocess
if platform.system() == "Windows":
cellxml_loc = "CellXML-Registry-1.3.1" + os.sep + "CellXML-Registry-1.3.1.exe"
    else:
        print("Error: cannot parse hives using CellXML-Registry on non-Windows platforms.")
        return
# Perform a quick test to ensure hive file is parsable
# This uses the -c feature in CellXML-Registry
if not filename.endswith("xml"):
testcmd = [cellxml_loc, '-c', '-f', filename]
p = subprocess.Popen(testcmd,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
bufsize = -1)
output, error = p.communicate()
# If exit code of CellXML-Registry is not 0, exit.
# Probably should not silently exit (add error in future)
if p.returncode != 0:
return
#subp_command = [cellxml_loc, "-f", filename]
subp_command = [cellxml_loc, "-r", "-f", filename]
if filename.endswith("xml"):
fh = open(filename, "rb")
else:
subp = subprocess.Popen(subp_command, stdout=subprocess.PIPE)
fh = subp.stdout
#The RegXML stream file handle.
#fh = open(filename, "rb")
_events = set()
for e in events:
if not e in ("start","end"):
raise ValueError("Unexpected event type: %r. Expecting 'start', 'end'." % e)
_events.add(e)
robj = kwargs.get("regxmlobject", RegXMLObject())
hobj = kwargs.get("hiveobject", HiveObject())
cobj = kwargs.get("cellobject", CellObject())
    #It doesn't seem ElementTree allows fetching parents of Elements that are incomplete (just hit the "start" event). So, build a hive Element when we've hit "<hive ...>" (or "<msregistry ...>"), glomming all elements until the first cellobject is hit.
    #Likewise with the Element for the RegXMLObject.
regxml_proxy = None
hive_proxy = None
msregistry_proxy = None
    #State machine, used to track when the first cellobject of a hive is encountered.
    READING_START = 0
    READING_PRESTREAM = 1 #RegXML metadata, pre-Object stream
    READING_VOLUMES = 2
    READING_FILES = 3
    READING_POSTSTREAM = 4 #RegXML metadata, post-Object stream (typically the <rusage> element)
_state = READING_START
for (ETevent, elem) in ET.iterparse(fh, events=("start-ns", "start", "end")):
#View the object event stream in debug mode
#_logger.debug("(event, elem) = (%r, %r)" % (ETevent, elem))
#if ETevent in ("start", "end"):
# _logger.debug("_ET_tostring(elem) = %r" % _ET_tostring(elem))
#Track namespaces
if ETevent == "start-ns":
robj.add_namespace(*elem)
ET.register_namespace(*elem)
continue
#Split tag name into namespace and local name
(ns, ln) = _qsplit(elem.tag)
#print(ns,ln)
if ETevent == "start":
if ln == "msregistry" or ln == "hive":
# if _state != READING_START:
# raise ValueError("Encountered a <msregistry> element, but the parser isn't in its start state. Recursive <msregistry> declarations aren't supported at this time.")
hive_proxy = ET.Element(elem.tag)
for k in elem.attrib:
hive_proxy.attrib[k] = elem.attrib[k]
_state = READING_PRESTREAM
elif ETevent == "end":
if ln == "cellobject":
if _state in (READING_PRESTREAM, READING_POSTSTREAM):
                    #This branch can be reached if there are trailing cellobject elements after the hive element, or, more commonly, when there are no hive groupings.
co = CellObject()
co.populate_from_Element(elem)
#fi.volume_object = vobj
#_logger.debug("fi = %r" % fi)
if "end" in _events:
yield ("end", co)
#Reset
elem.clear()
elif elem.tag == "msregistry" or elem.tag == "hive":
if "end" in _events:
yield ("end", robj)
_state = READING_POSTSTREAM
elif _state == READING_PRESTREAM:
if ln in ["metadata", "creator", "source"]:
#This is a direct child of the DFXML document property; glom onto the proxy.
if regxml_proxy is not None:
regxml_proxy.append(elem)
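# A sketch of streaming cells out of a hive with iterparse_CellObjects()
# (Windows-only, per the CellXML-Registry guard above; the hive file name
# is hypothetical):
#
#     for (event, obj) in iterparse_CellObjects("NTUSER.DAT"):
#         if event == "end" and isinstance(obj, CellObject):
#             print("%s (%s)" % (obj.cellpath, obj.data_type))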
def parse(filename):
"""Returns a DFXMLObject populated from the contents of the (string) filename argument."""
retval = None
appender = None
for (event, obj) in iterparse(filename):
if event == "start":
if isinstance(obj, DFXMLObject):
retval = obj
appender = obj
elif isinstance(obj, VolumeObject):
retval.append(obj)
appender = obj
elif event == "end":
if isinstance(obj, VolumeObject):
appender = retval
elif isinstance(obj, FileObject):
appender.append(obj)
return retval
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
logging.basicConfig(level=logging.DEBUG)
#Run unit tests
assert _intcast(-1) == -1
assert _intcast("-1") == -1
assert _qsplit("{http://www.w3.org/2001/XMLSchema}all") == ("http://www.w3.org/2001/XMLSchema","all")
assert _qsplit("http://www.w3.org/2001/XMLSchema}all") == (None, "http://www.w3.org/2001/XMLSchema}all")
fi = FileObject()
#Check property setting
fi.mtime = "1999-12-31T23:59:59Z"
_logger.debug("fi = %r" % fi)
#Check bad property setting
failed = None
try:
fi.mtime = "Not a timestamp"
failed = False
except:
failed = True
_logger.debug("fi = %r" % fi)
_logger.debug("failed = %r" % failed)
assert failed
t0 = TimestampObject(prec="100ns", name="mtime")
_logger.debug("t0 = %r" % t0)
assert t0.prec[0] == 100
assert t0.prec[1] == "ns"
t1 = TimestampObject("2009-01-23T01:23:45Z", prec="2", name="atime")
_logger.debug("t1 = %r" % t1)
assert t1.prec[0] == 2
assert t1.prec[1] == "s"
print("Unit tests passed.")
|
thomaslaurenson/Vestigium
|
dfxml/Objects.py
|
Python
|
gpl-2.0
| 127,176
|
from action import *
from conn import *
from logic import *
from log import *
from protocol import *
from schedule import *
from server import *
from util import *
|
goripcity/IceServer
|
server/__init__.py
|
Python
|
gpl-2.0
| 164
|
from django.core.exceptions import ImproperlyConfigured
from django import VERSION
if VERSION[:2] < (1, 7):
    raise ImproperlyConfigured(
        "The app_cache package requires Django 1.7 or higher.")
from .core import AppCache
|
hsfzxjy/wisecitymbc
|
site_packages/app_cache/__init__.py
|
Python
|
gpl-2.0
| 258
|
"""test passlib.hosts"""
#=============================================================================
# imports
#=============================================================================
from __future__ import with_statement
# core
import logging; log = logging.getLogger(__name__)
import warnings
# site
# pkg
from lib.passlib import hosts, hash as hashmod
from lib.passlib.utils import unix_crypt_schemes
from lib.passlib.tests.utils import TestCase
# module
#=============================================================================
# test predefined app contexts
#=============================================================================
class HostsTest(TestCase):
"""perform general tests to make sure contexts work"""
# NOTE: these tests are not really comprehensive,
# since they would do little but duplicate
# the presets in apps.py
#
# they mainly try to ensure no typos
# or dynamic behavior foul-ups.
def check_unix_disabled(self, ctx):
for hash in [
"",
"!",
"*",
"!$1$TXl/FX/U$BZge.lr.ux6ekjEjxmzwz0",
]:
self.assertEqual(ctx.identify(hash), 'unix_disabled')
self.assertFalse(ctx.verify('test', hash))
def test_linux_context(self):
ctx = hosts.linux_context
for hash in [
('$6$rounds=41128$VoQLvDjkaZ6L6BIE$4pt.1Ll1XdDYduEwEYPCMOBiR6W6'
'znsyUEoNlcVXpv2gKKIbQolgmTGe6uEEVJ7azUxuc8Tf7zV9SD2z7Ij751'),
('$5$rounds=31817$iZGmlyBQ99JSB5n6$p4E.pdPBWx19OajgjLRiOW0itGny'
'xDGgMlDcOsfaI17'),
'$1$TXl/FX/U$BZge.lr.ux6ekjEjxmzwz0',
'kAJJz.Rwp0A/I',
]:
self.assertTrue(ctx.verify("test", hash))
self.check_unix_disabled(ctx)
def test_bsd_contexts(self):
for ctx in [
hosts.freebsd_context,
hosts.openbsd_context,
hosts.netbsd_context,
]:
for hash in [
'$1$TXl/FX/U$BZge.lr.ux6ekjEjxmzwz0',
'kAJJz.Rwp0A/I',
]:
self.assertTrue(ctx.verify("test", hash))
h1 = "$2a$04$yjDgE74RJkeqC0/1NheSSOrvKeu9IbKDpcQf/Ox3qsrRS/Kw42qIS"
if hashmod.bcrypt.has_backend():
self.assertTrue(ctx.verify("test", h1))
else:
self.assertEqual(ctx.identify(h1), "bcrypt")
self.check_unix_disabled(ctx)
def test_host_context(self):
ctx = getattr(hosts, "host_context", None)
if not ctx:
return self.skipTest("host_context not available on this platform")
# validate schemes is non-empty,
# and contains unix_disabled + at least one real scheme
schemes = list(ctx.schemes())
self.assertTrue(schemes, "appears to be unix system, but no known schemes supported by crypt")
self.assertTrue('unix_disabled' in schemes)
schemes.remove("unix_disabled")
self.assertTrue(schemes, "should have schemes beside fallback scheme")
self.assertTrue(set(unix_crypt_schemes).issuperset(schemes))
# check for hash support
self.check_unix_disabled(ctx)
for scheme, hash in [
("sha512_crypt", ('$6$rounds=41128$VoQLvDjkaZ6L6BIE$4pt.1Ll1XdDYduEwEYPCMOBiR6W6'
'znsyUEoNlcVXpv2gKKIbQolgmTGe6uEEVJ7azUxuc8Tf7zV9SD2z7Ij751')),
("sha256_crypt", ('$5$rounds=31817$iZGmlyBQ99JSB5n6$p4E.pdPBWx19OajgjLRiOW0itGny'
'xDGgMlDcOsfaI17')),
("md5_crypt", '$1$TXl/FX/U$BZge.lr.ux6ekjEjxmzwz0'),
("des_crypt", 'kAJJz.Rwp0A/I'),
]:
if scheme in schemes:
self.assertTrue(ctx.verify("test", hash))
#=============================================================================
# eof
#=============================================================================
|
theguardian/JIRA-APPy
|
lib/passlib/tests/test_hosts.py
|
Python
|
gpl-2.0
| 3,934
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
BarPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
import numpy as np
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterTableField
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.outputs import OutputHTML
from processing.tools import vector
from processing.tools import dataobjects
class BarPlot(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
NAME_FIELD = 'NAME_FIELD'
VALUE_FIELD = 'VALUE_FIELD'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Bar plot')
self.group, self.i18n_group = self.trAlgorithm('Graphics')
self.addParameter(ParameterTable(self.INPUT, self.tr('Input table')))
self.addParameter(ParameterTableField(self.NAME_FIELD,
self.tr('Category name field'),
self.INPUT,
ParameterTableField.DATA_TYPE_NUMBER))
self.addParameter(ParameterTableField(self.VALUE_FIELD,
self.tr('Value field'),
self.INPUT,
ParameterTableField.DATA_TYPE_NUMBER))
self.addOutput(OutputHTML(self.OUTPUT, self.tr('Bar plot')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
namefieldname = self.getParameterValue(self.NAME_FIELD)
valuefieldname = self.getParameterValue(self.VALUE_FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.values(layer, namefieldname, valuefieldname)
plt.close()
ind = np.arange(len(values[namefieldname]))
width = 0.8
plt.bar(ind, values[valuefieldname], width, color='r')
plt.xticks(ind, values[namefieldname], rotation=45)
plotFilename = output + '.png'
lab.savefig(plotFilename)
with open(output, 'w') as f:
f.write('<html><img src="' + plotFilename + '"/></html>')
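# A standalone sketch of the plotting core above, runnable outside QGIS
# (the category names and values are illustrative):
#
#     import matplotlib.pyplot as plt
#     import numpy as np
#     names, vals = ['a', 'b', 'c'], [3, 1, 2]
#     ind = np.arange(len(names))
#     plt.bar(ind, vals, 0.8, color='r')
#     plt.xticks(ind, names, rotation=45)
#     plt.savefig('barplot.png')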
|
drnextgis/QGIS
|
python/plugins/processing/algs/qgis/BarPlot.py
|
Python
|
gpl-2.0
| 3,271
|
###############################################################################
# test_sym_char_class.py: Test module for PATTERN MATCH - symbol char class
# class
# Copyright (C) 2011 Brno University of Technology, ANT @ FIT
# Author(s): Jaroslav Suchodol <xsucho04@stud.fit.vutbr.cz>
###############################################################################
#
# LICENSE TERMS
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. All advertising materials mentioning features or use of this software
# or firmware must display the following acknowledgement:
#
# This product includes software developed by the University of
# Technology, Faculty of Information Technology, Brno and its
# contributors.
#
# 4. Neither the name of the Company nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# This software or firmware is provided ``as is'', and any express or implied
# warranties, including, but not limited to, the implied warranties of
# merchantability and fitness for a particular purpose are disclaimed.
# In no event shall the company or contributors be liable for any
# direct, indirect, incidental, special, exemplary, or consequential
# damages (including, but not limited to, procurement of substitute
# goods or services; loss of use, data, or profits; or business
# interruption) however caused and on any theory of liability, whether
# in contract, strict liability, or tort (including negligence or
# otherwise) arising in any way out of the use of this software, even
# if advised of the possibility of such damage.
#
# $Id$
from netbench.pattern_match.b_symbol import io_mapper
from netbench.pattern_match.sym_char import b_Sym_char
from netbench.pattern_match.sym_kchar import b_Sym_kchar
from netbench.pattern_match.sym_string import b_Sym_string
from netbench.pattern_match.sym_char_class import b_Sym_char_class
from netbench.pattern_match.pattern_exceptions import \
symbol_string_to_short, \
symbol_accept_exception, \
symbol_import_exception
import unittest
class test_b_Sym_char_class(unittest.TestCase):
"""A base test class to represent a char class symbol."""
def test_accept(self):
"""accept()"""
        # method accept(text):
        # If len(text) == 0, the symbol_string_to_short exception should
        # be raised.
ab = b_Sym_char_class("ab", set(['a', 'b']), 0)
try:
ab.accept("")
self.assertTrue(False)
except symbol_string_to_short:
self.assertTrue(True)
        # If text[0] is in self.charClass, text[1:] should be returned.
ab = b_Sym_char_class("ab", set(['a', 'b']), 0)
self.assertTrue(ab.accept("adam") == "dam")
        # If text[0] is not in self.charClass, the symbol_accept_exception
        # exception should be raised.
ab = b_Sym_char_class("ab", set(['a', 'b']), 0)
try:
ab.accept("eva")
self.assertTrue(False)
except symbol_accept_exception:
self.assertTrue(True)
def test_collision(self):
"""collision()"""
        # method collision(set_of_symbols):
        # Try with suitable objects of the sym_char, sym_char_class and
        # sym_string classes; check for correct output (collision / no collision).
a = b_Sym_char('a', 'a', 0)
cd = b_Sym_char_class("set(['c', 'd'])", set(['c', 'd']), 1)
ef = b_Sym_char_class("set(['e', 'f'])", set(['e', 'f']), 2)
adam = b_Sym_string("baba", "baba", 3)
set_of_symbols = set([a, cd, adam])
self.assertTrue(ef.collision(set_of_symbols) == False)
fg = b_Sym_char_class("set(['f', 'g'])", set(['f', 'g']), 4)
set_of_symbols = set([a, fg, adam])
self.assertTrue(ef.collision(set_of_symbols) == True)
def test_export_symbol(self):
"""export_symbol()"""
        # Check that the correct exported representation of the symbol is returned.
cd = b_Sym_char_class("set(['c', 'd'])", set(['c', 'd']), 0)
self.assertTrue(cd.export_symbol() == "16364")
def test_import_symbol(self):
"""import_symbol()"""
        # method import_symbol(text_repr, tid):
        # Check that the correct object is created from text_repr, that
        # self._id is set to tid, and that all parameters are set correctly.
cd = b_Sym_char_class("set(['c', 'd'])", set(['c', 'd']), 0)
cd.import_symbol("16566", 15)
self.assertTrue(cd.charClass == set(['e', 'f']))
self.assertTrue(cd._text == "[ef]")
self.assertTrue(cd._id == 15)
        # If text_repr encodes a different symbol type, the
        # symbol_import_exception exception should be raised.
try:
cd.import_symbol("061", 17)
self.assertTrue(False)
except symbol_import_exception:
self.assertTrue(True)
def test___str__(self):
"""__str__()"""
        # Check that str(self.charClass) is returned.
cd = b_Sym_char_class("set(['c', 'd'])", set(['c', 'd']), 0)
self.assertTrue(cd.__str__() == str(cd.charClass))
def test_compute_equal(self):
"""compute_equal()"""
        # method compute_equal(other):
        # If other is of type sym_char_class, return True when the character
        # classes are the same; otherwise return False.
cd = b_Sym_char_class("set(['c', 'd'])", set(['c', 'd']), 0)
ef = b_Sym_char_class("set(['e', 'f'])", set(['e', 'f']), 1)
self.assertTrue(cd.compute_equal(ef) == False)
ef = b_Sym_char_class("set(['c', 'd'])", set(['d', 'c']), 1)
self.assertTrue(cd.compute_equal(ef) == True)
a = b_Sym_char('a', 'a', 0)
self.assertTrue(cd.compute_equal(a) == False)
def test___hash__(self):
"""__hash__()"""
# Check return hash(frozenset(self.charClass)).
ef = b_Sym_char_class("set(['e', 'f'])", set(['e', 'f']), 1)
self.assertTrue(ef.__hash__() == hash(frozenset(ef.charClass)))
def test___repr__(self):
"""__repr__()"""
        # Check that repr(self.charClass) is returned.
ef = b_Sym_char_class("set(['e', 'f'])", set(['e', 'f']), 1)
self.assertTrue(ef.__repr__() == repr(ef.charClass))
def test_get_support_type(self):
"""get_support_type()"""
# Check return [b_symbol.io_mapper["b_Sym_char_class"]].
ef = b_Sym_char_class("set(['e', 'f'])", set(['e', 'f']), 1)
self.assertTrue(ef.get_support_type() ==
[io_mapper["b_Sym_char_class"]])
def test_compute_collision(self):
"""compute_collision()"""
        # Check that collisions are computed correctly for sym_char_class objects.
cd = b_Sym_char_class("set(['c', 'd'])", set(['c', 'd']), 0)
ef = b_Sym_char_class("set(['e', 'f'])", set(['e', 'f']), 1)
self.assertTrue(cd.compute_collision(ef) == (set([cd]), set(), set([ef])))
ef = b_Sym_char_class("set(['e', 'f'])", set(['c', 'f']), 1)
result = cd.compute_collision(ef)
newSymbol = result[0].pop()
self.assertTrue(newSymbol.charClass == set(['d']))
newSymbol = result[2].pop()
self.assertTrue(newSymbol.charClass == set(['f']))
newSymbol = result[1].pop()
self.assertTrue(newSymbol.charClass == set(['c']))
def test_get_text(self):
"""get_text()"""
        # Check that the correct textual representation is returned.
ef = b_Sym_char_class("set(['e', 'f'])", set(['e', 'f']), 1)
self.assertTrue(ef.get_text() == "[ef]")
chars = set()
for i in range(0, 256):
chars.add(chr(i))
chars.remove('2')
chars.remove('3')
chars.remove('4')
chars.remove('7')
chars.remove('8')
chars.remove('9')
big_set = b_Sym_char_class("big_set", chars, 2)
self.assertTrue(big_set.get_text() == "^[234789]")
def test_is_empty(self):
"""is_empty()"""
        # Return True if len(self.charClass) == 0 and self._id != -1;
        # otherwise return False.
ef = b_Sym_char_class("set(['e', 'f'])", set(['e', 'f']), 1)
self.assertTrue(ef.is_empty() == False)
near_empty = b_Sym_char_class("near_empty", set(), -1)
self.assertTrue(near_empty.is_empty() == False)
empty = b_Sym_char_class("empty", set(), 15)
self.assertTrue(empty.is_empty() == True)
def test_compute_double_stride(self):
"""compute_double_stride()"""
        # Method compute_double_stride(compSymbol, reverse, last, local_chars).
        # Test with compSymbol of types sym_char and sym_char_class.
        # If reverse is True, the order of self and compSymbol is swapped.
# compSymbol type sym_char ; reverse = False
ac = b_Sym_char_class('ac', set(['a', 'c']), 0)
b = b_Sym_char('b', 'b', 1)
local_chars = list()
chars = set()
for i in range(0,256):
chars.add(chr(i))
local_chars.append(chars)
new_kchar = ac.compute_double_stride(b, False, 2, local_chars)[0]
new_local_chars = ac.compute_double_stride(b, False, 2, local_chars)[1]
reference_kchar = b_Sym_kchar("[ac]b", (set(['a', 'c']),'b'), 2)
reference_kchar_2 = \
b_Sym_kchar("[ac]b", (frozenset(['a', 'c']),frozenset(['b'])), 2)
reference_kchar.last = 2
reference_kchar_2.last = 2
reference_local_chars = local_chars[0] - set([b.char])
self.assertTrue(new_kchar == reference_kchar
or new_kchar == reference_kchar_2)
self.assertTrue(new_local_chars[0] == reference_local_chars)
self.assertTrue(new_kchar.last == 2)
# compSymbol type sym_char_class ; reverse = False
ac = b_Sym_char_class('ac', set(['a', 'c']), 0)
bc = b_Sym_char_class("set(['b', 'c'])", set(['b', 'c']), 1)
local_chars = list()
chars = set()
for i in range(0,256):
chars.add(chr(i))
local_chars.append(chars)
new_kchar = ac.compute_double_stride(bc, False, 3, local_chars)[0]
new_local_chars = ac.compute_double_stride(bc, False, 3, local_chars)[1]
reference_kchar = b_Sym_kchar("[ac][bc]",
(set(['a', 'c']), set(['b', 'c'])), 2)
reference_kchar_2 = \
b_Sym_kchar("[ac][bc]",
(frozenset(['a', 'c']),frozenset(['b','c'])), 2)
reference_kchar.last = 3
reference_kchar_2.last = 3
reference_local_chars = local_chars[0] - bc.charClass
self.assertTrue(new_kchar == reference_kchar
or new_kchar == reference_kchar_2)
self.assertTrue(new_local_chars[0] == reference_local_chars)
self.assertTrue(new_kchar.last == 3)
# compSymbol type sym_char ; reverse = True
ac = b_Sym_char_class('ac', set(['a', 'c']), 0)
b = b_Sym_char('b', 'b', 1)
local_chars = list()
chars = set()
for i in range(0,256):
chars.add(chr(i))
local_chars.append(chars)
new_kchar = ac.compute_double_stride(b, True, 2, local_chars)[0]
new_local_chars = ac.compute_double_stride(b, True, 2, local_chars)[1]
reference_kchar = b_Sym_kchar("b[ac]", ('b', set(['a', 'c'])), 2)
reference_kchar_2 = \
b_Sym_kchar("b[ac]", (frozenset(['b']),frozenset(['a', 'c'])), 2)
reference_kchar.last = 2
reference_kchar_2.last = 2
reference_local_chars = local_chars[0] - ac.charClass
self.assertTrue(new_kchar == reference_kchar
or new_kchar == reference_kchar_2)
self.assertTrue(new_local_chars[0] == reference_local_chars)
self.assertTrue(new_kchar.last == 2)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(test_b_Sym_char_class)
unittest.TextTestRunner(verbosity=2).run(suite)
|
vhavlena/appreal
|
netbench/pattern_match/tests/test_sym_char_class.py
|
Python
|
gpl-2.0
| 12,368
|
"""optparse - a powerful, extensible, and easy-to-use option parser.
By Greg Ward <gward@python.net>
Originally distributed as Optik; see http://optik.sourceforge.net/ .
If you have problems with this module, please do not file bugs,
patches, or feature requests with Python; instead, use Optik's
SourceForge project page:
http://sourceforge.net/projects/optik
For support, use the optik-users@lists.sourceforge.net mailing list
(http://lists.sourceforge.net/lists/listinfo/optik-users).
"""
# Python developers: please do not make changes to this file, since
# it is automatically generated from the Optik source code.
__version__ = "1.5.1"
__all__ = ['Option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError']
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import string, re
import types
from textwrap import fill, wrap  # used by HelpFormatter._format_text/format_option
def _repr(self):
return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
# This file was generated from:
# Id: option_parser.py 509 2006-04-20 00:58:24Z gward
# Id: option.py 509 2006-04-20 00:58:24Z gward
# Id: help.py 509 2006-04-20 00:58:24Z gward
# Id: errors.py 509 2006-04-20 00:58:24Z gward
try:
from gettext import gettext
except ImportError:
def gettext(message):
return message
_ = gettext
class OptParseError (Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class OptionError (OptParseError):
"""
Raised if an Option instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class OptionConflictError (OptionError):
"""
Raised if conflicting options are added to an OptionParser.
"""
class OptionValueError (OptParseError):
"""
Raised if an invalid option value is encountered on the command
line.
"""
class BadOptionError (OptParseError):
"""
Raised if an invalid option is seen on the command line.
"""
def __init__(self, opt_str):
self.opt_str = opt_str
def __str__(self):
return _("no such option: %s") % self.opt_str
class AmbiguousOptionError (BadOptionError):
"""
Raised if an ambiguous option is seen on the command line.
"""
def __init__(self, opt_str, possibilities):
BadOptionError.__init__(self, opt_str)
self.possibilities = possibilities
def __str__(self):
return (_("ambiguous option: %s (%s?)")
% (self.opt_str, ", ".join(self.possibilities)))
class HelpFormatter:
"""
Abstract base class for formatting option help. OptionParser
instances should use one of the HelpFormatter subclasses for
formatting help; by default IndentedHelpFormatter is used.
Instance attributes:
parser : OptionParser
the controlling OptionParser instance
indent_increment : int
the number of columns to indent per nesting level
max_help_position : int
the maximum starting column for option help text
help_position : int
the calculated starting column for option help text;
initially the same as the maximum
width : int
total number of columns for output (pass None to constructor for
this value to be taken from the $COLUMNS environment variable)
level : int
current indentation level
current_indent : int
current indentation level (in columns)
help_width : int
number of columns available for option help text (calculated)
default_tag : str
text to replace with each option's default value, "%default"
by default. Set to false value to disable default value expansion.
option_strings : { Option : str }
maps Option instances to the snippet of help text explaining
the syntax of that option, e.g. "-h, --help" or
"-fFILE, --file=FILE"
_short_opt_fmt : str
format string controlling how short options with values are
printed in help text. Must be either "%s%s" ("-fFILE") or
"%s %s" ("-f FILE"), because those are the two syntaxes that
Optik supports.
_long_opt_fmt : str
similar but for long options; must be either "%s %s" ("--file FILE")
or "%s=%s" ("--file=FILE").
"""
NO_DEFAULT_VALUE = "none"
def __init__(self,
indent_increment,
max_help_position,
width,
short_first):
self.parser = None
self.indent_increment = indent_increment
self.help_position = self.max_help_position = max_help_position
if width is None:
try:
width = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self.width = width
self.current_indent = 0
self.level = 0
self.help_width = None # computed later
self.short_first = short_first
self.default_tag = "%default"
self.option_strings = {}
self._short_opt_fmt = "%s %s"
self._long_opt_fmt = "%s=%s"
def set_parser(self, parser):
self.parser = parser
def set_short_opt_delimiter(self, delim):
if delim not in ("", " "):
raise ValueError(
"invalid metavar delimiter for short options: %r" % delim)
self._short_opt_fmt = "%s" + delim + "%s"
def set_long_opt_delimiter(self, delim):
if delim not in ("=", " "):
raise ValueError(
"invalid metavar delimiter for long options: %r" % delim)
self._long_opt_fmt = "%s" + delim + "%s"
def indent(self):
self.current_indent += self.indent_increment
self.level += 1
def dedent(self):
self.current_indent -= self.indent_increment
assert self.current_indent >= 0, "Indent decreased below 0."
self.level -= 1
def format_usage(self, usage):
raise NotImplementedError, "subclasses must implement"
def format_heading(self, heading):
raise NotImplementedError, "subclasses must implement"
def _format_text(self, text):
"""
Format a paragraph of free-form text for inclusion in the
help output at the current indentation level.
"""
text_width = self.width - self.current_indent
indent = " "*self.current_indent
return fill(text,
text_width,
initial_indent=indent,
subsequent_indent=indent)
def format_description(self, description):
if description:
return self._format_text(description) + "\n"
else:
return ""
def format_epilog(self, epilog):
if epilog:
return "\n" + self._format_text(epilog) + "\n"
else:
return ""
def expand_default(self, option):
if self.parser is None or not self.default_tag:
return option.help
default_value = self.parser.defaults.get(option.dest)
if default_value is NO_DEFAULT or default_value is None:
default_value = self.NO_DEFAULT_VALUE
return option.help.replace(self.default_tag, str(default_value))
def format_option(self, option):
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = wrap(help_text, self.help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def store_option_strings(self, parser):
self.indent()
max_len = 0
for opt in parser.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.indent()
for group in parser.option_groups:
for opt in group.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.dedent()
self.dedent()
self.help_position = min(max_len + 2, self.max_help_position)
self.help_width = self.width - self.help_position
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [self._short_opt_fmt % (sopt, metavar)
for sopt in option._short_opts]
long_opts = [self._long_opt_fmt % (lopt, metavar)
for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
"""Format help with indented section bodies.
"""
def __init__(self,
indent_increment=2,
max_help_position=24,
width=None,
short_first=1):
HelpFormatter.__init__(
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return _("Usage: %s\n") % usage
def format_heading(self, heading):
return "%*s%s:\n" % (self.current_indent, "", heading)
class TitledHelpFormatter (HelpFormatter):
"""Format help with underlined section headers.
"""
def __init__(self,
indent_increment=0,
max_help_position=24,
width=None,
short_first=0):
HelpFormatter.__init__ (
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return "%s %s\n" % (self.format_heading(_("Usage")), usage)
def format_heading(self, heading):
return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
def _parse_num(val, type):
if val[:2].lower() == "0x": # hexadecimal
radix = 16
elif val[:2].lower() == "0b": # binary
radix = 2
val = val[2:] or "0" # have to remove "0b" prefix
elif val[:1] == "0": # octal
radix = 8
else: # decimal
radix = 10
return type(val, radix)
def _parse_int(val):
return _parse_num(val, int)
def _parse_long(val):
return _parse_num(val, long)
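# Radix detection above, by example (values illustrative):
#   _parse_int("0x10")  -> 16  (hexadecimal)
#   _parse_int("0b101") -> 5   (binary)
#   _parse_int("010")   -> 8   (octal)
#   _parse_int("10")    -> 10  (decimal)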
_builtin_cvt = { "int" : (_parse_int, _("integer")),
"long" : (_parse_long, _("long integer")),
"float" : (float, _("floating-point")),
"complex" : (complex, _("complex")) }
def check_builtin(option, opt, value):
(cvt, what) = _builtin_cvt[option.type]
try:
return cvt(value)
except ValueError:
raise OptionValueError(
_("option %s: invalid %s value: %r") % (opt, what, value))
def check_choice(option, opt, value):
if value in option.choices:
return value
else:
choices = ", ".join(map(repr, option.choices))
raise OptionValueError(
_("option %s: invalid choice: %r (choose from %s)")
% (opt, value, choices))
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.
NO_DEFAULT = ("NO", "DEFAULT")
class Option:
"""
Instance attributes:
_short_opts : [string]
_long_opts : [string]
action : string
type : string
dest : string
default : any
nargs : int
const : any
choices : [string]
callback : function
callback_args : (any*)
callback_kwargs : { string : any }
help : string
metavar : string
"""
# The list of instance attributes that may be set through
# keyword args to the constructor.
ATTRS = ['action',
'type',
'dest',
'default',
'nargs',
'const',
'choices',
'callback',
'callback_args',
'callback_kwargs',
'help',
'metavar']
# The set of actions allowed by option parsers. Explicitly listed
# here so the constructor can validate its arguments.
ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count",
"callback",
"help",
"version")
# The set of actions that involve storing a value somewhere;
# also listed just for constructor argument validation. (If
# the action is one of these, there must be a destination.)
STORE_ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count")
# The set of actions for which it makes sense to supply a value
# type, ie. which may consume an argument from the command line.
TYPED_ACTIONS = ("store",
"append",
"callback")
# The set of actions which *require* a value type, ie. that
# always consume an argument from the command line.
ALWAYS_TYPED_ACTIONS = ("store",
"append")
# The set of actions which take a 'const' attribute.
CONST_ACTIONS = ("store_const",
"append_const")
# The set of known types for option parsers. Again, listed here for
# constructor argument validation.
TYPES = ("string", "int", "long", "float", "complex", "choice")
# Dictionary of argument checking functions, which convert and
# validate option arguments according to the option type.
#
# Signature of checking functions is:
# check(option : Option, opt : string, value : string) -> any
# where
# option is the Option instance calling the checker
# opt is the actual option seen on the command-line
# (eg. "-a", "--file")
# value is the option argument seen on the command-line
#
# The return value should be in the appropriate Python type
# for option.type -- eg. an integer if option.type == "int".
#
# If no checker is defined for a type, arguments will be
# unchecked and remain strings.
TYPE_CHECKER = { "int" : check_builtin,
"long" : check_builtin,
"float" : check_builtin,
"complex": check_builtin,
"choice" : check_choice,
}
# CHECK_METHODS is a list of unbound method objects; they are called
# by the constructor, in order, after all attributes are
# initialized. The list is created and filled in later, after all
# the methods are actually defined. (I just put it here because I
# like to define and document all class attributes in the same
# place.) Subclasses that add another _check_*() method should
# define their own CHECK_METHODS list that adds their check method
# to those from this class.
CHECK_METHODS = None
# -- Constructor/initialization methods ----------------------------
def __init__(self, *opts, **attrs):
# Set _short_opts, _long_opts attrs from 'opts' tuple.
# Have to be set now, in case no option strings are supplied.
self._short_opts = []
self._long_opts = []
opts = self._check_opt_strings(opts)
self._set_opt_strings(opts)
# Set all other attrs (action, type, etc.) from 'attrs' dict
self._set_attrs(attrs)
# Check all the attributes we just set. There are lots of
# complicated interdependencies, but luckily they can be farmed
# out to the _check_*() methods listed in CHECK_METHODS -- which
# could be handy for subclasses! The one thing these all share
# is that they raise OptionError if they discover a problem.
for checker in self.CHECK_METHODS:
checker(self)
def _check_opt_strings(self, opts):
# Filter out None because early versions of Optik had exactly
# one short option and one long option, either of which
# could be None.
opts = filter(None, opts)
if not opts:
raise TypeError("at least one option string must be supplied")
return opts
def _set_opt_strings(self, opts):
for opt in opts:
if len(opt) < 2:
raise OptionError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise OptionError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise OptionError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def _set_attrs(self, attrs):
for attr in self.ATTRS:
if attrs.has_key(attr):
setattr(self, attr, attrs[attr])
del attrs[attr]
else:
if attr == 'default':
setattr(self, attr, NO_DEFAULT)
else:
setattr(self, attr, None)
if attrs:
raise OptionError(
"invalid keyword arguments: %s" % ", ".join(attrs.keys()),
self)
# -- Constructor validation methods --------------------------------
def _check_action(self):
if self.action is None:
self.action = "store"
elif self.action not in self.ACTIONS:
raise OptionError("invalid action: %r" % self.action, self)
def _check_type(self):
if self.type is None:
if self.action in self.ALWAYS_TYPED_ACTIONS:
if self.choices is not None:
# The "choices" attribute implies "choice" type.
self.type = "choice"
else:
# No type given? "string" is the most sensible default.
self.type = "string"
else:
# Allow type objects or builtin type conversion functions
# (int, str, etc.) as an alternative to their names. (The
# complicated check of __builtin__ is only necessary for
# Python 2.1 and earlier, and is short-circuited by the
# first check on modern Pythons.)
import __builtin__
if ( type(self.type) is types.TypeType or
(hasattr(self.type, "__name__") and
getattr(__builtin__, self.type.__name__, None) is self.type) ):
self.type = self.type.__name__
if self.type == "str":
self.type = "string"
if self.type not in self.TYPES:
raise OptionError("invalid option type: %r" % self.type, self)
if self.action not in self.TYPED_ACTIONS:
raise OptionError(
"must not supply a type for action %r" % self.action, self)
def _check_choice(self):
if self.type == "choice":
if self.choices is None:
raise OptionError(
"must supply a list of choices for type 'choice'", self)
elif type(self.choices) not in (types.TupleType, types.ListType):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.choices)).split("'")[1], self)
elif self.choices is not None:
raise OptionError(
"must not supply choices for type %r" % self.type, self)
def _check_dest(self):
# No destination given, and we need one for this action. The
# self.type check is for callbacks that take a value.
takes_value = (self.action in self.STORE_ACTIONS or
self.type is not None)
if self.dest is None and takes_value:
# Glean a destination from the first long option string,
# or from the first short option string if no long options.
if self._long_opts:
# eg. "--foo-bar" -> "foo_bar"
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
self.dest = self._short_opts[0][1]
def _check_const(self):
if self.action not in self.CONST_ACTIONS and self.const is not None:
raise OptionError(
"'const' must not be supplied for action %r" % self.action,
self)
def _check_nargs(self):
if self.action in self.TYPED_ACTIONS:
if self.nargs is None:
self.nargs = 1
elif self.nargs is not None:
raise OptionError(
"'nargs' must not be supplied for action %r" % self.action,
self)
def _check_callback(self):
if self.action == "callback":
if not callable(self.callback):
raise OptionError(
"callback not callable: %r" % self.callback, self)
if (self.callback_args is not None and
type(self.callback_args) is not types.TupleType):
raise OptionError(
"callback_args, if supplied, must be a tuple: not %r"
% self.callback_args, self)
if (self.callback_kwargs is not None and
type(self.callback_kwargs) is not types.DictType):
raise OptionError(
"callback_kwargs, if supplied, must be a dict: not %r"
% self.callback_kwargs, self)
else:
if self.callback is not None:
raise OptionError(
"callback supplied (%r) for non-callback option"
% self.callback, self)
if self.callback_args is not None:
raise OptionError(
"callback_args supplied for non-callback option", self)
if self.callback_kwargs is not None:
raise OptionError(
"callback_kwargs supplied for non-callback option", self)
CHECK_METHODS = [_check_action,
_check_type,
_check_choice,
_check_dest,
_check_const,
_check_nargs,
_check_callback]
# -- Miscellaneous methods -----------------------------------------
def __str__(self):
return "/".join(self._short_opts + self._long_opts)
__repr__ = _repr
def takes_value(self):
return self.type is not None
def get_opt_string(self):
if self._long_opts:
return self._long_opts[0]
else:
return self._short_opts[0]
# -- Processing methods --------------------------------------------
def check_value(self, opt, value):
checker = self.TYPE_CHECKER.get(self.type)
if checker is None:
return value
else:
return checker(self, opt, value)
def convert_value(self, opt, value):
if value is not None:
if self.nargs == 1:
return self.check_value(opt, value)
else:
return tuple([self.check_value(opt, v) for v in value])
def process(self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
value = self.convert_value(opt, value)
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(
self.action, self.dest, opt, value, values, parser)
def take_action(self, action, dest, opt, value, values, parser):
if action == "store":
setattr(values, dest, value)
elif action == "store_const":
setattr(values, dest, self.const)
elif action == "store_true":
setattr(values, dest, True)
elif action == "store_false":
setattr(values, dest, False)
elif action == "append":
values.ensure_value(dest, []).append(value)
elif action == "append_const":
values.ensure_value(dest, []).append(self.const)
elif action == "count":
setattr(values, dest, values.ensure_value(dest, 0) + 1)
elif action == "callback":
args = self.callback_args or ()
kwargs = self.callback_kwargs or {}
self.callback(self, opt, value, parser, *args, **kwargs)
elif action == "help":
parser.print_help()
parser.exit()
elif action == "version":
parser.print_version()
parser.exit()
else:
raise RuntimeError, "unknown action %r" % self.action
return 1
# class Option
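# A minimal illustrative sketch (not part of the original module) of how
# Option's constructor-time validation behaves: the CHECK_METHODS listed
# above run in order and raise OptionError on inconsistent attribute
# combinations.  The option strings and values below are hypothetical.
def _example_option_validation():
    # "store" plus a 'choices' list implies type "choice" (_check_type),
    # and dest is gleaned from the first long option (_check_dest).
    opt = Option("-c", "--color", choices=["red", "blue"])
    assert opt.type == "choice" and opt.dest == "color"
    # 'const' is only legal for store_const/append_const (_check_const).
    try:
        Option("-x", action="store", const=42)
    except OptionError:
        pass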
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
# For compatibility with Python 2.2
try:
True, False
except NameError:
(True, False) = (1, 0)
def isbasestring(x):
return isinstance(x, types.StringType) or isinstance(x, types.UnicodeType)
class Values:
def __init__(self, defaults=None):
if defaults:
for (attr, val) in defaults.items():
setattr(self, attr, val)
def __str__(self):
return str(self.__dict__)
__repr__ = _repr
def __cmp__(self, other):
if isinstance(other, Values):
return cmp(self.__dict__, other.__dict__)
elif isinstance(other, types.DictType):
return cmp(self.__dict__, other)
else:
return -1
def _update_careful(self, dict):
"""
Update the option values from an arbitrary dictionary, but only
use keys from dict that already have a corresponding attribute
in self. Any keys in dict without a corresponding attribute
are silently ignored.
"""
for attr in dir(self):
if dict.has_key(attr):
dval = dict[attr]
if dval is not None:
setattr(self, attr, dval)
def _update_loose(self, dict):
"""
Update the option values from an arbitrary dictionary,
using all keys from the dictionary regardless of whether
they have a corresponding attribute in self or not.
"""
self.__dict__.update(dict)
def _update(self, dict, mode):
if mode == "careful":
self._update_careful(dict)
elif mode == "loose":
self._update_loose(dict)
else:
raise ValueError, "invalid update mode: %r" % mode
def read_module(self, modname, mode="careful"):
__import__(modname)
mod = sys.modules[modname]
self._update(vars(mod), mode)
def read_file(self, filename, mode="careful"):
vars = {}
execfile(filename, vars)
self._update(vars, mode)
def ensure_value(self, attr, value):
if not hasattr(self, attr) or getattr(self, attr) is None:
setattr(self, attr, value)
return getattr(self, attr)
class OptionContainer:
"""
Abstract base class.
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
option_list : [Option]
the list of Option objects contained by this OptionContainer
_short_opt : { string : Option }
dictionary mapping short option strings, eg. "-f" or "-X",
to the Option instances that implement them. If an Option
has multiple short option strings, it will appear in this
dictionary multiple times. [1]
_long_opt : { string : Option }
dictionary mapping long option strings, eg. "--file" or
"--exclude", to the Option instances that implement them.
Again, a given Option can occur multiple times in this
dictionary. [1]
defaults : { string : any }
dictionary mapping option destination names to default
values for each destination [1]
[1] These mappings are common to (shared by) all components of the
controlling OptionParser, where they are initially created.
"""
def __init__(self, option_class, conflict_handler, description):
# Initialize the option list and related data structures.
# This method must be provided by subclasses, and it must
# initialize at least the following instance attributes:
# option_list, _short_opt, _long_opt, defaults.
self._create_option_list()
self.option_class = option_class
self.set_conflict_handler(conflict_handler)
self.set_description(description)
def _create_option_mappings(self):
# For use by OptionParser constructor -- create the master
# option mappings used by this OptionParser and all
# OptionGroups that it owns.
self._short_opt = {} # single letter -> Option instance
self._long_opt = {} # long option -> Option instance
self.defaults = {} # maps option dest -> default value
def _share_option_mappings(self, parser):
# For use by OptionGroup constructor -- use shared option
# mappings from the OptionParser that owns this OptionGroup.
self._short_opt = parser._short_opt
self._long_opt = parser._long_opt
self.defaults = parser.defaults
def set_conflict_handler(self, handler):
if handler not in ("error", "resolve"):
raise ValueError, "invalid conflict_resolution value %r" % handler
self.conflict_handler = handler
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def destroy(self):
"""see OptionParser.destroy()."""
del self._short_opt
del self._long_opt
del self.defaults
# -- Option-adding methods -----------------------------------------
def _check_conflict(self, option):
conflict_opts = []
for opt in option._short_opts:
if self._short_opt.has_key(opt):
conflict_opts.append((opt, self._short_opt[opt]))
for opt in option._long_opts:
if self._long_opt.has_key(opt):
conflict_opts.append((opt, self._long_opt[opt]))
if conflict_opts:
handler = self.conflict_handler
if handler == "error":
raise OptionConflictError(
"conflicting option string(s): %s"
% ", ".join([co[0] for co in conflict_opts]),
option)
elif handler == "resolve":
for (opt, c_option) in conflict_opts:
if opt.startswith("--"):
c_option._long_opts.remove(opt)
del self._long_opt[opt]
else:
c_option._short_opts.remove(opt)
del self._short_opt[opt]
if not (c_option._short_opts or c_option._long_opts):
c_option.container.option_list.remove(c_option)
def add_option(self, *args, **kwargs):
"""add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
"""
if type(args[0]) is types.StringType:
option = self.option_class(*args, **kwargs)
elif len(args) == 1 and not kwargs:
option = args[0]
if not isinstance(option, Option):
raise TypeError, "not an Option instance: %r" % option
else:
raise TypeError, "invalid arguments"
self._check_conflict(option)
self.option_list.append(option)
option.container = self
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
if option.dest is not None: # option has a dest, we need a default
if option.default is not NO_DEFAULT:
self.defaults[option.dest] = option.default
elif not self.defaults.has_key(option.dest):
self.defaults[option.dest] = None
return option
def add_options(self, option_list):
for option in option_list:
self.add_option(option)
# -- Option query/removal methods ----------------------------------
def get_option(self, opt_str):
return (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
def has_option(self, opt_str):
return (self._short_opt.has_key(opt_str) or
self._long_opt.has_key(opt_str))
def remove_option(self, opt_str):
option = self._short_opt.get(opt_str)
if option is None:
option = self._long_opt.get(opt_str)
if option is None:
raise ValueError("no such option %r" % opt_str)
for opt in option._short_opts:
del self._short_opt[opt]
for opt in option._long_opts:
del self._long_opt[opt]
option.container.option_list.remove(option)
# -- Help-formatting methods ---------------------------------------
def format_option_help(self, formatter):
if not self.option_list:
return ""
result = []
for option in self.option_list:
if not option.help is SUPPRESS_HELP:
result.append(formatter.format_option(option))
return "".join(result)
def format_description(self, formatter):
return formatter.format_description(self.get_description())
def format_help(self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
class OptionGroup (OptionContainer):
def __init__(self, parser, title, description=None):
self.parser = parser
OptionContainer.__init__(
self, parser.option_class, parser.conflict_handler, description)
self.title = title
def _create_option_list(self):
self.option_list = []
self._share_option_mappings(self.parser)
def set_title(self, title):
self.title = title
def destroy(self):
"""see OptionParser.destroy()."""
OptionContainer.destroy(self)
del self.option_list
# -- Help-formatting methods ---------------------------------------
def format_help(self, formatter):
result = formatter.format_heading(self.title)
formatter.indent()
result += OptionContainer.format_help(self, formatter)
formatter.dedent()
return result
class OptionParser (OptionContainer):
"""
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
usage : string
a usage string for your program. Before it is displayed
to the user, "%prog" will be expanded to the name of
your program (self.prog or os.path.basename(sys.argv[0])).
prog : string
the name of the current program (to override
os.path.basename(sys.argv[0])).
epilog : string
paragraph of help text to print after option help
option_groups : [OptionGroup]
list of option groups in this parser (option groups are
irrelevant for parsing the command-line, but very useful
for generating help)
allow_interspersed_args : bool = true
if true, positional arguments may be interspersed with options.
Assuming -a and -b each take a single argument, the command-line
-ablah foo bar -bboo baz
will be interpreted the same as
-ablah -bboo -- foo bar baz
If this flag were false, that command line would be interpreted as
-ablah -- foo bar -bboo baz
-- ie. we stop processing options as soon as we see the first
non-option argument. (This is the tradition followed by
Python's getopt module, Perl's Getopt::Std, and other argument-
parsing libraries, but it is generally annoying to users.)
process_default_values : bool = true
if true, option default values are processed similarly to option
values from the command line: that is, they are passed to the
type-checking function for the option's type (as long as the
default value is a string). (This really only matters if you
have defined custom types; see SF bug #955889.) Set it to false
to restore the behaviour of Optik 1.4.1 and earlier.
rargs : [string]
the argument list currently being parsed. Only set when
parse_args() is active, and continually trimmed down as
we consume arguments. Mainly there for the benefit of
callback options.
largs : [string]
the list of leftover arguments that we have skipped while
parsing options. If allow_interspersed_args is false, this
list is always empty.
values : Values
the set of option values currently being accumulated. Only
set when parse_args() is active. Also mainly for callbacks.
Because of the 'rargs', 'largs', and 'values' attributes,
OptionParser is not thread-safe. If, for some perverse reason, you
need to parse command-line arguments simultaneously in different
threads, use different OptionParser instances.
"""
standard_option_list = []
def __init__(self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None,
epilog=None):
OptionContainer.__init__(
self, option_class, conflict_handler, description)
self.set_usage(usage)
self.prog = prog
self.version = version
self.allow_interspersed_args = True
self.process_default_values = True
if formatter is None:
formatter = IndentedHelpFormatter()
self.formatter = formatter
self.formatter.set_parser(self)
self.epilog = epilog
# Populate the option list; initial sources are the
# standard_option_list class attribute, the 'option_list'
# argument, and (if applicable) the _add_version_option() and
# _add_help_option() methods.
self._populate_option_list(option_list,
add_help=add_help_option)
self._init_parsing_state()
def destroy(self):
"""
Declare that you are done with this OptionParser. This cleans up
reference cycles so the OptionParser (and all objects referenced by
it) can be garbage-collected promptly. After calling destroy(), the
OptionParser is unusable.
"""
OptionContainer.destroy(self)
for group in self.option_groups:
group.destroy()
del self.option_list
del self.option_groups
del self.formatter
# -- Private methods -----------------------------------------------
# (used by our or OptionContainer's constructor)
def _create_option_list(self):
self.option_list = []
self.option_groups = []
self._create_option_mappings()
def _add_help_option(self):
self.add_option("-h", "--help",
action="help",
help=_("show this help message and exit"))
def _add_version_option(self):
self.add_option("--version",
action="version",
help=_("show program's version number and exit"))
def _populate_option_list(self, option_list, add_help=True):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
if self.version:
self._add_version_option()
if add_help:
self._add_help_option()
def _init_parsing_state(self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage(self, usage):
if usage is None:
self.usage = _("%prog [options]")
elif usage is SUPPRESS_USAGE:
self.usage = None
# For backwards compatibility with Optik 1.3 and earlier.
elif usage.lower().startswith("usage: "):
self.usage = usage[7:]
else:
self.usage = usage
def enable_interspersed_args(self):
self.allow_interspersed_args = True
def disable_interspersed_args(self):
self.allow_interspersed_args = False
def set_process_default_values(self, process):
self.process_default_values = process
def set_default(self, dest, value):
self.defaults[dest] = value
def set_defaults(self, **kwargs):
self.defaults.update(kwargs)
def _get_all_options(self):
options = self.option_list[:]
for group in self.option_groups:
options.extend(group.option_list)
return options
def get_default_values(self):
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return Values(self.defaults)
defaults = self.defaults.copy()
for option in self._get_all_options():
default = defaults.get(option.dest)
if isbasestring(default):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return Values(defaults)
# -- OptionGroup methods -------------------------------------------
def add_option_group(self, *args, **kwargs):
# XXX lots of overlap with OptionContainer.add_option()
if type(args[0]) is types.StringType:
group = OptionGroup(self, *args, **kwargs)
elif len(args) == 1 and not kwargs:
group = args[0]
if not isinstance(group, OptionGroup):
raise TypeError, "not an OptionGroup instance: %r" % group
if group.parser is not self:
raise ValueError, "invalid OptionGroup (wrong parser)"
else:
raise TypeError, "invalid arguments"
self.option_groups.append(group)
return group
def get_option_group(self, opt_str):
option = (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
if option and option.container is not self:
return option.container
return None
# -- Option-parsing methods ----------------------------------------
def _get_args(self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args(self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
(values, args) where 'values' is a Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError), err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args)
def check_values(self, values, args):
"""
check_values(values : Values, args : [string])
-> (values : Values, args : [string])
Check that the supplied option values and leftover arguments are
valid. Returns the option values and leftover arguments
(possibly adjusted, possibly completely new -- whatever you
like). Default implementation just returns the passed-in
values; subclasses may override as desired.
"""
return (values, args)
def _process_args(self, largs, rargs, values):
"""_process_args(largs : [string],
rargs : [string],
values : Values)
Process command-line arguments and populate 'values', consuming
options and arguments from 'rargs'. If 'allow_interspersed_args' is
false, stop at the first non-option argument. If true, accumulate any
interspersed non-option arguments in 'largs'.
"""
while rargs:
arg = rargs[0]
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return # stop now, leave this arg in rargs
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(self, opt):
"""_match_long_opt(opt : string) -> string
Determine which long option string 'opt' matches, ie. which one
it is an unambiguous abbreviation for. Raises BadOptionError if
'opt' doesn't unambiguously match any long option string.
"""
return _match_abbrev(opt, self._long_opt)
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error(_("%s option requires an argument") % opt)
else:
self.error(_("%s option requires %d arguments")
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
self.error(_("%s option does not take a value") % opt)
else:
value = None
option.process(opt, value, values, self)
def _process_short_opts(self, rargs, values):
arg = rargs.pop(0)
stop = False
i = 1
for ch in arg[1:]:
opt = "-" + ch
option = self._short_opt.get(opt)
i += 1 # we have consumed a character
if not option:
raise BadOptionError(opt)
if option.takes_value():
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error(_("%s option requires an argument") % opt)
else:
self.error(_("%s option requires %d arguments")
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
else: # option doesn't take a value
value = None
option.process(opt, value, values, self)
if stop:
break
# -- Feedback methods ----------------------------------------------
def get_prog_name(self):
if self.prog is None:
return os.path.basename(sys.argv[0])
else:
return self.prog
def expand_prog_name(self, s):
return s.replace("%prog", self.get_prog_name())
def get_description(self):
return self.expand_prog_name(self.description)
def exit(self, status=0, msg=None):
if msg:
sys.stderr.write(msg)
sys.exit(status)
def error(self, msg):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(sys.stderr)
self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
def get_usage(self):
if self.usage:
return self.formatter.format_usage(
self.expand_prog_name(self.usage))
else:
return ""
def print_usage(self, file=None):
"""print_usage(file : file = stdout)
Print the usage message for the current program (self.usage) to
'file' (default stdout). Any occurrence of the string "%prog" in
self.usage is replaced with the name of the current program
(basename of sys.argv[0]). Does nothing if self.usage is empty
or not defined.
"""
if self.usage:
print >>file, self.get_usage()
def get_version(self):
if self.version:
return self.expand_prog_name(self.version)
else:
return ""
def print_version(self, file=None):
"""print_version(file : file = stdout)
Print the version message for this program (self.version) to
'file' (default stdout). As with print_usage(), any occurrence
of "%prog" in self.version is replaced by the current program's
name. Does nothing if self.version is empty or undefined.
"""
if self.version:
print >>file, self.get_version()
def format_option_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
formatter.store_option_strings(self)
result = []
result.append(formatter.format_heading(_("Options")))
formatter.indent()
if self.option_list:
result.append(OptionContainer.format_option_help(self, formatter))
result.append("\n")
for group in self.option_groups:
result.append(group.format_help(formatter))
result.append("\n")
formatter.dedent()
# Drop the last "\n", or the header if no options or option groups:
return "".join(result[:-1])
def format_epilog(self, formatter):
return formatter.format_epilog(self.epilog)
def format_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
result = []
if self.usage:
result.append(self.get_usage() + "\n")
if self.description:
result.append(self.format_description(formatter) + "\n")
result.append(self.format_option_help(formatter))
result.append(self.format_epilog(formatter))
return "".join(result)
def print_help(self, file=None):
"""print_help(file : file = stdout)
Print an extended help message, listing all options and any
help text provided with them, to 'file' (default stdout).
"""
if file is None:
file = sys.stdout
file.write(self.format_help())
# class OptionParser
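# A minimal end-to-end usage sketch (not part of the original module);
# the option names and the argument list are invented for illustration.
def _example_parser_usage():
    parser = OptionParser(usage="%prog [options] file...")
    parser.add_option("-v", "--verbose", action="store_true", default=False)
    parser.add_option("-n", type="int", dest="count", default=1)
    (values, args) = parser.parse_args(["-v", "-n", "3", "input.txt"])
    assert values.verbose and values.count == 3
    assert args == ["input.txt"]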
def _match_abbrev(s, wordmap):
"""_match_abbrev(s : string, wordmap : {string : Option}) -> string
Return the string key in 'wordmap' for which 's' is an unambiguous
abbreviation. If 's' is found to be ambiguous or doesn't match any of
'words', raise BadOptionError.
"""
# Is there an exact match?
if wordmap.has_key(s):
return s
else:
# Isolate all words with s as a prefix.
possibilities = [word for word in wordmap.keys()
if word.startswith(s)]
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError(s)
else:
# More than one possible completion: ambiguous prefix.
raise AmbiguousOptionError(s, possibilities)
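# An illustrative sketch (not part of the original module) of how
# _match_abbrev() resolves abbreviated long options; the wordmap keys
# are hypothetical.  A unique prefix matches, an ambiguous one raises
# AmbiguousOptionError, and an unknown one raises BadOptionError.
def _example_match_abbrev():
    wordmap = {"--verbose": None, "--version": None, "--quiet": None}
    assert _match_abbrev("--q", wordmap) == "--quiet"
    try:
        _match_abbrev("--ver", wordmap)  # prefix of two options
    except AmbiguousOptionError:
        pass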
# Some day, there might be many Option classes. As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.
make_option = Option
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 1 .. 8 spaces, depending on its position in
its line. If false, each tab is treated as a single character.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
"""
whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace))
unicode_whitespace_trans = {}
uspace = ord(u' ')
for x in map(ord, _whitespace):
unicode_whitespace_trans[x] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[%s]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
% string.lowercase)
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs()
if self.replace_whitespace:
if isinstance(text, types.StringType):
text = text.translate(self.whitespace_trans)
elif isinstance(text, types.UnicodeType):
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
"""
chunks = self.wordsep_re.split(text)
chunks = filter(None, chunks)
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
pat = self.sentence_end_re
while i < len(chunks)-1:
if chunks[i+1] == " " and pat.search(chunks[i]):
chunks[i+1] = "  "
i += 2
else:
i += 1
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
space_left = max(width - cur_len, 1)
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
# Arrange in reverse order so items can be efficiently popped
# from a stack of chunks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
# If the last chunk on this line is all whitespace, drop it.
if cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
# Convert current line back to a string and store it in list
# of all lines (return value).
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
text = self._munge_whitespace(text)
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
"""Wrap a single paragraph of text, returning a list of wrapped lines.
Reformat the single paragraph in 'text' so it fits in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See TextWrapper class for available keyword args to customize
wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
def fill(text, width=70, **kwargs):
"""Fill a single paragraph of text, returning a new string.
Reformat the single paragraph in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped paragraph. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See TextWrapper class for
available keyword args to customize wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.fill(text)
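# An illustrative sketch (not part of the original module) showing the
# convenience functions above; the text and width are invented.
def _example_wrap_fill():
    text = "The quick brown fox jumps over the lazy dog."
    assert wrap(text, width=15) == [
        "The quick brown", "fox jumps over", "the lazy dog."]
    assert fill(text, width=15) == (
        "The quick brown\nfox jumps over\nthe lazy dog.")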
# -- Loosely related functionality -------------------------------------
def dedent(text):
"""dedent(text : string) -> string
Remove any whitespace that can be uniformly removed from the left
of every line in `text`.
This can be used e.g. to make triple-quoted strings line up with
the left edge of screen/whatever, while still presenting it in the
source code in indented form.
For example:
def test():
# end first line with \ to avoid the empty line!
s = '''\
hello
world
'''
print repr(s) # prints ' hello\n world\n '
print repr(dedent(s)) # prints 'hello\n world\n'
"""
lines = text.expandtabs().split('\n')
margin = None
for line in lines:
content = line.lstrip()
if not content:
continue
indent = len(line) - len(content)
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if margin is not None and margin > 0:
for i in range(len(lines)):
lines[i] = lines[i][margin:]
return '\n'.join(lines)
| inducer/jhbuild | jhbuild/cut_n_paste/optparse.py | Python | gpl-2.0 | 73,285 |
from matplotlib import rcParams, rc
import numpy as np
import sys
from fitFunctions import gaussian
import scipy.interpolate
import scipy.signal
from baselineIIR import IirFilter
import pickle
import smooth
# common setup for matplotlib
params = {'savefig.dpi': 300, # save figures to 300 dpi
'axes.labelsize': 14,
'text.fontsize': 14,
'legend.fontsize': 14,
'xtick.labelsize': 14,
'ytick.major.pad': 6,
'xtick.major.pad': 6,
'ytick.labelsize': 14}
# use of Sans Serif also in math mode
rc('text.latex', preamble='\usepackage{sfmath}')
rcParams.update(params)
import matplotlib.pyplot as plt
import numpy as np
import os
import struct
def calcThreshold(phase,nSigma=2.5,nSamples=5000):
'''
Calculate the threshold (in phase units) corresponding
to a sigma threshold. (Note: this function seems a bit odd; it
appears to define sigma as a one-sided lower 95% threshold.)
'''
n,bins= np.histogram(phase[:nSamples],bins=100)
n = np.array(n,dtype='float32')/np.sum(n)
tot = np.zeros(len(bins))
for i in xrange(len(bins)):
tot[i] = np.sum(n[:i])
med = bins[np.abs(tot-0.5).argmin()]
thresh = bins[np.abs(tot-0.05).argmin()]
threshold = med-nSigma*abs(med-thresh)
return threshold
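# Illustrative sketch (not part of the original module): calcThreshold
# on synthetic zero-mean Gaussian noise.  With unit sigma the returned
# threshold lands a few phase units below the median.
def exampleCalcThreshold():
    np.random.seed(0)
    phase = np.random.normal(0., 1., 5000)
    threshold = calcThreshold(phase, nSigma=2.5)
    assert threshold < 0.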
def sigmaTrigger(data,nSigmaTrig=7.,deadtime=10):
'''
Find photon pulses using a sigma trigger
INPUTS:
data - phase timestream (filtered or raw)
nSigmaTrig - threshold for photon detection, in units sigma from baseline
deadtime - trigger deadtime in ticks (us)
OUTPUTS:
Dictionary with keys:
peakIndices - indices of detected pulses in phase stream
peakHeights - heights of detected pulses (in same units as input data)
'''
data = np.array(data)
med = np.median(data)
trigMask = data > (med + np.std(data)*nSigmaTrig)
if np.sum(trigMask) > 0:
peakIndices = np.where(trigMask)[0]
i = 0
p = peakIndices[i]
while p < peakIndices[-1]:
peakIndices = peakIndices[np.logical_or(peakIndices-p > deadtime , peakIndices-p <= 0)]#apply deadtime
i+=1
if i < len(peakIndices):
p = peakIndices[i]
else:
p = peakIndices[-1]
else:
return {'peakIndices':np.array([]),'peakHeights':np.array([])}
peakHeights = data[peakIndices]
return {'peakIndices':peakIndices,'peakHeights':peakHeights}
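# Illustrative sketch (not part of the original module): sigmaTrigger on
# noise with one injected spike far above the 7-sigma trigger level; the
# spike index should be recovered.
def exampleSigmaTrigger():
    np.random.seed(1)
    data = np.random.normal(0., 1., 2000)
    data[500] += 20.
    peakDict = sigmaTrigger(data, nSigmaTrig=7., deadtime=10)
    assert 500 in peakDict['peakIndices']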
def detectPulses(data,threshold=None,nSigmaThreshold=3.,deadtime=10,nNegDerivChecks=10,negDerivLenience=1,bNegativePulses = True):
#deadtime in ticks (us)
if bNegativePulses:
data = np.array(data)
else:
data = -np.array(data) #flip to negative pulses
if threshold is None:
threshold = np.median(data)-nSigmaThreshold*np.std(data)
derivative = np.diff(data)
peakHeights = []
t = 0
negDeriv = derivative <= 0
posDeriv = np.logical_not(negDeriv)
triggerBooleans = data[nNegDerivChecks:-2] < threshold
negDerivChecksSum = np.zeros(len(negDeriv[0:-nNegDerivChecks-1]))
for i in range(nNegDerivChecks):
negDerivChecksSum += negDeriv[i:i-nNegDerivChecks-1]
peakCondition0 = negDerivChecksSum >= nNegDerivChecks-negDerivLenience
peakCondition1 = np.logical_and(posDeriv[nNegDerivChecks:-1],posDeriv[nNegDerivChecks+1:])
peakCondition01 = np.logical_and(peakCondition0,peakCondition1)
peakBooleans = np.logical_and(triggerBooleans,peakCondition01)
try:
peakIndices = np.where(peakBooleans)[0]+nNegDerivChecks
i = 0
p = peakIndices[i]
while p < peakIndices[-1]:
peakIndices = peakIndices[np.logical_or(peakIndices-p > deadtime , peakIndices-p <= 0)]#apply deadtime
i+=1
if i < len(peakIndices):
p = peakIndices[i]
else:
p = peakIndices[-1]
except IndexError:
return {'peakIndices':np.array([]),'peakHeights':np.array([])}
if bNegativePulses:
peakHeights = data[peakIndices]
else:
peakHeights = -data[peakIndices] #flip back to positive sign
return {'peakIndices':peakIndices,'peakHeights':peakHeights}
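# Illustrative sketch (not part of the original module): detectPulses on
# a noiseless trace with one negative-going pulse (10-sample fall, then
# an exponential recovery), matching the default bNegativePulses=True
# convention.  The pulse shape and all numbers are invented.
def exampleDetectPulses():
    data = np.zeros(2000)
    data[1000:1010] += np.linspace(0., -10., 10)
    data[1010:1060] += -10. * np.exp(-np.arange(50) / 15.)
    peakDict = detectPulses(data, nSigmaThreshold=3., deadtime=10)
    assert len(peakDict['peakIndices']) == 1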
def optimizeTrigCond(data, nPeaks, sigmaThreshList=[3.], nNegDerivChecksList=[10], negDerivLenienceList=[1], bNegativePulses=True):
minSigma = 1000
optSigmaThresh = 0
optNNegDerivChecks = 0
optNegDerivLenience = 0
optPeakDict = {'peakIndices':np.array([]), 'peakHeights':np.array([])}
for sigmaThresh in sigmaThreshList:
for nNegDerivChecks in nNegDerivChecksList:
for negDerivLenience in negDerivLenienceList:
peakDict = detectPulses(data, nSigmaThreshold=sigmaThresh, nNegDerivChecks=nNegDerivChecks, negDerivLenience=negDerivLenience, bNegativePulses=bNegativePulses)
if(len(peakDict['peakIndices'])>=nPeaks):
sigma = np.std(peakDict['peakHeights'])
if(sigma<minSigma):
minSigma = sigma
optSigmaThresh = sigmaThresh
optNNegDerivChecks = nNegDerivChecks
optNegDerivLenience = negDerivLenience
optPeakDict = peakDict
return optSigmaThresh, optNNegDerivChecks, optNegDerivLenience, minSigma, optPeakDict
def findSigmaThresh(data, initSigmaThresh=2., tailSlack=0., isPlot=False):
'''
Finds the optimal photon trigger threshold by cutting out the noise tail
in the pulse height histogram.
INPUTS:
data - filtered phase timestream data (positive pulses)
initSigmaThresh - sigma threshold to use when constructing initial
pulse height histogram
tailSlack - amount (in same units as data) to relax trigger threshold
isPlot - make peak height histograms if true
OUTPUTS:
threshold - trigger threshold in same units as data
sigmaThresh - trigger threshold in units sigma from median
'''
peakdict = sigmaTrigger(data, nSigmaTrig=initSigmaThresh)
peaksHist, peaksHistBins = np.histogram(peakdict['peakHeights'], bins='auto')
if(isPlot):
plt.plot(peaksHistBins[:-1], peaksHist)
plt.title('Unsmoothed Plot')
plt.show()
print 'peaksHistLen:', len(peaksHist)
peaksHist = smooth.smooth(peaksHist,(len(peaksHistBins)/20)*2+1)
print 'peaksHistSmoothLen:', len(peaksHist)
if(isPlot):
plt.plot(peaksHistBins[0:len(peaksHist)], peaksHist)
plt.title('smoothed plot')
plt.show()
minima=np.ones(len(peaksHist)) #keeps track of minima locations; element is 1 if minimum exists at that index
minimaCount = 1
#while there are multiple local minima, look for the deepest one
while(np.count_nonzero(minima)>1):
minima = np.logical_and(minima, np.logical_and((peaksHist<=np.roll(peaksHist,minimaCount)),(peaksHist<=np.roll(peaksHist,-minimaCount))))
#print 'minima array:', minima
minima[minimaCount-1]=0
minima[len(minima)-minimaCount]=0 #get rid of boundary effects
minimaCount += 1
thresholdInd = np.where(minima)[0][0]
threshold = peaksHistBins[thresholdInd]-tailSlack
sigmaThresh = (threshold-np.median(data))/np.std(data)
return threshold, sigmaThresh
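# Illustrative usage sketch (not part of the original module).
# findSigmaThresh expects a filtered, positive-pulse phase stream whose
# peak-height histogram separates into a noise tail and a pulse bump.
# The stream below is synthetic and every number is invented; on real
# data, initSigmaThresh and tailSlack will likely need tuning.
def exampleFindSigmaThresh():
    np.random.seed(3)
    data = np.random.normal(0., 1., 20000)
    data[np.arange(100, 20000, 200)] += np.random.normal(12., 1., 100)
    threshold, sigmaThresh = findSigmaThresh(data, initSigmaThresh=2.)
    # threshold should land between the noise tail and the pulse bump
    return threshold, sigmaThresh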
| mstrader/MkidDigitalReadout | DarknessFilters/triggerPhotons.py | Python | gpl-2.0 | 7,378 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import swisseph as swe
import unittest
class TestSweLunEclipse(unittest.TestCase):
@classmethod
def setUpClass(cls):
swe.set_ephe_path()
def test_01(self):
jd = 2454466.5
flags = swe.FLG_SWIEPH
geopos = (12.1, 49.0, 330)
rflags, tret = swe.lun_eclipse_when(jd, flags, 0)
self.assertEqual(rflags, 4)
self.assertEqual(len(tret), 10)
t1 = (2454517.6430690456, 0.0, 2454517.57172334, 2454517.7144189165,
2454517.6258038115, 2454517.6603509136, 2454517.525389122,
2454517.7608554545, 0.0, 0.0)
for i in range(10):
self.assertAlmostEqual(tret[i], t1[i])
tjdut = tret[0]
rflags, tret, attr = swe.lun_eclipse_when_loc(tjdut, geopos, flags)
self.assertEqual(rflags, 29584)
self.assertEqual(len(tret), 10)
t1 = (2454695.3820517384, 0.0, 2454695.316710297, 2454695.447390333,
0.0, 0.0, 2454695.2672055247, 2454695.496797575, 0.0, 0.0)
for i in range(10):
self.assertAlmostEqual(tret[i], t1[i])
self.assertEqual(len(attr), 20)
t1 = (0.8076127691060245, 1.8366497324296667, 0.0, 0.0,
326.9885866287668, 21.362590458352507, 21.402251051495636,
0.5301609960196174, 0.8076127691060245, 138.0, 28.0, 28.0,
28.0, 28.0, 28.0, 28.0, 28.0, 28.0, 28.0, 28.0)
for i in range(20):
self.assertAlmostEqual(attr[i], t1[i])
rflags, attr = swe.lun_eclipse_how(tjdut, geopos, flags)
self.assertEqual(rflags, 4)
self.assertEqual(len(attr), 20)
t1 = (1.1061093373639495, 2.145134309769692, 0.0, 0.0,
73.8203145568749, 26.299290272560974, 26.330700027276947,
0.3801625589840114, 1.1061093373639495, 133.0, 26.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
for i in range(20):
self.assertAlmostEqual(attr[i], t1[i])
if __name__ == '__main__':
unittest.main()
# vi: sw=4 ts=4 et
| astrorigin/pyswisseph | tests/test_swe_lun_eclipse.py | Python | gpl-2.0 | 2,097 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" A set of utilities, mostly for post-processing and visualization
We put arrays on disk as raw bytes, extending along the first dimension.
Alongside each array x we ensure the value x.dtype which stores the string
description of the array's dtype.
See Also:
------------
@url
.. image::
@author epnev
"""
# \package caiman/source_extraction/cnmf
# \version 1.0
# \copyright GNU General Public License v2.0
# \date Created on Sat Sep 12 15:52:53 2015
from builtins import str
from builtins import range
from past.utils import old_div
import cv2
import h5py
import logging
import numpy as np
import os
import pylab as pl
import scipy
from scipy.sparse import spdiags, issparse, csc_matrix, csr_matrix
import scipy.ndimage.morphology as morph
from skimage.feature.peak import _get_high_intensity_peaks
import tifffile
from typing import List
from .initialization import greedyROI
from ...base.rois import com
from ...mmapping import parallel_dot_product, load_memmap
from ...cluster import extract_patch_coordinates
from ...utils.stats import df_percentile
def decimation_matrix(dims, sub):
D = np.prod(dims)
if sub == 2 and D <= 10000: # faster for small matrices
ind = np.arange(D) // 2 - \
np.arange(dims[0], dims[0] + D) // (dims[0] * 2) * (dims[0] // 2) - \
(dims[0] % 2) * (np.arange(D) % (2 * dims[0]) > dims[0]) * (np.arange(1, 1 + D) % 2)
else:
def create_decimation_matrix_bruteforce(dims, sub):
dims_ds = tuple(1 + (np.array(dims) - 1) // sub)
d_ds = np.prod(dims_ds)
ds_matrix = np.eye(d_ds)
ds_matrix = np.repeat(np.repeat(
ds_matrix.reshape((d_ds,) + dims_ds, order='F'), sub, 1),
sub, 2)[:, :dims[0], :dims[1]].reshape((d_ds, -1), order='F')
ds_matrix /= ds_matrix.sum(1)[:, None]
ds_matrix = csc_matrix(ds_matrix, dtype=np.float32)
return ds_matrix
tmp = create_decimation_matrix_bruteforce((dims[0], sub), sub).indices
ind = np.concatenate([tmp] * (dims[1] // sub + 1))[:D] + \
np.arange(D) // (dims[0] * sub) * ((dims[0] - 1) // sub + 1)
data = 1. / np.unique(ind, return_counts=True)[1][ind]
return csc_matrix((data, ind, np.arange(1 + D)), dtype=np.float32)
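# Illustrative sketch (not part of the original module): the operator
# returned by decimation_matrix() averages each sub x sub block of a
# Fortran-order flattened image.  The dims and values are invented.
def _example_decimation_matrix():
    D = decimation_matrix((4, 4), 2)
    x = np.arange(16, dtype=np.float32)  # a 4x4 image, flattened
    y = D.dot(x)
    assert y.shape[0] == 4  # one averaged value per 2x2 block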
def peak_local_max(image, min_distance=1, threshold_abs=None,
threshold_rel=None, exclude_border=True, indices=True,
num_peaks=np.inf, footprint=None):
"""Find peaks in an image as coordinate list or boolean mask.
Adapted from skimage to use opencv for speed.
Replaced scipy.ndimage.maximum_filter by cv2.dilate.
Peaks are the local maxima in a region of `2 * min_distance + 1`
(i.e. peaks are separated by at least `min_distance`).
If peaks are flat (i.e. multiple adjacent pixels have identical
intensities), the coordinates of all such pixels are returned.
If both `threshold_abs` and `threshold_rel` are provided, the maximum
of the two is chosen as the minimum intensity threshold of peaks.
Parameters
----------
image : ndarray
Input image.
min_distance : int, optional
Minimum number of pixels separating peaks in a region of `2 *
min_distance + 1` (i.e. peaks are separated by at least
`min_distance`).
To find the maximum number of peaks, use `min_distance=1`.
threshold_abs : float, optional
Minimum intensity of peaks. By default, the absolute threshold is
the minimum intensity of the image.
threshold_rel : float, optional
Minimum intensity of peaks, calculated as `max(image) * threshold_rel`.
exclude_border : int, optional
If nonzero, `exclude_border` excludes peaks from
within `exclude_border`-pixels of the border of the image.
indices : bool, optional
If True, the output will be an array representing peak
coordinates. If False, the output will be a boolean array shaped as
`image.shape` with peaks present at True elements.
num_peaks : int, optional
Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
return `num_peaks` peaks based on highest peak intensity.
footprint : ndarray of bools, optional
If provided, `footprint == 1` represents the local region within which
to search for peaks at every point in `image`. Overrides
`min_distance` (also for `exclude_border`).
Returns
-------
output : ndarray or ndarray of bools
* If `indices = True` : (row, column, ...) coordinates of peaks.
* If `indices = False` : Boolean array shaped like `image`, with peaks
represented by True values.
Notes
-----
The peak local maximum function returns the coordinates of local peaks
(maxima) in an image. A maximum filter is used for finding local maxima.
This operation dilates the original image. After comparison of the dilated
and original image, this function returns the coordinates or a mask of the
peaks where the dilated image equals the original image.
Examples
--------
>>> img1 = np.zeros((7, 7))
>>> img1[3, 4] = 1
>>> img1[3, 2] = 1.5
>>> img1
array([[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 1.5, 0. , 1. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ]])
>>> peak_local_max(img1, min_distance=1)
array([[3, 4],
[3, 2]])
>>> peak_local_max(img1, min_distance=2)
array([[3, 2]])
>>> img2 = np.zeros((20, 20, 20))
>>> img2[10, 10, 10] = 1
>>> peak_local_max(img2, exclude_border=0)
array([[10, 10, 10]])
"""
if type(exclude_border) == bool:
exclude_border = min_distance if exclude_border else 0
out = np.zeros_like(image, dtype=np.bool)
if np.all(image == image.flat[0]):
if indices is True:
return np.empty((0, 2), np.int)
else:
return out
# Non maximum filter
if footprint is not None:
# image_max = ndi.maximum_filter(image, footprint=footprint,
# mode='constant')
image_max = cv2.dilate(image, footprint=footprint, iterations=1)
else:
size = 2 * min_distance + 1
# image_max = ndi.maximum_filter(image, size=size, mode='constant')
image_max = cv2.dilate(image, cv2.getStructuringElement(
cv2.MORPH_RECT, (size, size)), iterations=1)
mask = image == image_max
if exclude_border:
# zero out the image borders
for i in range(mask.ndim):
mask = mask.swapaxes(0, i)
remove = (footprint.shape[i] if footprint is not None
else 2 * exclude_border)
mask[:remove // 2] = mask[-remove // 2:] = False
mask = mask.swapaxes(0, i)
# find top peak candidates above a threshold
thresholds = []
if threshold_abs is None:
threshold_abs = image.min()
thresholds.append(threshold_abs)
if threshold_rel is not None:
thresholds.append(threshold_rel * image.max())
if thresholds:
mask &= image > max(thresholds)
# Select highest intensities (num_peaks)
coordinates = _get_high_intensity_peaks(image, mask, num_peaks)
if indices is True:
return coordinates
else:
nd_indices = tuple(coordinates.T)
out[nd_indices] = True
return out
def dict_compare(d1, d2):
d1_keys = set(d1.keys())
d2_keys = set(d2.keys())
intersect_keys = d1_keys.intersection(d2_keys)
added = d1_keys - d2_keys
removed = d2_keys - d1_keys
modified = {o : (d1[o], d2[o]) for o in intersect_keys if np.any(d1[o] != d2[o])}
same = set(o for o in intersect_keys if np.all(d1[o] == d2[o]))
return added, removed, modified, same
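# Illustrative example for dict_compare (made-up dictionaries with array
# values; kept as comments so the module is unaffected):
# >>> added, removed, modified, same = dict_compare(
# ...     {'x': np.array([1, 2]), 'y': 0}, {'x': np.array([1, 3]), 'z': 0})
# >>> sorted(added), sorted(removed), sorted(modified), sorted(same)
# (['y'], ['z'], ['x'], [])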
def computeDFF_traces(Yr, A, C, bl, quantileMin=8, frames_window=200):
    return extract_DF_F(Yr, A, C, bl, quantileMin, frames_window)
def extract_DF_F(Yr, A, C, bl, quantileMin=8, frames_window=200, block_size=400, dview=None):
""" Compute DFF function from cnmf output.
Disclaimer: it might be memory inefficient
Args:
Yr: ndarray (2D)
movie pixels X time
A: scipy.sparse.coo_matrix
spatial components (from cnmf cnm.A)
C: ndarray
temporal components (from cnmf cnm.C)
bl: ndarray
baseline for each component (from cnmf cnm.bl)
        quantile_min: float
            quantile of the trace used to estimate the baseline (values in [0,100])
frames_window: int
number of frames for running quantile
Returns:
        C_df:
            the computed calcium activity normalized by the estimated baseline fluorescence (DF/F)
    See Also:
        .. image:: docs/img/onlycnmf.png
"""
nA = np.array(np.sqrt(A.power(2).sum(0)).T)
A = scipy.sparse.coo_matrix(A / nA.T)
C = C * nA
bl = (bl * nA.T).squeeze()
nA = np.array(np.sqrt(A.power(2).sum(0)).T)
T = C.shape[-1]
if 'memmap' in str(type(Yr)):
if block_size >= 500:
print('Forcing single thread for memory issues')
dview_res = None
else:
            print('Using parallel processing. If you hit memory issues, set block_size >= 500')
dview_res = dview
AY = parallel_dot_product(Yr, A, dview=dview_res, block_size=block_size,
transpose=True).T
else:
AY = A.T.dot(Yr)
bas_val = bl[None, :]
Bas = np.repeat(bas_val, T, 0).T
AA = A.T.dot(A)
AA.setdiag(0)
Cf = (C - Bas) * (nA**2)
C2 = AY - AA.dot(C)
if frames_window is None or frames_window > T:
Df = np.percentile(C2, quantileMin, axis=1)
C_df = Cf / Df[:, None]
else:
Df = scipy.ndimage.percentile_filter(
C2, quantileMin, (frames_window, 1))
C_df = Cf / Df
return C_df
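# Usage sketch for extract_DF_F (hypothetical inputs: `cnm` is an assumed CNMF
# result and `Yr` the pixels x time data matrix it was fit on):
# >>> C_df = extract_DF_F(Yr, cnm.A, cnm.C, cnm.bl, quantileMin=8,
# ...                     frames_window=200)
# The baseline Df is a running `quantileMin`-percentile of C2 = A'Y - offdiag(A'A)C,
# i.e. the trace with cross-talk from overlapping components removed, so C_df
# is the baseline-normalized activity.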
def detrend_df_f(A, b, C, f, YrA=None, quantileMin=8, frames_window=500,
flag_auto=True, use_fast=False, detrend_only=False):
""" Compute DF/F signal without using the original data.
In general much faster than extract_DF_F
Args:
A: scipy.sparse.csc_matrix
spatial components (from cnmf cnm.A)
b: ndarray
spatial background components
C: ndarray
temporal components (from cnmf cnm.C)
f: ndarray
temporal background components
YrA: ndarray
residual signals
quantile_min: float
quantile used to estimate the baseline (values in [0,100])
frames_window: int
number of frames for computing running quantile
flag_auto: bool
flag for determining quantile automatically
use_fast: bool
            flag for using approximate fast percentile filtering
detrend_only: bool (False)
flag for only subtracting baseline and not normalizing by it.
Used in 1p data processing where baseline fluorescence cannot be
determined.
Returns:
        F_df:
            the computed calcium activity normalized by the estimated baseline fluorescence (DF/F)
"""
if C is None:
logging.warning("There are no components for DF/F extraction!")
return None
if b is None or f is None:
b = np.zeros((A.shape[0], 1))
f = np.zeros((1, C.shape[1]))
logging.warning("Background components not present. Results should" +
" not be interpreted as DF/F normalized but only" +
" as detrended.")
detrend_only = True
if 'csc_matrix' not in str(type(A)):
A = scipy.sparse.csc_matrix(A)
if 'array' not in str(type(b)):
b = b.toarray()
if 'array' not in str(type(C)):
C = C.toarray()
if 'array' not in str(type(f)):
f = f.toarray()
nA = np.sqrt(np.ravel(A.power(2).sum(axis=0)))
nA_mat = scipy.sparse.spdiags(nA, 0, nA.shape[0], nA.shape[0])
nA_inv_mat = scipy.sparse.spdiags(1. / nA, 0, nA.shape[0], nA.shape[0])
A = A * nA_inv_mat
C = nA_mat * C
if YrA is not None:
YrA = nA_mat * YrA
F = C + YrA if YrA is not None else C
B = A.T.dot(b).dot(f)
T = C.shape[-1]
if flag_auto:
data_prct, val = df_percentile(F[:, :frames_window], axis=1)
if frames_window is None or frames_window > T:
Fd = np.stack([np.percentile(f, prctileMin) for f, prctileMin in
zip(F, data_prct)])
Df = np.stack([np.percentile(f, prctileMin) for f, prctileMin in
zip(B, data_prct)])
if not detrend_only:
F_df = (F - Fd[:, None]) / (Df[:, None] + Fd[:, None])
else:
F_df = F - Fd[:, None]
else:
if use_fast:
Fd = np.stack([fast_prct_filt(f, level=prctileMin,
frames_window=frames_window) for
f, prctileMin in zip(F, data_prct)])
Df = np.stack([fast_prct_filt(f, level=prctileMin,
frames_window=frames_window) for
f, prctileMin in zip(B, data_prct)])
else:
Fd = np.stack([scipy.ndimage.percentile_filter(
f, prctileMin, (frames_window)) for f, prctileMin in
zip(F, data_prct)])
Df = np.stack([scipy.ndimage.percentile_filter(
f, prctileMin, (frames_window)) for f, prctileMin in
zip(B, data_prct)])
if not detrend_only:
F_df = (F - Fd) / (Df + Fd)
else:
F_df = F - Fd
else:
if frames_window is None or frames_window > T:
Fd = np.percentile(F, quantileMin, axis=1)
Df = np.percentile(B, quantileMin, axis=1)
if not detrend_only:
F_df = (F - Fd[:, None]) / (Df[:, None] + Fd[:, None])
else:
F_df = F - Fd[:, None]
else:
Fd = scipy.ndimage.percentile_filter(
F, quantileMin, (frames_window, 1))
Df = scipy.ndimage.percentile_filter(
B, quantileMin, (frames_window, 1))
if not detrend_only:
F_df = (F - Fd) / (Df + Fd)
else:
F_df = F - Fd
return F_df
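# In short, detrend_df_f computes per component
#     F_df = (F - F0) / (B0 + F0)          (or just F - F0 if detrend_only)
# where F = C + YrA is the fluorescence trace, F0 its (running) percentile
# baseline, and B0 the same percentile of the projected background
# B = A.T.dot(b).dot(f). Usage sketch (`cnm` is an assumed CNMF result):
# >>> F_dff = detrend_df_f(cnm.A, cnm.b, cnm.C, cnm.f, YrA=cnm.YrA,
# ...                      quantileMin=8, frames_window=500, flag_auto=True)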
def fast_prct_filt(input_data, level=8, frames_window=1000):
"""
Fast approximate percentage filtering
"""
data = np.atleast_2d(input_data).copy()
T = np.shape(data)[-1]
downsampfact = frames_window
elm_missing = int(np.ceil(T * 1.0 / downsampfact)
* downsampfact - T)
padbefore = int(np.floor(elm_missing / 2.))
padafter = int(np.ceil(elm_missing / 2.))
tr_tmp = np.pad(data.T, ((padbefore, padafter), (0, 0)), mode='reflect')
numFramesNew, num_traces = np.shape(tr_tmp)
#% compute baseline quickly
tr_BL = np.reshape(tr_tmp, (downsampfact, int(numFramesNew / downsampfact),
num_traces), order='F')
tr_BL = np.percentile(tr_BL, level, axis=0)
tr_BL = scipy.ndimage.zoom(np.array(tr_BL, dtype=np.float32),
[downsampfact, 1], order=3, mode='nearest',
cval=0.0, prefilter=True)
if padafter == 0:
data -= tr_BL.T
else:
data -= tr_BL[padbefore:-padafter].T
return data.squeeze()
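# The speed trick above: pad the trace to a multiple of `frames_window`,
# reshape it into non-overlapping windows (one column per window), take a
# single percentile per window, and upsample the per-window baselines back to
# full length with a cubic spline (scipy.ndimage.zoom). This approximates a
# true running percentile at a fraction of the cost.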
#%%
def detrend_df_f_auto(A, b, C, f, dims=None, YrA=None, use_annulus = True,
dist1 = 7, dist2 = 5, frames_window=1000,
use_fast = False):
"""
Compute DF/F using an automated level of percentile filtering based on
kernel density estimation.
Args:
A: scipy.sparse.csc_matrix
spatial components (from cnmf cnm.A)
b: ndarray
spatial backgrounds
C: ndarray
temporal components (from cnmf cnm.C)
f: ndarray
temporal background components
YrA: ndarray
residual signals
frames_window: int
number of frames for running quantile
use_fast: bool
flag for using fast approximate percentile filtering
Returns:
        F_df:
            the computed calcium activity normalized by the estimated baseline fluorescence (DF/F)
"""
if 'csc_matrix' not in str(type(A)):
A = scipy.sparse.csc_matrix(A)
if 'array' not in str(type(b)):
b = b.toarray()
if 'array' not in str(type(C)):
C = C.toarray()
if 'array' not in str(type(f)):
f = f.toarray()
nA = np.sqrt(np.ravel(A.power(2).sum(axis=0)))
nA_mat = scipy.sparse.spdiags(nA, 0, nA.shape[0], nA.shape[0])
nA_inv_mat = scipy.sparse.spdiags(1. / nA, 0, nA.shape[0], nA.shape[0])
A = A * nA_inv_mat
C = nA_mat * C
if YrA is not None:
YrA = nA_mat * YrA
F = C + YrA if YrA is not None else C
K = A.shape[-1]
A_ann = A.copy()
if use_annulus:
dist1 = 7
dist2 = 5
X, Y = np.meshgrid(np.arange(-dist1, dist1), np.arange(-dist1, dist1))
R = np.sqrt(X**2+Y**2)
R[R > dist1] = 0
R[R < dist2] = 0
R = R.astype('bool')
for k in range(K):
a = A[:, k].toarray().reshape(dims, order='F') > 0
a2 = np.bitwise_xor(morph.binary_dilation(a, R), a)
a2 = a2.astype(float).flatten(order='F')
a2 /= np.sqrt(a2.sum())
a2 = scipy.sparse.csc_matrix(a2)
A_ann[:, k] = a2.T
B = A_ann.T.dot(b).dot(f)
T = C.shape[-1]
data_prct, val = df_percentile(F[:, :frames_window], axis=1)
if frames_window is None or frames_window > T:
Fd = np.stack([np.percentile(f, prctileMin) for f, prctileMin in
zip(F, data_prct)])
Df = np.stack([np.percentile(f, prctileMin) for f, prctileMin in
zip(B, data_prct)])
F_df = (F - Fd[:, None]) / (Df[:, None] + Fd[:, None])
else:
if use_fast:
Fd = np.stack([fast_prct_filt(f, level=prctileMin,
frames_window=frames_window) for
f, prctileMin in zip(F, data_prct)])
Df = np.stack([fast_prct_filt(f, level=prctileMin,
frames_window=frames_window) for
f, prctileMin in zip(B, data_prct)])
else:
Fd = np.stack([scipy.ndimage.percentile_filter(
f, prctileMin, (frames_window)) for f, prctileMin in
zip(F, data_prct)])
Df = np.stack([scipy.ndimage.percentile_filter(
f, prctileMin, (frames_window)) for f, prctileMin in
zip(B, data_prct)])
F_df = (F - Fd) / (Df + Fd)
return F_df
#%%
def manually_refine_components(Y, xxx_todo_changeme, A, C, Cn, thr=0.9, display_numbers=True,
max_number=None, cmap=None, **kwargs):
"""Plots contour of spatial components
against a background image and allows to interactively add novel components by clicking with mouse
Args:
Y: ndarray
movie in 2D
(dx,dy): tuple
            dimensions of the square used to identify neurons (should be set to the value of gSiz)
A: np.ndarray or sparse matrix
Matrix of Spatial components (d x K)
Cn: np.ndarray (2D)
Background image (e.g. mean, correlation)
thr: scalar between 0 and 1
Energy threshold for computing contours (default 0.995)
        display_numbers: bool
            Display number of ROIs if checked (default True)
max_number: int
Display the number for only the first max_number components (default None, display all numbers)
cmap: string
User specifies the colormap (default None, default colormap)
Returns:
        A: np.ndarray
            matrix of estimated spatial component contributions
C: np.ndarray
array of estimated calcium traces
"""
(dx, dy) = xxx_todo_changeme
if issparse(A):
A = np.array(A.todense())
else:
A = np.array(A)
d1, d2 = np.shape(Cn)
d, nr = np.shape(A)
if max_number is None:
max_number = nr
x, y = np.mgrid[0:d1:1, 0:d2:1]
pl.imshow(Cn, interpolation=None, cmap=cmap)
cm = com(A, d1, d2)
Bmat = np.zeros((np.minimum(nr, max_number), d1, d2))
for i in range(np.minimum(nr, max_number)):
indx = np.argsort(A[:, i], axis=None)[::-1]
cumEn = np.cumsum(A[:, i].flatten()[indx]**2)
cumEn /= cumEn[-1]
Bvec = np.zeros(d)
Bvec[indx] = cumEn
Bmat[i] = np.reshape(Bvec, np.shape(Cn), order='F')
T = np.shape(Y)[-1]
pl.close()
fig = pl.figure()
ax = pl.gca()
ax.imshow(Cn, interpolation=None, cmap=cmap,
vmin=np.percentile(Cn[~np.isnan(Cn)], 1), vmax=np.percentile(Cn[~np.isnan(Cn)], 99))
for i in range(np.minimum(nr, max_number)):
pl.contour(y, x, Bmat[i], [thr])
if display_numbers:
for i in range(np.minimum(nr, max_number)):
ax.text(cm[i, 1], cm[i, 0], str(i + 1))
A3 = np.reshape(A, (d1, d2, nr), order='F')
while True:
pts = fig.ginput(1, timeout=0)
if pts != []:
print(pts)
            xx, yy = np.round(pts[0]).astype(int)  # np.int is a removed alias
coords_y = np.array(list(range(yy - dy, yy + dy + 1)))
coords_x = np.array(list(range(xx - dx, xx + dx + 1)))
coords_y = coords_y[(coords_y >= 0) & (coords_y < d1)]
coords_x = coords_x[(coords_x >= 0) & (coords_x < d2)]
a3_tiny = A3[coords_y[0]:coords_y[-1] +
1, coords_x[0]:coords_x[-1] + 1, :]
y3_tiny = Y[coords_y[0]:coords_y[-1] +
1, coords_x[0]:coords_x[-1] + 1, :]
dy_sz, dx_sz = np.shape(a3_tiny)[:-1]
y2_tiny = np.reshape(y3_tiny, (dx_sz * dy_sz, T), order='F')
a2_tiny = np.reshape(a3_tiny, (dx_sz * dy_sz, nr), order='F')
y2_res = y2_tiny - a2_tiny.dot(C)
y3_res = np.reshape(y2_res, (dy_sz, dx_sz, T), order='F')
a__, c__, center__, b_in__, f_in__ = greedyROI(
y3_res, nr=1, gSig=[np.floor(old_div(dx_sz, 2)), np.floor(old_div(dy_sz, 2))], gSiz=[dx_sz, dy_sz])
a_f = np.zeros((d, 1))
idxs = np.meshgrid(coords_y, coords_x)
a_f[np.ravel_multi_index(
idxs, (d1, d2), order='F').flatten()] = a__
A = np.concatenate([A, a_f], axis=1)
C = np.concatenate([C, c__], axis=0)
indx = np.argsort(a_f, axis=None)[::-1]
cumEn = np.cumsum(a_f.flatten()[indx]**2)
cumEn /= cumEn[-1]
Bvec = np.zeros(d)
Bvec[indx] = cumEn
bmat = np.reshape(Bvec, np.shape(Cn), order='F')
pl.contour(y, x, bmat, [thr])
pl.pause(.01)
elif pts == []:
break
nr += 1
A3 = np.reshape(A, (d1, d2, nr), order='F')
return A, C
def app_vertex_cover(A):
""" Finds an approximate vertex cover for a symmetric graph with adjacency matrix A.
Args:
A: boolean 2d array (K x K)
Adjacency matrix. A is boolean with diagonal set to 0
Returns:
L: A vertex cover of A
Authors:
Eftychios A. Pnevmatikakis, Simons Foundation, 2015
"""
L = []
while A.any():
nz = np.nonzero(A)[0] # find non-zero edges
u = nz[np.random.randint(0, len(nz))]
A[u, :] = False
A[:, u] = False
L.append(u)
return np.asarray(L)
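# Illustrative example for app_vertex_cover (the result is randomized; any
# returned set covers all edges of the path graph 0-1-2). A.copy() is used
# because the function zeroes out the matrix in place:
# >>> A = np.array([[0, 1, 0],
# ...               [1, 0, 1],
# ...               [0, 1, 0]], dtype=bool)
# >>> app_vertex_cover(A.copy())  # e.g. array([1]) or array([0, 2])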
def update_order(A, new_a=None, prev_list=None, method='greedy'):
'''Determines the update order of the temporal components given the spatial
components by creating a nest of random approximate vertex covers
Args:
A: np.ndarray
matrix of spatial components (d x K)
new_a: sparse array
spatial component that is added, in order to efficiently update the orders in online scenarios
        prev_list: list of lists
            orders from the previous iteration; must be passed when new_a is not None
Returns:
O: list of sets
list of subsets of components. The components of each subset can be updated in parallel
lo: list
length of each subset
Written by Eftychios A. Pnevmatikakis, Simons Foundation, 2015
'''
K = np.shape(A)[-1]
if new_a is None and prev_list is None:
        if method == 'greedy':  # identity comparison with a string literal is a bug
prev_list, count_list = update_order_greedy(A, flag_AA=False)
else:
prev_list, count_list = update_order_random(A, flag_AA=False)
return prev_list, count_list
else:
if new_a is None or prev_list is None:
raise Exception(
'In the online update order you need to provide both new_a and prev_list')
counter = 0
AA = A.T.dot(new_a)
for group in prev_list:
if AA[list(group)].sum() == 0:
group.append(K)
counter += 1
break
if counter == 0:
if prev_list is not None:
prev_list = list(prev_list)
prev_list.append([K])
count_list = [len(gr) for gr in prev_list]
return prev_list, count_list
def order_components(A, C):
"""Order components based on their maximum temporal value and size
Args:
A: sparse matrix (d x K)
spatial components
C: matrix or np.ndarray (K x T)
temporal components
Returns:
A_or: np.ndarray
ordered spatial components
C_or: np.ndarray
ordered temporal components
srt: np.ndarray
sorting mapping
"""
A = np.array(A.todense())
nA2 = np.sqrt(np.sum(A**2, axis=0))
K = len(nA2)
A = np.array(np.matrix(A) * spdiags(old_div(1, nA2), 0, K, K))
nA4 = np.sum(A**4, axis=0)**0.25
C = np.array(spdiags(nA2, 0, K, K) * np.matrix(C))
mC = np.ndarray.max(np.array(C), axis=1)
srt = np.argsort(nA4 * mC)[::-1]
A_or = A[:, srt] * spdiags(nA2[srt], 0, K, K)
C_or = spdiags(old_div(1., nA2[srt]), 0, K, K) * (C[srt, :])
return A_or, C_or, srt
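# The sort key above is nA4 * mC: the L4 norm of each unit-normalized spatial
# footprint (larger for more spatially concentrated components) times the
# maximum of the rescaled temporal trace, so bright, compact components are
# placed first.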
def update_order_random(A, flag_AA=True):
"""Determies the update order of temporal components using
randomized partitions of non-overlapping components
"""
K = np.shape(A)[-1]
if flag_AA:
AA = A.copy()
else:
AA = A.T.dot(A)
AA.setdiag(0)
F = (AA) > 0
F = F.toarray()
rem_ind = np.arange(K)
O = []
lo = []
while len(rem_ind) > 0:
L = np.sort(app_vertex_cover(F[rem_ind, :][:, rem_ind]))
if L.size:
ord_ind = set(rem_ind) - set(rem_ind[L])
rem_ind = rem_ind[L]
else:
ord_ind = set(rem_ind)
rem_ind = []
O.append(ord_ind)
lo.append(len(ord_ind))
return O[::-1], lo[::-1]
def update_order_greedy(A, flag_AA=True):
"""Determines the update order of the temporal components
this, given the spatial components using a greedy method
Basically we can update the components that are not overlapping, in parallel
Args:
        A: sparse csc matrix
            matrix of spatial components (d x K),
            OR the precomputed A.T.dot(A) matrix (K x K) if flag_AA = True
flag_AA: boolean (default true)
Returns:
parllcomp: list of sets
list of subsets of components. The components of each subset can be updated in parallel
len_parrllcomp: list
length of each subset
Author:
Eftychios A. Pnevmatikakis, Simons Foundation, 2017
"""
K = np.shape(A)[-1]
    parllcomp: List = []
for i in range(K):
new_list = True
for ls in parllcomp:
if flag_AA:
if A[i, ls].nnz == 0:
ls.append(i)
new_list = False
break
else:
if (A[:, i].T.dot(A[:, ls])).nnz == 0:
ls.append(i)
new_list = False
break
if new_list:
parllcomp.append([i])
len_parrllcomp = [len(ls) for ls in parllcomp]
return parllcomp, len_parrllcomp
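# Deterministic example: components 0 and 2 share support on the first pixel,
# component 1 overlaps neither, so 0 and 1 can be updated in parallel:
# >>> A = scipy.sparse.csc_matrix(np.array([[1., 0., 1.],
# ...                                       [0., 1., 0.]]))
# >>> update_order_greedy(A, flag_AA=False)
# ([[0, 1], [2]], [2, 1])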
#%%
def compute_residuals(Yr_mmap_file, A_, b_, C_, f_, dview=None, block_size=1000, num_blocks_per_run=5):
'''compute residuals from memory mapped file and output of CNMF
Args:
A_,b_,C_,f_:
from CNMF
block_size: int
number of pixels processed together
        num_blocks_per_run: int
            number of blocks processed in parallel
Returns:
YrA: ndarray
residuals per neuron
'''
if not ('sparse' in str(type(A_))):
A_ = scipy.sparse.coo_matrix(A_)
Ab = scipy.sparse.hstack((A_, b_)).tocsc()
Cf = np.vstack((C_, f_))
nA = np.ravel(Ab.power(2).sum(axis=0))
if 'mmap' in str(type(Yr_mmap_file)):
YA = parallel_dot_product(Yr_mmap_file, Ab, dview=dview, block_size=block_size,
transpose=True, num_blocks_per_run=num_blocks_per_run) * scipy.sparse.spdiags(old_div(1., nA), 0, Ab.shape[-1], Ab.shape[-1])
else:
YA = (Ab.T.dot(Yr_mmap_file)).T * \
spdiags(old_div(1., nA), 0, Ab.shape[-1], Ab.shape[-1])
AA = ((Ab.T.dot(Ab)) * scipy.sparse.spdiags(old_div(1., nA),
0, Ab.shape[-1], Ab.shape[-1])).tocsr()
return (YA - (AA.T.dot(Cf)).T)[:, :A_.shape[-1]].T
def normalize_AC(A, C, YrA, b, f, neurons_sn):
""" Normalize to unit norm A and b
Args:
A,C,Yr,b,f:
outputs of CNMF
"""
if 'sparse' in str(type(A)):
nA = np.ravel(np.sqrt(A.power(2).sum(0)))
else:
nA = np.ravel(np.sqrt((A**2).sum(0)))
if A is not None:
A /= nA
if C is not None:
C = np.array(C)
C *= nA[:, None]
if YrA is not None:
YrA = np.array(YrA)
YrA *= nA[:, None]
if b is not None:
if issparse(b):
nB = np.ravel(np.sqrt(b.power(2).sum(0)))
b = csc_matrix(b)
for k, i in enumerate(b.indptr[:-1]):
b.data[i:b.indptr[k + 1]] /= nB[k]
else:
nB = np.ravel(np.sqrt((b**2).sum(0)))
b = np.atleast_2d(b)
b /= nB
if issparse(f):
f = csr_matrix(f)
for k, i in enumerate(f.indptr[:-1]):
f.data[i:f.indptr[k + 1]] *= nB[k]
else:
f = np.atleast_2d(f)
f *= nB[:, np.newaxis]
if neurons_sn is not None:
neurons_sn *= nA
return csc_matrix(A), C, YrA, b, f, neurons_sn
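# Note on normalize_AC: each column of A (and b) is divided by its norm while
# the corresponding row of C and YrA (and f) is multiplied by the same norm,
# so the reconstructions A.dot(C) and b.dot(f) are left unchanged; only the
# scaling convention moves between the spatial and temporal factors.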
def get_file_size(file_name, var_name_hdf5='mov'):
""" Computes the dimensions of a file or a list of files without loading
it/them in memory. An exception is thrown if the files have FOVs with
different sizes
Args:
file_name: str or list
locations of file(s) in memory
        var_name_hdf5: str
            if loading from hdf5, the name of the variable to load
Returns:
dims: list
dimensions of FOV
T: list
number of timesteps in each file
"""
if isinstance(file_name, str):
if os.path.exists(file_name):
_, extension = os.path.splitext(file_name)[:2]
extension = extension.lower()
if extension == '.mat':
byte_stream, file_opened = scipy.io.matlab.mio._open_file(file_name, appendmat=False)
mjv, mnv = scipy.io.matlab.mio.get_matfile_version(byte_stream)
if mjv == 2:
extension = '.h5'
if extension in ['.tif', '.tiff', '.btf']:
tffl = tifffile.TiffFile(file_name)
siz = tffl.series[0].shape
T, dims = siz[0], siz[1:]
elif extension == '.avi':
cap = cv2.VideoCapture(file_name)
dims = [0, 0]
try:
T = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
dims[1] = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
dims[0] = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                except Exception:  # 'except():' catches nothing; fall back to OpenCV 2 property names
                    print('Falling back to OpenCV 2 property names')
T = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
dims[1] = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
dims[0] = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
elif extension == '.mmap':
filename = os.path.split(file_name)[-1]
Yr, dims, T = load_memmap(os.path.join(
os.path.split(file_name)[0], filename))
elif extension in ('.h5', '.hdf5', '.nwb'):
with h5py.File(file_name, "r") as f:
kk = list(f.keys())
if len(kk) == 1:
siz = f[kk[0]].shape
elif var_name_hdf5 in f:
if extension == '.nwb':
siz = f[var_name_hdf5]['data'].shape
else:
siz = f[var_name_hdf5].shape
else:
                        logging.error('The file does not contain a variable ' +
                                      'named {0}'.format(var_name_hdf5))
raise Exception('Variable not found. Use one of the above')
T, dims = siz[0], siz[1:]
            elif extension == '.sbx':  # ('.sbx') is a plain string, not a tuple
from ...base.movies import loadmat_sbx
info = loadmat_sbx(file_name[:-4]+ '.mat')['info']
dims = tuple((info['sz']).astype(int))
# Defining number of channels/size factor
if info['channels'] == 1:
info['nChan'] = 2
factor = 1
elif info['channels'] == 2:
info['nChan'] = 1
factor = 2
elif info['channels'] == 3:
info['nChan'] = 1
factor = 2
# Determine number of frames in whole file
T = int(os.path.getsize(
file_name[:-4] + '.sbx') / info['recordsPerBuffer'] / info['sz'][1] * factor / 4 - 1)
else:
raise Exception('Unknown file type')
dims = tuple(dims)
else:
raise Exception('File not found!')
elif isinstance(file_name, tuple):
from ...base.movies import load
dims = load(file_name[0], var_name_hdf5=var_name_hdf5).shape
T = len(file_name)
elif isinstance(file_name, list):
if len(file_name) == 1:
dims, T = get_file_size(file_name[0], var_name_hdf5=var_name_hdf5)
else:
dims, T = zip(*[get_file_size(fn, var_name_hdf5=var_name_hdf5)
for fn in file_name])
else:
raise Exception('Unknown input type')
return dims, T
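# Usage sketch for get_file_size (hypothetical file names):
# >>> dims, T = get_file_size('movie.tif')           # dims of FOV, n frames
# >>> dims, T = get_file_size(['a.tif', 'b.tif'])    # tuples, one per file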
def fast_graph_Laplacian(mmap_file, dims, max_radius=10, kernel='heat',
dview=None, sigma=1, thr=0.05, p=10, normalize=True,
use_NN=False, rf=None, strides=None):
""" Computes an approximate affinity maps and its graph Laplacian for all
pixels. For each pixel it restricts its attention to a given radius around
it.
Args:
mmap_file: str
Memory mapped file in pixel first order
max_radius: float
Maximum radius around each pixel
kernel: str {'heat', 'binary', 'cos'}
type of kernel
dview: dview object
multiprocessing or ipyparallel object for parallelization
sigma: float
standard deviation of Gaussian (heat) kernel
thr: float
threshold for affinity matrix
p: int
number of neighbors
normalize: bool
normalize vectors before computing affinity
use_NN: bool
use only p nearest neighbors
Returns:
W: scipy.sparse.csr_matrix
Graph affinity matrix
D: scipy.sparse.spdiags
Diagonal of affinity matrix
L: scipy.sparse.csr_matrix
Graph Laplacian matrix
"""
Np = np.prod(np.array(dims))
if rf is None:
pars = []
for i in range(Np):
pars.append([i, mmap_file, dims, max_radius, kernel, sigma, thr,
p, normalize, use_NN])
if dview is None:
res = list(map(fast_graph_Laplacian_pixel, pars))
else:
res = dview.map(fast_graph_Laplacian_pixel, pars, chunksize=128)
indptr = np.cumsum(np.array([0] + [len(r[0]) for r in res]))
indeces = [item for sublist in res for item in sublist[0]]
data = [item for sublist in res for item in sublist[1]]
W = scipy.sparse.csr_matrix((data, indeces, indptr), shape=[Np, Np])
D = scipy.sparse.spdiags(W.sum(0), 0, Np, Np)
L = D - W
else:
indices, _ = extract_patch_coordinates(dims, rf, strides)
pars = []
for i in range(len(indices)):
pars.append([mmap_file, indices[i], kernel, sigma, thr, p,
normalize, use_NN])
if dview is None:
res = list(map(fast_graph_Laplacian_patches, pars))
else:
res = dview.map(fast_graph_Laplacian_patches, pars)
W = res
D = [scipy.sparse.spdiags(w.sum(0), 0, w.shape[0], w.shape[0]) for w in W]
        L = [d - w for (d, w) in zip(D, W)]  # L = D - W; the unpacking order must match
return W, D, L
def fast_graph_Laplacian_patches(pars):
""" Computes the full graph affinity matrix on a patch. See
fast_graph_Laplacian above for definition of arguments.
"""
mmap_file, indices, kernel, sigma, thr, p, normalize, use_NN = pars
    if not isinstance(mmap_file, (str, list)):  # comparing type objects to strings never matches
Yind = mmap_file
else:
Y = load_memmap(mmap_file)[0]
Yind = np.array(Y[indices])
if normalize:
Yind -= Yind.mean(1)[:, np.newaxis]
Yind /= np.sqrt((Yind**2).sum(1)[:, np.newaxis])
yf = np.ones((Yind.shape[0], 1))
else:
yf = (Yind**2).sum(1)[:, np.newaxis]
yyt = Yind.dot(Yind.T)
W = np.exp(-(yf + yf.T - 2*yyt)/sigma) if kernel.lower() == 'heat' else yyt
W[W<thr] = 0
if kernel.lower() == 'binary':
W[W>0] = 1
if use_NN:
ind = np.argpartition(W, -p, axis=1)[:, :-p]
for i in range(W.shape[0]):
W[i, ind[i]] = 0
W = scipy.sparse.csr_matrix(W)
W = (W + W.T)/2
return W
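# Why yf + yf.T - 2*yyt works in the heat kernel above: after normalization
# every trace has unit norm, so the squared distance between traces i and j is
# ||y_i - y_j||^2 = ||y_i||^2 + ||y_j||^2 - 2 y_i.y_j, computable from the
# Gram matrix yyt alone. Quick check with made-up data:
# >>> Y = np.random.randn(4, 50)
# >>> Y /= np.sqrt((Y**2).sum(1))[:, None]
# >>> np.allclose(2 - 2*Y.dot(Y.T), ((Y[:, None] - Y[None])**2).sum(-1))
# True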
def fast_graph_Laplacian_pixel(pars):
""" Computes the i-th row of the Graph affinity matrix. See
fast_graph_Laplacian above for definition of arguments.
"""
i, mmap_file, dims, max_radius, kernel, sigma, thr, p, normalize, use_NN = pars
iy, ix = np.unravel_index(i, dims, order='F')
xx = np.arange(0, dims[1]) - ix
yy = np.arange(0, dims[0]) - iy
[XX, YY] = np.meshgrid(xx, yy)
R = np.sqrt(XX**2 + YY**2)
R = R.flatten('F')
indeces = np.where(R < max_radius)[0]
Y = load_memmap(mmap_file)[0]
Yind = np.array(Y[indeces])
y = np.array(Y[i, :])
if normalize:
Yind -= Yind.mean(1)[:, np.newaxis]
Yind /= np.sqrt((Yind**2).sum(1)[:, np.newaxis])
y -= y.mean()
y /= np.sqrt((y**2).sum())
D = Yind - y
if kernel.lower() == 'heat':
w = np.exp(-np.sum(D**2, axis=1)/sigma)
else: # kernel.lower() == 'cos':
w = Yind.dot(y.T)
w[w<thr] = 0
if kernel.lower() == 'binary':
w[w>0] = 1
if use_NN:
ind = np.argpartition(w, -p)[-p:]
else:
ind = np.where(w>0)[0]
return indeces[ind].tolist(), w[ind].tolist()
|
agiovann/Constrained_NMF
|
caiman/source_extraction/cnmf/utilities.py
|
Python
|
gpl-2.0
| 40,754
|
#!/usr/bin/env python
# Multiple URL Command Client
#
# Combine a list of mv, cp, rm, and put commands on URLs into a single commit
#
# To read the help for this program, type python mucc.py --help
import os
import sys
from csvn.core import *
from csvn.repos import RemoteRepository, RepositoryURI
from csvn.auth import User
from optparse import OptionParser
usage = """python mucc.py [OPTION]... [ACTION]...
Actions:
cp REV URL1 URL2 copy URL1@REV to URL2
mkdir URL create new directory URL
mv URL1 URL2 move URL1 to URL2
rm URL delete URL
put SRC-FILE URL add or modify file URL with contents copied
from SRC-FILE
propset NAME VAL URL Set property NAME on URL to value VAL
propdel NAME URL Delete property NAME from URL
"""
# Read and parse options
parser = OptionParser(usage=usage)
parser.add_option("-m", "--message", dest="message",
help="use MESSAGE as a log message")
parser.add_option("-F", "--file", dest="file",
help="read log message from FILE")
parser.add_option("-u", "--username", dest="username",
help="commit the changes as USERNAME")
parser.add_option("-p", "--password", dest="password",
help="use password PASSWORD")
parser.add_option("-U", "--root-url", dest="root_url",
help="Interpret all action URLs as relative to ROOT_URL")
parser.add_option("-r", "--revision", dest="rev",
help="Use REV as baseline for changes")
parser.add_option("-X", "--extra-args ARG", dest="extra_args",
help='append arguments from file EXTRA_ARGS (one per line; '
'use "-" to read from standard input)')
(options, args) = parser.parse_args()
# Read any extra arguments
if options.extra_args:
    f = open(options.extra_args)
for line in f:
args.append(line.strip())
if not args:
parser.print_help()
sys.exit(1)
# Initialize variables
root_url = options.root_url
actions = []
svn_cmdline_init("", stderr)
pool = Pool()
action = None
if root_url:
anchor = RepositoryURI(root_url)
else:
anchor = None
states = None
ancestor = None
# A list of the arguments accepted by each command
cmds = {
"cp": [ "rev", "url", "url" ],
"mkdir": [ "url" ],
"mv": [ "url", "url" ],
"rm": [ "url" ],
"put": [ "file", "url" ],
"propset": [ "name", "val", "url" ],
"propdel": [ "name", "url" ],
}
# Build up a list of the actions we want to perform
for arg in args:
if not states:
action = [arg]
actions.append((arg, action))
states = list(cmds[arg])
states.reverse()
else:
state = states.pop()
if state == "rev":
            # the old 'and/or' idiom mishandled revision 0
            action.append(int(arg) if arg.upper() != "HEAD" else None)
elif state == "url":
arg = RepositoryURI(arg)
if anchor:
arg = anchor.join(arg)
action.append(arg)
# It's legal to make a copy of the repository root,
# so, we should treat copyfrom paths as possible
# repository roots
may_be_root = (len(action) == 2 and action[0] == "cp")
if not may_be_root:
arg = arg.dirname()
if ancestor:
ancestor = ancestor.longest_ancestor(arg)
else:
ancestor = arg
else:
action.append(arg)
session = RemoteRepository(ancestor, user=User(username=options.username))
txn = session.txn()
# Carry out the transaction
for action, args in actions:
if action == "cp":
txn.copy(src_rev=args[1], src_path=args[2], dest_path=args[3])
elif action == "mv":
txn.delete(str(args[1]))
txn.copy(src_path=args[1], dest_path=args[2])
elif action == "rm":
txn.delete(args[1])
elif action == "mkdir":
txn.mkdir(args[1])
elif action == "put":
txn.upload(local_path=args[1], remote_path=args[2])
elif action == "propset":
txn.propset(key=args[1], value=args[2], path=args[3])
elif action == "propdel":
txn.propdel(key=args[1], path=args[2])
# Get the log message
message = options.message
if options.file:
    message = open(options.file).read()
# Finally commit
txn.commit(message)
print("r%ld committed by %s at %s" % (txn.committed_rev, options.username,
txn.committed_date))
|
bdmod/extreme-subversion
|
BinarySourcce/subversion-1.6.17/subversion/bindings/ctypes-python/examples/mucc.py
|
Python
|
gpl-2.0
| 4,445
|
#!/usr/bin/env python3
import os
import re
import subprocess
import sys
import threading
import time
import urllib.request
from subprocess import Popen, PIPE
sys.path.append("..")
from check_with_sitemap import CheckWithSitemap
DEFAULT_JAVA_PATH = 'java'
class CheckWithSiteMapVpro(CheckWithSitemap):
"""
This specialization is customized for VPRO.
It can connect via JMX to VPRO's Mangolia CMS which contains the original pages, and request it to index missing pages
This wraps a command line client for jmx: https://github.com/jiaqi/jmxterm/
"""
def __init__(self, java_path: str = DEFAULT_JAVA_PATH):
super().__init__()
self.jmx_url = self.args.jmx_url
self.jmxterm_binary = self.args.jmxterm_binary
self.java_path = java_path
self._get_jmx_term_if_necessary()
if self.args.tunnel:
tunnel = SshTunnel(self.log)
tunnel.start()
def add_arguments(self):
super().add_arguments()
api = self.api
api.add_argument('--jmx_url', type=str, default=None, help='use JMX to trigger reindex. An url like "localhost:500" where this is tunneled to the magnolia backend server')
api.add_argument('--jmxterm_binary', type=str, default=None, help='location of jmxterm binary')
api.add_argument('--tunnel', action='store_true', default=False, help='set up jmx tunnel too')
def perform_add_to_api(self, not_in_api: list):
"""
Actually add to api
"""
if self.jmx_url:
self.jmxterm = [self.java_path, '-jar', self.jmxterm_binary, '--url', self.jmx_url, "-n", "-v", "silent"]
not_in_api = self._reindex_3voor12(not_in_api)
not_in_api = self._reindex_cinema_films(not_in_api)
not_in_api = self._reindex_cinema_person(not_in_api)
not_in_api = self._reindex_mids(not_in_api)
self._reindex_urls(not_in_api)
else:
self.log.info("No jmx_url configured, not trying to implicitly add to api via JMX")
def _reindex_mids(self, not_in_api: list) -> list:
urls_with_mid = list(filter(lambda m: m[0] is not None, map(self._find_mid, not_in_api)))
return self._reindex_ids(not_in_api, urls_with_mid, "nl.vpro.magnolia:name=IndexerMaintainerImpl", "reindexMediaObjects", 100, "media objects")
def _reindex_3voor12(self, not_in_api: list) -> list:
urls_with_uuids = list(filter(lambda m: m[0] is not None, map(self._find_update_uuid, not_in_api)))
return self._reindex_ids(not_in_api, urls_with_uuids, "nl.vpro.magnolia:name=DrieVoorTwaalfUpdateIndexer", "reindexUUIDs", 100, "3voor12 updates")
def _reindex_cinema_films(self, not_in_api: list) -> list:
cinema_ids = list(filter(lambda m: m[0] is not None, map(self._find_cinema_film_id, not_in_api)))
return self._reindex_ids(not_in_api, cinema_ids, "nl.vpro.magnolia:name=CinemaObjectIndexer", "reindex", 100, "cinema films")
def _reindex_cinema_person(self, not_in_api: list) -> list:
cinema_ids = list(filter(lambda m: m[0] is not None, map(self._find_cinema_person_uid, not_in_api)))
return self._reindex_ids(not_in_api, cinema_ids, "nl.vpro.magnolia:name=CinemaPersonIndexer", "reindex", 100, "cinema persons")
def _reindex_urls(self, not_in_api: list) -> None:
page_size = 20
self.log.info("Reindexing %d urls" % len(not_in_api))
        for i in range(0, len(not_in_api), page_size):
            self._call_jmx_operation("nl.vpro.magnolia:name=IndexerMaintainerImpl", "reindexUrls", not_in_api[i: i + page_size])
def _find_mid(self, url: str) -> list:
return self._find_by_regexp(".*?~(.*?)~.*", url)
def _find_update_uuid(self, url: str) -> list:
return self._find_by_regexp(".*?update~(.*?)~.*", url)
def _find_cinema_film_id(self, url: str) -> list:
return self._find_by_regexp(".*?film~(.*?)~.*", url)
def _find_cinema_person_uid(self, url: str) -> list:
return self._find_by_regexp(".*?persoon~(.*?)~.*", url)
@staticmethod
def _find_by_regexp(regex: str, url: str) -> list:
matcher = re.match(regex, url)
if matcher:
return [matcher.group(1), url]
else:
return [None, url]
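    # Illustrative example (made-up URL): the first '~'-delimited token is the
    # extracted id.
    # >>> CheckWithSiteMapVpro._find_by_regexp(
    # ...     ".*?~(.*?)~.*", "https://example.vpro.nl/speel~WO_VPRO_123~x.html")
    # ['WO_VPRO_123', 'https://example.vpro.nl/speel~WO_VPRO_123~x.html']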
def _reindex_ids(
self, not_in_api: list,
ids: list,
bean: str,
operation: str, page_size: int, name: str) -> list:
self.log.info("Reindexing %d %s" % (len(ids), name))
for i in range(0, len(ids), page_size):
self._call_jmx_operation(bean, operation, list(map(lambda m : m[0], ids[i: i + page_size])))
urls = list(map(lambda u: u[1], ids))
self.log.debug("Associated with %s" % str(urls))
return [e for e in not_in_api if e not in urls]
def _call_jmx_operation(self, bean: str, operation: str, sub_list: list):
p = Popen(self.jmxterm, stdin=PIPE, stdout=PIPE, encoding='utf-8')
input = "bean " + bean +"\nrun " + operation + " " + ",".join(sub_list)
self.log.info("input\n%s" % input)
out, error = p.communicate(input=input, timeout=100)
self.log.info("output\n%s" % out)
if error:
self.log.info("error\n%s" % error)
if "still busy" in out:
self.log.info("Jmx reports that still busy. Let's wait a bit then")
time.sleep(20)
def _get_jmx_term_if_necessary(self):
if self.jmx_url and not self.jmxterm_binary:
from_env = os.getenv('JMXTERM_BINARY')
            if from_env is not None:
                self.jmxterm_binary = from_env
else:
jmxtermversion = "1.0.2"
jmxterm = "jmxterm-" + jmxtermversion + "-uber.jar"
path = os.path.dirname(os.path.realpath(__file__))
self.jmxterm_binary = os.path.join(path, jmxterm)
if not os.path.exists(self.jmxterm_binary):
get_url = "https://github.com/jiaqi/jmxterm/releases/download/v" + jmxtermversion + "/" + jmxterm
self.log.info("Downloading %s -> %s" % (get_url, self.jmxterm_binary))
                    urllib.request.urlretrieve(get_url, self.jmxterm_binary)
class SshTunnel(threading.Thread):
def __init__(self, log):
threading.Thread.__init__(self)
        self.daemon = True  # so that the thread exits when the main non-daemon thread finishes
self.log = log
def run(self):
self.log.info("Setting up tunnel")
if subprocess.call([
'ssh', '-N', '-4',
'-L', '5000:localhost:5000',
'os2-magnolia-backend-prod-01'
]):
raise Exception ('ssh tunnel setup failed')
if __name__ == "__main__":
CheckWithSiteMapVpro().main()
|
npo-poms/scripts
|
python/vpro/check_with_sitemap_vpro.py
|
Python
|
gpl-2.0
| 6,894
|
import ntpath
import os
import sys
from test.test_support import TestFailed
from test import test_support, test_genericpath
import unittest
def tester0(fn, wantResult):
gotResult = eval(fn)
if wantResult != gotResult:
raise TestFailed, "%s should return: %r but returned: %r" \
%(fn, wantResult, gotResult)
def tester(fn, wantResult):
fn = fn.replace("\\", "\\\\")
tester0(fn, wantResult)
class TestNtpath(unittest.TestCase):
def test_splitext(self):
tester('ntpath.splitext("foo.ext")', ('foo', '.ext'))
tester('ntpath.splitext("/foo/foo.ext")', ('/foo/foo', '.ext'))
tester('ntpath.splitext(".ext")', ('.ext', ''))
tester('ntpath.splitext("\\foo.ext\\foo")', ('\\foo.ext\\foo', ''))
tester('ntpath.splitext("foo.ext\\")', ('foo.ext\\', ''))
tester('ntpath.splitext("")', ('', ''))
tester('ntpath.splitext("foo.bar.ext")', ('foo.bar', '.ext'))
tester('ntpath.splitext("xx/foo.bar.ext")', ('xx/foo.bar', '.ext'))
tester('ntpath.splitext("xx\\foo.bar.ext")', ('xx\\foo.bar', '.ext'))
tester('ntpath.splitext("c:a/b\\c.d")', ('c:a/b\\c', '.d'))
def test_splitdrive(self):
tester('ntpath.splitdrive("c:\\foo\\bar")',
('c:', '\\foo\\bar'))
tester('ntpath.splitdrive("c:/foo/bar")',
('c:', '/foo/bar'))
def test_splitunc(self):
tester('ntpath.splitunc("c:\\foo\\bar")',
('', 'c:\\foo\\bar'))
tester('ntpath.splitunc("c:/foo/bar")',
('', 'c:/foo/bar'))
tester('ntpath.splitunc("\\\\conky\\mountpoint\\foo\\bar")',
('\\\\conky\\mountpoint', '\\foo\\bar'))
tester('ntpath.splitunc("//conky/mountpoint/foo/bar")',
('//conky/mountpoint', '/foo/bar'))
tester('ntpath.splitunc("\\\\\\conky\\mountpoint\\foo\\bar")',
('', '\\\\\\conky\\mountpoint\\foo\\bar'))
tester('ntpath.splitunc("///conky/mountpoint/foo/bar")',
('', '///conky/mountpoint/foo/bar'))
tester('ntpath.splitunc("\\\\conky\\\\mountpoint\\foo\\bar")',
('', '\\\\conky\\\\mountpoint\\foo\\bar'))
tester('ntpath.splitunc("//conky//mountpoint/foo/bar")',
('', '//conky//mountpoint/foo/bar'))
self.assertEqual(ntpath.splitunc(u'//conky/MOUNTPO\u0130NT/foo/bar'),
(u'//conky/MOUNTPO\u0130NT', u'/foo/bar'))
def test_split(self):
tester('ntpath.split("c:\\foo\\bar")', ('c:\\foo', 'bar'))
tester('ntpath.split("\\\\conky\\mountpoint\\foo\\bar")',
('\\\\conky\\mountpoint\\foo', 'bar'))
tester('ntpath.split("c:\\")', ('c:\\', ''))
tester('ntpath.split("\\\\conky\\mountpoint\\")',
('\\\\conky\\mountpoint', ''))
tester('ntpath.split("c:/")', ('c:/', ''))
tester('ntpath.split("//conky/mountpoint/")', ('//conky/mountpoint', ''))
def test_isabs(self):
tester('ntpath.isabs("c:\\")', 1)
tester('ntpath.isabs("\\\\conky\\mountpoint\\")', 1)
tester('ntpath.isabs("\\foo")', 1)
tester('ntpath.isabs("\\foo\\bar")', 1)
def test_commonprefix(self):
tester('ntpath.commonprefix(["/home/swenson/spam", "/home/swen/spam"])',
"/home/swen")
tester('ntpath.commonprefix(["\\home\\swen\\spam", "\\home\\swen\\eggs"])',
"\\home\\swen\\")
tester('ntpath.commonprefix(["/home/swen/spam", "/home/swen/spam"])',
"/home/swen/spam")
def test_join(self):
tester('ntpath.join("")', '')
tester('ntpath.join("", "", "")', '')
tester('ntpath.join("a")', 'a')
tester('ntpath.join("/a")', '/a')
tester('ntpath.join("\\a")', '\\a')
tester('ntpath.join("a:")', 'a:')
tester('ntpath.join("a:", "\\b")', 'a:\\b')
tester('ntpath.join("a", "\\b")', '\\b')
tester('ntpath.join("a", "b", "c")', 'a\\b\\c')
tester('ntpath.join("a\\", "b", "c")', 'a\\b\\c')
tester('ntpath.join("a", "b\\", "c")', 'a\\b\\c')
tester('ntpath.join("a", "b", "\\c")', '\\c')
tester('ntpath.join("d:\\", "\\pleep")', 'd:\\pleep')
tester('ntpath.join("d:\\", "a", "b")', 'd:\\a\\b')
tester("ntpath.join('', 'a')", 'a')
tester("ntpath.join('', '', '', '', 'a')", 'a')
tester("ntpath.join('a', '')", 'a\\')
tester("ntpath.join('a', '', '', '', '')", 'a\\')
tester("ntpath.join('a\\', '')", 'a\\')
tester("ntpath.join('a\\', '', '', '', '')", 'a\\')
tester("ntpath.join('a/', '')", 'a/')
tester("ntpath.join('a/b', 'x/y')", 'a/b\\x/y')
tester("ntpath.join('/a/b', 'x/y')", '/a/b\\x/y')
tester("ntpath.join('/a/b/', 'x/y')", '/a/b/x/y')
tester("ntpath.join('c:', 'x/y')", 'c:x/y')
tester("ntpath.join('c:a/b', 'x/y')", 'c:a/b\\x/y')
tester("ntpath.join('c:a/b/', 'x/y')", 'c:a/b/x/y')
tester("ntpath.join('c:/', 'x/y')", 'c:/x/y')
tester("ntpath.join('c:/a/b', 'x/y')", 'c:/a/b\\x/y')
tester("ntpath.join('c:/a/b/', 'x/y')", 'c:/a/b/x/y')
#tester("ntpath.join('//computer/share', 'x/y')", '//computer/share\\x/y')
#tester("ntpath.join('//computer/share/', 'x/y')", '//computer/share/x/y')
#tester("ntpath.join('//computer/share/a/b', 'x/y')", '//computer/share/a/b\\x/y')
tester("ntpath.join('a/b', '/x/y')", '/x/y')
tester("ntpath.join('/a/b', '/x/y')", '/x/y')
tester("ntpath.join('c:', '/x/y')", 'c:/x/y')
tester("ntpath.join('c:a/b', '/x/y')", 'c:/x/y')
tester("ntpath.join('c:/', '/x/y')", 'c:/x/y')
tester("ntpath.join('c:/a/b', '/x/y')", 'c:/x/y')
#tester("ntpath.join('//computer/share', '/x/y')", '//computer/share/x/y')
#tester("ntpath.join('//computer/share/', '/x/y')", '//computer/share/x/y')
#tester("ntpath.join('//computer/share/a', '/x/y')", '//computer/share/x/y')
tester("ntpath.join('c:', 'C:x/y')", 'C:x/y')
tester("ntpath.join('c:a/b', 'C:x/y')", 'C:a/b\\x/y')
tester("ntpath.join('c:/', 'C:x/y')", 'C:/x/y')
tester("ntpath.join('c:/a/b', 'C:x/y')", 'C:/a/b\\x/y')
for x in ('', 'a/b', '/a/b', 'c:', 'c:a/b', 'c:/', 'c:/a/b'):
for y in ('d:', 'd:x/y', 'd:/', 'd:/x/y'):
tester("ntpath.join(%r, %r)" % (x, y), y)
def test_normpath(self):
tester("ntpath.normpath('A//////././//.//B')", r'A\B')
tester("ntpath.normpath('A/./B')", r'A\B')
tester("ntpath.normpath('A/foo/../B')", r'A\B')
tester("ntpath.normpath('C:A//B')", r'C:A\B')
tester("ntpath.normpath('D:A/./B')", r'D:A\B')
tester("ntpath.normpath('e:A/foo/../B')", r'e:A\B')
tester("ntpath.normpath('C:///A//B')", r'C:\A\B')
tester("ntpath.normpath('D:///A/./B')", r'D:\A\B')
tester("ntpath.normpath('e:///A/foo/../B')", r'e:\A\B')
tester("ntpath.normpath('..')", r'..')
tester("ntpath.normpath('.')", r'.')
tester("ntpath.normpath('')", r'.')
tester("ntpath.normpath('/')", '\\')
tester("ntpath.normpath('c:/')", 'c:\\')
tester("ntpath.normpath('/../.././..')", '\\')
tester("ntpath.normpath('c:/../../..')", 'c:\\')
tester("ntpath.normpath('../.././..')", r'..\..\..')
tester("ntpath.normpath('K:../.././..')", r'K:..\..\..')
tester("ntpath.normpath('C:////a/b')", r'C:\a\b')
tester("ntpath.normpath('//machine/share//a/b')", r'\\machine\share\a\b')
tester("ntpath.normpath('\\\\.\\NUL')", r'\\.\NUL')
tester("ntpath.normpath('\\\\?\\D:/XY\\Z')", r'\\?\D:/XY\Z')
def test_expandvars(self):
with test_support.EnvironmentVarGuard() as env:
env.clear()
env["foo"] = "bar"
env["{foo"] = "baz1"
env["{foo}"] = "baz2"
tester('ntpath.expandvars("foo")', "foo")
tester('ntpath.expandvars("$foo bar")', "bar bar")
tester('ntpath.expandvars("${foo}bar")', "barbar")
tester('ntpath.expandvars("$[foo]bar")', "$[foo]bar")
tester('ntpath.expandvars("$bar bar")', "$bar bar")
tester('ntpath.expandvars("$?bar")', "$?bar")
tester('ntpath.expandvars("$foo}bar")', "bar}bar")
tester('ntpath.expandvars("${foo")', "${foo")
tester('ntpath.expandvars("${{foo}}")', "baz1}")
tester('ntpath.expandvars("$foo$foo")', "barbar")
tester('ntpath.expandvars("$bar$bar")', "$bar$bar")
tester('ntpath.expandvars("%foo% bar")', "bar bar")
tester('ntpath.expandvars("%foo%bar")', "barbar")
tester('ntpath.expandvars("%foo%%foo%")', "barbar")
tester('ntpath.expandvars("%%foo%%foo%foo%")', "%foo%foobar")
tester('ntpath.expandvars("%?bar%")', "%?bar%")
tester('ntpath.expandvars("%foo%%bar")', "bar%bar")
tester('ntpath.expandvars("\'%foo%\'%bar")', "\'%foo%\'%bar")
@unittest.skipUnless(test_support.FS_NONASCII, 'need test_support.FS_NONASCII')
def test_expandvars_nonascii(self):
encoding = sys.getfilesystemencoding()
def check(value, expected):
tester0("ntpath.expandvars(%r)" % value, expected)
tester0("ntpath.expandvars(%r)" % value.decode(encoding),
expected.decode(encoding))
with test_support.EnvironmentVarGuard() as env:
env.clear()
unonascii = test_support.FS_NONASCII
snonascii = unonascii.encode(encoding)
env['spam'] = snonascii
env[snonascii] = 'ham' + snonascii
check('$spam bar', '%s bar' % snonascii)
check('$%s bar' % snonascii, '$%s bar' % snonascii)
check('${spam}bar', '%sbar' % snonascii)
check('${%s}bar' % snonascii, 'ham%sbar' % snonascii)
check('$spam}bar', '%s}bar' % snonascii)
check('$%s}bar' % snonascii, '$%s}bar' % snonascii)
check('%spam% bar', '%s bar' % snonascii)
check('%{}% bar'.format(snonascii), 'ham%s bar' % snonascii)
check('%spam%bar', '%sbar' % snonascii)
check('%{}%bar'.format(snonascii), 'ham%sbar' % snonascii)
def test_expanduser(self):
tester('ntpath.expanduser("test")', 'test')
with test_support.EnvironmentVarGuard() as env:
env.clear()
tester('ntpath.expanduser("~test")', '~test')
env['HOMEPATH'] = 'eric\\idle'
env['HOMEDRIVE'] = 'C:\\'
tester('ntpath.expanduser("~test")', 'C:\\eric\\test')
tester('ntpath.expanduser("~")', 'C:\\eric\\idle')
del env['HOMEDRIVE']
tester('ntpath.expanduser("~test")', 'eric\\test')
tester('ntpath.expanduser("~")', 'eric\\idle')
env.clear()
env['USERPROFILE'] = 'C:\\eric\\idle'
tester('ntpath.expanduser("~test")', 'C:\\eric\\test')
tester('ntpath.expanduser("~")', 'C:\\eric\\idle')
env.clear()
env['HOME'] = 'C:\\idle\\eric'
tester('ntpath.expanduser("~test")', 'C:\\idle\\test')
tester('ntpath.expanduser("~")', 'C:\\idle\\eric')
tester('ntpath.expanduser("~test\\foo\\bar")',
'C:\\idle\\test\\foo\\bar')
tester('ntpath.expanduser("~test/foo/bar")',
'C:\\idle\\test/foo/bar')
tester('ntpath.expanduser("~\\foo\\bar")',
'C:\\idle\\eric\\foo\\bar')
tester('ntpath.expanduser("~/foo/bar")',
'C:\\idle\\eric/foo/bar')
def test_abspath(self):
# ntpath.abspath() can only be used on a system with the "nt" module
# (reasonably), so we protect this test with "import nt". This allows
# the rest of the tests for the ntpath module to be run to completion
# on any platform, since most of the module is intended to be usable
# from any platform.
# XXX this needs more tests
try:
import nt
except ImportError:
# check that the function is there even if we are not on Windows
ntpath.abspath
else:
tester('ntpath.abspath("C:\\")', "C:\\")
def test_relpath(self):
currentdir = os.path.split(os.getcwd())[-1]
tester('ntpath.relpath("a")', 'a')
tester('ntpath.relpath(os.path.abspath("a"))', 'a')
tester('ntpath.relpath("a/b")', 'a\\b')
tester('ntpath.relpath("../a/b")', '..\\a\\b')
tester('ntpath.relpath("a", "../b")', '..\\'+currentdir+'\\a')
tester('ntpath.relpath("a/b", "../c")', '..\\'+currentdir+'\\a\\b')
tester('ntpath.relpath("a", "b/c")', '..\\..\\a')
tester('ntpath.relpath("//conky/mountpoint/a", "//conky/mountpoint/b/c")', '..\\..\\a')
tester('ntpath.relpath("a", "a")', '.')
tester('ntpath.relpath("/foo/bar/bat", "/x/y/z")', '..\\..\\..\\foo\\bar\\bat')
tester('ntpath.relpath("/foo/bar/bat", "/foo/bar")', 'bat')
tester('ntpath.relpath("/foo/bar/bat", "/")', 'foo\\bar\\bat')
tester('ntpath.relpath("/", "/foo/bar/bat")', '..\\..\\..')
tester('ntpath.relpath("/foo/bar/bat", "/x")', '..\\foo\\bar\\bat')
tester('ntpath.relpath("/x", "/foo/bar/bat")', '..\\..\\..\\x')
tester('ntpath.relpath("/", "/")', '.')
tester('ntpath.relpath("/a", "/a")', '.')
tester('ntpath.relpath("/a/b", "/a/b")', '.')
tester('ntpath.relpath("c:/foo", "C:/FOO")', '.')
class NtCommonTest(test_genericpath.CommonTest):
pathmodule = ntpath
attributes = ['relpath', 'splitunc']
def test_main():
test_support.run_unittest(TestNtpath, NtCommonTest)
if __name__ == "__main__":
unittest.main()
|
j5shi/Thruster
|
pylibs/test/test_ntpath.py
|
Python
|
gpl-2.0
| 14,186
|
# blender CAM utils.py (c) 2012 Vilem Novak
#
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
# here is the main functionality of Blender CAM. The functions here are called with operators defined in ops.py.
import bpy
import time
import mathutils
import math
from math import *
from mathutils import *
from bpy.props import *
from bpy_extras import object_utils
import sys, numpy, pickle
from cam.chunk import *
from cam.collision import *
from cam.simple import *
from cam.pattern import *
from cam.polygon_utils_cam import *
from cam.image_utils import *
from cam.opencamlib.opencamlib import oclSample, oclSamplePoints, oclResampleChunks, oclGetWaterline
from shapely.geometry import polygon as spolygon
from shapely import ops as sops
from shapely import geometry as sgeometry
# from shapely.geometry import * not possible until Polygon libs gets out finally..
SHAPELY = True
def positionObject(operation):
ob = bpy.data.objects[operation.object_name]
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
ob.select_set(True)
bpy.context.view_layer.objects.active = ob
minx, miny, minz, maxx, maxy, maxz = getBoundsWorldspace([ob], operation.use_modifiers)
    totx = maxx - minx
    toty = maxy - miny
    totz = maxz - minz
    if operation.material_center_x:
        ob.location.x -= minx + totx / 2
    else:
        ob.location.x -= minx
    if operation.material_center_y:
        ob.location.y -= miny + toty / 2
    else:
        ob.location.y -= miny
    if operation.material_Z == 'BELOW':
        ob.location.z -= maxz
    elif operation.material_Z == 'ABOVE':
        ob.location.z -= minz
    elif operation.material_Z == 'CENTERED':
        ob.location.z -= minz + totz / 2
if ob.type != 'CURVE':
bpy.ops.object.transform_apply(location=True, rotation=False, scale=False)
#addMaterialAreaObject()
def getBoundsWorldspace(obs, use_modifiers=False):
# progress('getting bounds of object(s)')
t = time.time()
maxx = maxy = maxz = -10000000
minx = miny = minz = 10000000
for ob in obs:
# bb=ob.bound_box
mw = ob.matrix_world
if ob.type == 'MESH':
if use_modifiers:
depsgraph = bpy.context.evaluated_depsgraph_get()
mesh_owner = ob.evaluated_get(depsgraph)
mesh = mesh_owner.to_mesh()
else:
mesh = ob.data
for c in mesh.vertices:
coord = c.co
worldCoord = mw @ Vector((coord[0], coord[1], coord[2]))
minx = min(minx, worldCoord.x)
miny = min(miny, worldCoord.y)
minz = min(minz, worldCoord.z)
maxx = max(maxx, worldCoord.x)
maxy = max(maxy, worldCoord.y)
maxz = max(maxz, worldCoord.z)
if use_modifiers:
mesh_owner.to_mesh_clear()
elif ob.type == "FONT":
activate(ob)
bpy.ops.object.duplicate()
co = bpy.context.active_object
bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
bpy.ops.object.convert(target='MESH', keep_original=False)
mesh = co.data
for c in mesh.vertices:
coord = c.co
worldCoord = mw @ Vector((coord[0], coord[1], coord[2]))
minx = min(minx, worldCoord.x)
miny = min(miny, worldCoord.y)
minz = min(minz, worldCoord.z)
maxx = max(maxx, worldCoord.x)
maxy = max(maxy, worldCoord.y)
maxz = max(maxz, worldCoord.z)
bpy.ops.object.delete()
bpy.ops.outliner.orphans_purge()
else:
# for coord in bb:
for c in ob.data.splines:
for p in c.bezier_points:
coord = p.co
# this can work badly with some imported curves, don't know why...
# worldCoord = mw * Vector((coord[0]/ob.scale.x, coord[1]/ob.scale.y, coord[2]/ob.scale.z))
worldCoord = mw @ Vector((coord[0], coord[1], coord[2]))
minx = min(minx, worldCoord.x)
miny = min(miny, worldCoord.y)
minz = min(minz, worldCoord.z)
maxx = max(maxx, worldCoord.x)
maxy = max(maxy, worldCoord.y)
maxz = max(maxz, worldCoord.z)
for p in c.points:
coord = p.co
# this can work badly with some imported curves, don't know why...
# worldCoord = mw * Vector((coord[0]/ob.scale.x, coord[1]/ob.scale.y, coord[2]/ob.scale.z))
worldCoord = mw @ Vector((coord[0], coord[1], coord[2]))
minx = min(minx, worldCoord.x)
miny = min(miny, worldCoord.y)
minz = min(minz, worldCoord.z)
maxx = max(maxx, worldCoord.x)
maxy = max(maxy, worldCoord.y)
maxz = max(maxz, worldCoord.z)
# progress(time.time()-t)
return minx, miny, minz, maxx, maxy, maxz
def getSplineBounds(ob, curve):
# progress('getting bounds of object(s)')
maxx = maxy = maxz = -10000000
minx = miny = minz = 10000000
mw = ob.matrix_world
for p in curve.bezier_points:
coord = p.co
# this can work badly with some imported curves, don't know why...
# worldCoord = mw * Vector((coord[0]/ob.scale.x, coord[1]/ob.scale.y, coord[2]/ob.scale.z))
worldCoord = mw @ Vector((coord[0], coord[1], coord[2]))
minx = min(minx, worldCoord.x)
miny = min(miny, worldCoord.y)
minz = min(minz, worldCoord.z)
maxx = max(maxx, worldCoord.x)
maxy = max(maxy, worldCoord.y)
maxz = max(maxz, worldCoord.z)
for p in curve.points:
coord = p.co
# this can work badly with some imported curves, don't know why...
# worldCoord = mw * Vector((coord[0]/ob.scale.x, coord[1]/ob.scale.y, coord[2]/ob.scale.z))
worldCoord = mw @ Vector((coord[0], coord[1], coord[2]))
minx = min(minx, worldCoord.x)
miny = min(miny, worldCoord.y)
minz = min(minz, worldCoord.z)
maxx = max(maxx, worldCoord.x)
maxy = max(maxy, worldCoord.y)
maxz = max(maxz, worldCoord.z)
# progress(time.time()-t)
return minx, miny, minz, maxx, maxy, maxz
def getOperationSources(o):
if o.geometry_source == 'OBJECT':
# bpy.ops.object.select_all(action='DESELECT')
ob = bpy.data.objects[o.object_name]
o.objects = [ob]
ob.select_set(True)
bpy.context.view_layer.objects.active = ob
if o.enable_B or o.enable_A:
if o.old_rotation_A != o.rotation_A or o.old_rotation_B != o.rotation_B:
o.old_rotation_A = o.rotation_A
o.old_rotation_B = o.rotation_B
                ob = bpy.data.objects[o.object_name]
                ob.select_set(True)
                bpy.context.view_layer.objects.active = ob
                if o.A_along_x:  # A parallel with X
                    if o.enable_A:
                        bpy.context.active_object.rotation_euler.x = o.rotation_A
                    if o.enable_B:
                        bpy.context.active_object.rotation_euler.y = o.rotation_B
                else:  # A parallel with Y
                    if o.enable_A:
                        bpy.context.active_object.rotation_euler.y = o.rotation_A
                    if o.enable_B:
                        bpy.context.active_object.rotation_euler.x = o.rotation_B
elif o.geometry_source == 'COLLECTION':
collection = bpy.data.collections[o.collection_name]
o.objects = collection.objects
elif o.geometry_source == 'IMAGE':
o.use_exact = False
if o.geometry_source == 'OBJECT' or o.geometry_source == 'COLLECTION':
o.onlycurves = True
for ob in o.objects:
if ob.type == 'MESH':
o.onlycurves = False
else:
o.onlycurves = False
def getBounds(o):
# print('kolikrat sem rpijde')
if o.geometry_source == 'OBJECT' or o.geometry_source == 'COLLECTION' or o.geometry_source == 'CURVE':
print("valid geometry")
minx, miny, minz, maxx, maxy, maxz = getBoundsWorldspace(o.objects, o.use_modifiers)
if o.minz_from_ob:
if minz == 10000000:
minz = 0
print("minz from object:" + str(minz))
o.min.z = minz
o.minz = o.min.z
else:
o.min.z = o.minz # max(bb[0][2]+l.z,o.minz)#
print("not minz from object")
if o.material_from_model:
print("material_from_model")
o.min.x = minx - o.material_radius_around_model
o.min.y = miny - o.material_radius_around_model
o.max.z = max(o.maxz, maxz)
o.max.x = maxx + o.material_radius_around_model
o.max.y = maxy + o.material_radius_around_model
else:
print("not material from model")
o.min.x = o.material_origin.x
o.min.y = o.material_origin.y
o.min.z = o.material_origin.z - o.material_size.z
o.max.x = o.min.x + o.material_size.x
o.max.y = o.min.y + o.material_size.y
o.max.z = o.material_origin.z
else:
i = bpy.data.images[o.source_image_name]
if o.source_image_crop:
sx = int(i.size[0] * o.source_image_crop_start_x / 100)
ex = int(i.size[0] * o.source_image_crop_end_x / 100)
sy = int(i.size[1] * o.source_image_crop_start_y / 100)
ey = int(i.size[1] * o.source_image_crop_end_y / 100)
# operation.image.resize(ex-sx,ey-sy)
crop = (sx, sy, ex, ey)
else:
sx = 0
ex = i.size[0]
sy = 0
ey = i.size[1]
o.pixsize = o.source_image_size_x / i.size[0]
o.min.x = o.source_image_offset.x + (sx) * o.pixsize
o.max.x = o.source_image_offset.x + (ex) * o.pixsize
o.min.y = o.source_image_offset.y + (sy) * o.pixsize
o.max.y = o.source_image_offset.y + (ey) * o.pixsize
o.min.z = o.source_image_offset.z + o.minz
o.max.z = o.source_image_offset.z
s = bpy.context.scene
m = s.cam_machine
if o.max.x - o.min.x > m.working_area.x or o.max.y - o.min.y > m.working_area.y or o.max.z - o.min.z > m.working_area.z:
# o.max.x=min(o.min.x+m.working_area.x,o.max.x)
# o.max.y=min(o.min.y+m.working_area.y,o.max.y)
# o.max.z=min(o.min.z+m.working_area.z,o.max.z)
o.warnings += 'Operation exceeds your machine limits\n'
# progress (o.min.x,o.min.y,o.min.z,o.max.x,o.max.y,o.max.z)
def getBoundsMultiple(operations):
"gets bounds of multiple operations, mainly for purpose of simulations or rest milling. highly suboptimal."
maxx = maxy = maxz = -10000000
minx = miny = minz = 10000000
for o in operations:
getBounds(o)
maxx = max(maxx, o.max.x)
maxy = max(maxy, o.max.y)
maxz = max(maxz, o.max.z)
minx = min(minx, o.min.x)
miny = min(miny, o.min.y)
minz = min(minz, o.min.z)
return minx, miny, minz, maxx, maxy, maxz
def samplePathLow(o, ch1, ch2, dosample):
v1 = Vector(ch1.points[-1])
v2 = Vector(ch2.points[0])
v = v2 - v1
d = v.length
v.normalize()
vref = Vector((0, 0, 0))
bpath = camPathChunk([])
i = 0
while vref.length < d:
i += 1
vref = v * o.dist_along_paths * i
if vref.length < d:
p = v1 + vref
bpath.points.append([p.x, p.y, p.z])
# print('between path')
# print(len(bpath))
pixsize = o.pixsize
if dosample:
if not (o.use_opencamlib and o.use_exact):
if o.use_exact:
if o.update_bullet_collision_tag:
prepareBulletCollision(o)
o.update_bullet_collision_tag = False
cutterdepth = o.cutter_shape.dimensions.z / 2
for p in bpath.points:
z = getSampleBullet(o.cutter_shape, p[0], p[1], cutterdepth, 1, o.minz)
if z > p[2]:
p[2] = z
else:
for p in bpath.points:
xs = (p[0] - o.min.x) / pixsize + o.borderwidth + pixsize / 2 # -m
ys = (p[1] - o.min.y) / pixsize + o.borderwidth + pixsize / 2 # -m
z = getSampleImage((xs, ys), o.offset_image, o.minz) + o.skin
if z > p[2]:
p[2] = z
return bpath
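# samplePathLow builds the transition between two chunks: it places points
# every o.dist_along_paths along the straight segment from the end of ch1 to
# the start of ch2 and, if dosample is set, lifts each point up to the sampled
# surface height (bullet collision or offset image), so the connecting move
# follows the model surface instead of cutting through it.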
# def threadedSampling():#not really possible at all without running more blenders for same operation :( python!
# samples in both modes now - image and bullet collision too.
def sampleChunks(o, pathSamples, layers):
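    """Sample the pattern chunks onto the model surface and split the result
    into the given depth layers. Uses OpenCAMLib or Bullet collision when
    'use exact' is on, otherwise the precomputed offset image.
    """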
#
minx, miny, minz, maxx, maxy, maxz = o.min.x, o.min.y, o.min.z, o.max.x, o.max.y, o.max.z
getAmbient(o)
if o.use_exact: # prepare collision world
if o.use_opencamlib:
oclSample(o, pathSamples)
cutterdepth = 0
else:
if o.update_bullet_collision_tag:
prepareBulletCollision(o)
o.update_bullet_collision_tag = False
# print (o.ambient)
cutter = o.cutter_shape
cutterdepth = cutter.dimensions.z / 2
else:
if o.strategy != 'WATERLINE': # or prepare offset image, but not in some strategies.
prepareArea(o)
pixsize = o.pixsize
coordoffset = o.borderwidth + pixsize / 2 # -m
res = ceil(o.cutter_diameter / o.pixsize)
m = res / 2
t = time.time()
# print('sampling paths')
    totlen = 0  # total length of all chunks, to estimate sampling time.
for ch in pathSamples:
totlen += len(ch.points)
layerchunks = []
minz = o.minz - 0.000001 # correction for image method problems
layeractivechunks = []
lastrunchunks = []
for l in layers:
layerchunks.append([])
layeractivechunks.append(camPathChunk([]))
lastrunchunks.append([])
zinvert = 0
if o.inverse:
ob = bpy.data.objects[o.object_name]
zinvert = ob.location.z + maxz # ob.bound_box[6][2]
n = 0
last_percent = -1
# timing for optimisation
samplingtime = timinginit()
sortingtime = timinginit()
totaltime = timinginit()
timingstart(totaltime)
lastz = minz
for patternchunk in pathSamples:
thisrunchunks = []
for l in layers:
thisrunchunks.append([])
lastlayer = None
currentlayer = None
lastsample = None
# threads_count=4
# for t in range(0,threads):
for s in patternchunk.points:
if o.strategy != 'WATERLINE' and int(100 * n / totlen) != last_percent:
last_percent = int(100 * n / totlen)
progress('sampling paths ', last_percent)
n += 1
x = s[0]
y = s[1]
if not o.ambient.contains(sgeometry.Point(x, y)):
newsample = (x, y, 1)
else:
if o.use_opencamlib and o.use_exact:
z = s[2]
if minz > z:
z = minz
newsample = (x, y, z)
####sampling
elif o.use_exact and not o.use_opencamlib:
                    if lastsample != None:  # this is an optimization: search only near the depth of the last sample; saves about 30% of sampling time.
z = getSampleBullet(cutter, x, y, cutterdepth, 1,
lastsample[2] - o.dist_along_paths) # first try to the last sample
if z < minz - 1:
z = getSampleBullet(cutter, x, y, cutterdepth, lastsample[2] - o.dist_along_paths, minz)
else:
z = getSampleBullet(cutter, x, y, cutterdepth, 1, minz)
# print(z)
# here we have
else:
timingstart(samplingtime)
xs = (x - minx) / pixsize + coordoffset
ys = (y - miny) / pixsize + coordoffset
timingadd(samplingtime)
# if o.inverse:
# z=layerstart
z = getSampleImage((xs, ys), o.offset_image, minz) + o.skin
# if minz>z and o.ambient.isInside(x,y):
# z=minz;
################################
# handling samples
############################################
if minz > z:
z = minz
newsample = (x, y, z)
# z=max(minz,z)
# if sampled:# and (not o.inverse or (o.inverse)):uh what was this? disabled
# newsample=(x,y,z)
# elif o.ambient_behaviour=='ALL' and not o.inverse:#handle ambient here, this should be obsolete,
# newsample=(x,y,minz)
for i, l in enumerate(layers):
terminatechunk = False
ch = layeractivechunks[i]
# print(i,l)
# print(l[1],l[0])
if l[1] <= newsample[2] <= l[0]:
                    lastlayer = None  # must be reset to None here; stale lastsample/lastlayer pairs used to cause broken layer splits
if lastsample != None:
for i2, l2 in enumerate(layers):
if l2[1] <= lastsample[2] <= l2[0]:
lastlayer = i2
currentlayer = i
if lastlayer != None and lastlayer != currentlayer: # and lastsample[2]!=newsample[2]:#sampling for sorted paths in layers- to go to the border of the sampled layer at least...there was a bug here, but should be fixed.
if currentlayer < lastlayer:
growing = True
r = range(currentlayer, lastlayer)
spliti = 1
else:
r = range(lastlayer, currentlayer)
growing = False
spliti = 0
# print(r)
li = 0
for ls in r:
splitz = layers[ls][1]
# print(ls)
v1 = lastsample
v2 = newsample
if o.protect_vertical:
v1, v2 = isVerticalLimit(v1, v2, o.protect_vertical_limit)
v1 = Vector(v1)
v2 = Vector(v2)
# print(v1,v2)
ratio = (splitz - v1.z) / (v2.z - v1.z)
# print(ratio)
betweensample = v1 + (v2 - v1) * ratio
# ch.points.append(betweensample.to_tuple())
if growing:
if li > 0:
layeractivechunks[ls].points.insert(-1, betweensample.to_tuple())
else:
layeractivechunks[ls].points.append(betweensample.to_tuple())
layeractivechunks[ls + 1].points.append(betweensample.to_tuple())
else:
# print(v1,v2,betweensample,lastlayer,currentlayer)
layeractivechunks[ls].points.insert(-1, betweensample.to_tuple())
layeractivechunks[ls + 1].points.insert(0, betweensample.to_tuple())
li += 1
                    # this chunk is terminated and already in layerchunks
# ch.points.append(betweensample.to_tuple())#
ch.points.append(newsample)
elif l[1] > newsample[2]:
ch.points.append((newsample[0], newsample[1], l[1]))
elif l[0] < newsample[2]: # terminate chunk
terminatechunk = True
if terminatechunk:
if len(ch.points) > 0:
layerchunks[i].append(ch)
thisrunchunks[i].append(ch)
layeractivechunks[i] = camPathChunk([])
lastsample = newsample
for i, l in enumerate(layers):
ch = layeractivechunks[i]
if len(ch.points) > 0:
layerchunks[i].append(ch)
thisrunchunks[i].append(ch)
layeractivechunks[i] = camPathChunk([])
# PARENTING
if o.strategy == 'PARALLEL' or o.strategy == 'CROSS' or o.strategy == 'OUTLINEFILL':
timingstart(sortingtime)
parentChildDist(thisrunchunks[i], lastrunchunks[i], o)
timingadd(sortingtime)
lastrunchunks = thisrunchunks
# print(len(layerchunks[i]))
progress('checking relations between paths')
timingstart(sortingtime)
if o.strategy == 'PARALLEL' or o.strategy == 'CROSS' or o.strategy == 'OUTLINEFILL':
if len(layers) > 1: # sorting help so that upper layers go first always
for i in range(0, len(layers) - 1):
parents = []
children = []
# only pick chunks that should have connectivity assigned - 'last' and 'first' ones of the layer.
for ch in layerchunks[i + 1]:
if ch.children == []:
parents.append(ch)
for ch1 in layerchunks[i]:
if ch1.parents == []:
children.append(ch1)
parentChild(parents, children, o) # parent only last and first chunk, before it did this for all.
timingadd(sortingtime)
chunks = []
for i, l in enumerate(layers):
if o.ramp:
for ch in layerchunks[i]:
ch.zstart = layers[i][0]
ch.zend = layers[i][1]
chunks.extend(layerchunks[i])
timingadd(totaltime)
print(samplingtime)
print(sortingtime)
print(totaltime)
return chunks
def sampleChunksNAxis(o, pathSamples, layers):
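    """N-axis variant of sampleChunks: samples each chunk's start/end point
    pairs with Bullet collision along the sweep vector, keeps rotations, and
    splits the result into layers measured along the sweep direction.
    """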
#
minx, miny, minz, maxx, maxy, maxz = o.min.x, o.min.y, o.min.z, o.max.x, o.max.y, o.max.z
# prepare collision world
if o.update_bullet_collision_tag:
prepareBulletCollision(o)
# print('getting ambient')
getAmbient(o)
o.update_bullet_collision_tag = False
# print (o.ambient)
cutter = o.cutter_shape
cutterdepth = cutter.dimensions.z / 2
t = time.time()
print('sampling paths')
totlen = 0 # total length of all chunks, to estimate sampling time.
for chs in pathSamples:
totlen += len(chs.startpoints)
layerchunks = []
minz = o.minz
layeractivechunks = []
lastrunchunks = []
for l in layers:
layerchunks.append([])
layeractivechunks.append(camPathChunk([]))
lastrunchunks.append([])
n = 0
lastz = minz
for patternchunk in pathSamples:
# print (patternchunk.endpoints)
thisrunchunks = []
for l in layers:
thisrunchunks.append([])
lastlayer = None
currentlayer = None
lastsample = None
# threads_count=4
lastrotation = (0, 0, 0)
# for t in range(0,threads):
# print(len(patternchunk.startpoints),len( patternchunk.endpoints))
spl = len(patternchunk.startpoints)
for si in range(0,
spl): # ,startp in enumerate(patternchunk.startpoints):#TODO: seems we are writing into the source chunk , and that is why we need to write endpoints everywhere too?
            if n % 200 == 0:
progress('sampling paths ', int(100 * n / totlen))
n += 1
sampled = False
# print(si)
# get the vector to sample
startp = Vector(patternchunk.startpoints[si])
endp = Vector(patternchunk.endpoints[si])
rotation = patternchunk.rotations[si]
sweepvect = endp - startp
sweepvect.normalize()
####sampling
if rotation != lastrotation:
cutter.rotation_euler = rotation
# cutter.rotation_euler.x=-cutter.rotation_euler.x
# print(rotation)
if o.cutter_type == 'VCARVE': # Bullet cone is always pointing Up Z in the object
cutter.rotation_euler.x += pi
cutter.update_tag()
# bpy.context.scene.frame_set(-1)
# bpy.context.scene.update()
# bpy.context.scene.frame_set(1)
bpy.context.scene.frame_set(
1) # this has to be :( it resets the rigidbody world. No other way to update it probably now :(
bpy.context.scene.frame_set(2) # actually 2 frame jumps are needed.
bpy.context.scene.frame_set(0)
#
#
# bpy.context.scene.frame_set(-1)
# bpy.context.scene.update()
# update scene here?
# print(startp,endp)
# samplestartp=startp+sweepvect*0.3#this is correction for the sweep algorithm to work better.
newsample = getSampleBulletNAxis(cutter, startp, endp, rotation, cutterdepth)
# print('totok',startp,endp,rotation,newsample)
################################
# handling samples
############################################
if newsample != None: # this is weird, but will leave it this way now.. just prototyping here.
sampled = True
else: # TODO: why was this here?
newsample = startp
sampled = True
# print(newsample)
# elif o.ambient_behaviour=='ALL' and not o.inverse:#handle ambient here
# newsample=(x,y,minz)
if sampled:
for i, l in enumerate(layers):
terminatechunk = False
ch = layeractivechunks[i]
# print(i,l)
# print(l[1],l[0])
v = startp - newsample
distance = -v.length
if l[1] <= distance <= l[0]:
lastlayer = currentlayer
currentlayer = i
if lastsample != None and lastlayer != None and currentlayer != None and lastlayer != currentlayer: # sampling for sorted paths in layers- to go to the border of the sampled layer at least...there was a bug here, but should be fixed.
if currentlayer < lastlayer:
growing = True
r = range(currentlayer, lastlayer)
spliti = 1
else:
r = range(lastlayer, currentlayer)
growing = False
spliti = 0
# print(r)
li = 0
for ls in r:
splitdistance = layers[ls][1]
# v1=lastsample
# v2=newsample
# if o.protect_vertical:#different algo for N-Axis! need sto be perpendicular to or whatever.
# v1,v2=isVerticalLimit(v1,v2,o.protect_vertical_limit)
# v1=Vector(v1)
# v2=Vector(v2)
# print(v1,v2)
ratio = (splitdistance - lastdistance) / (distance - lastdistance)
# print(ratio)
betweensample = lastsample + (newsample - lastsample) * ratio
                                # this probably doesn't work at all!!!! check this algorithm
betweenrotation = tuple_add(lastrotation,
tuple_mul(tuple_sub(rotation, lastrotation), ratio))
# startpoint = retract point, it has to be always available...
betweenstartpoint = laststartpoint + (startp - laststartpoint) * ratio
# here, we need to have also possible endpoints always..
betweenendpoint = lastendpoint + (endp - lastendpoint) * ratio
if growing:
if li > 0:
layeractivechunks[ls].points.insert(-1, betweensample)
layeractivechunks[ls].rotations.insert(-1, betweenrotation)
layeractivechunks[ls].startpoints.insert(-1, betweenstartpoint)
layeractivechunks[ls].endpoints.insert(-1, betweenendpoint)
else:
layeractivechunks[ls].points.append(betweensample)
layeractivechunks[ls].rotations.append(betweenrotation)
layeractivechunks[ls].startpoints.append(betweenstartpoint)
layeractivechunks[ls].endpoints.append(betweenendpoint)
layeractivechunks[ls + 1].points.append(betweensample)
layeractivechunks[ls + 1].rotations.append(betweenrotation)
layeractivechunks[ls + 1].startpoints.append(betweenstartpoint)
layeractivechunks[ls + 1].endpoints.append(betweenendpoint)
else:
layeractivechunks[ls].points.insert(-1, betweensample)
layeractivechunks[ls].rotations.insert(-1, betweenrotation)
layeractivechunks[ls].startpoints.insert(-1, betweenstartpoint)
layeractivechunks[ls].endpoints.insert(-1, betweenendpoint)
layeractivechunks[ls + 1].points.append(betweensample)
layeractivechunks[ls + 1].rotations.append(betweenrotation)
layeractivechunks[ls + 1].startpoints.append(betweenstartpoint)
layeractivechunks[ls + 1].endpoints.append(betweenendpoint)
# layeractivechunks[ls+1].points.insert(0,betweensample)
li += 1
                        # this chunk is terminated and already in layerchunks
# ch.points.append(betweensample)#
ch.points.append(newsample)
ch.rotations.append(rotation)
ch.startpoints.append(startp)
ch.endpoints.append(endp)
lastdistance = distance
elif l[1] > distance:
v = sweepvect * l[1]
p = startp - v
ch.points.append(p)
ch.rotations.append(rotation)
ch.startpoints.append(startp)
ch.endpoints.append(endp)
elif l[0] < distance: # retract to original track
ch.points.append(startp)
ch.rotations.append(rotation)
ch.startpoints.append(startp)
ch.endpoints.append(endp)
# terminatechunk=True
# if terminatechunk:
# #print(ch.points)
# if len(ch.points)>0:
# if len(ch.points)>0:
# layerchunks[i].append(ch)
# thisrunchunks[i].append(ch)
# layeractivechunks[i]=camPathChunk([])
# else:
# terminatechunk=True
lastsample = newsample
lastrotation = rotation
laststartpoint = startp
lastendpoint = endp
for i, l in enumerate(layers):
ch = layeractivechunks[i]
if len(ch.points) > 0:
layerchunks[i].append(ch)
thisrunchunks[i].append(ch)
layeractivechunks[i] = camPathChunk([])
if (o.strategy == 'PARALLEL' or o.strategy == 'CROSS' or o.strategy == 'OUTLINEFILL'):
parentChildDist(thisrunchunks[i], lastrunchunks[i], o)
lastrunchunks = thisrunchunks
# print(len(layerchunks[i]))
progress('checking relations between paths')
"""#this algorithm should also work for n-axis, but now is "sleeping"
if (o.strategy=='PARALLEL' or o.strategy=='CROSS'):
if len(layers)>1:# sorting help so that upper layers go first always
for i in range(0,len(layers)-1):
#print('layerstuff parenting')
parentChild(layerchunks[i+1],layerchunks[i],o)
"""
chunks = []
for i, l in enumerate(layers):
chunks.extend(layerchunks[i])
return chunks
def extendChunks5axis(chunks, o):
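    """Add start points, end points and rotations to each chunk for 5-axis
    machining, casting straight down from above the operation (or from the
    machine starting position, if defined).
    """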
s = bpy.context.scene
m = s.cam_machine
free_movement_height = o.free_movement_height # o.max.z +
if m.use_position_definitions: # dhull
cutterstart = Vector((m.starting_position.x, m.starting_position.y,
max(o.max.z, m.starting_position.z))) # start point for casting
else:
cutterstart = Vector((0, 0, max(o.max.z, free_movement_height))) # start point for casting
cutterend = Vector((0, 0, o.min.z))
oriname = o.name + ' orientation'
ori = s.objects[oriname]
# rotationaxes = rotTo2axes(ori.rotation_euler,'CA')#warning-here it allready is reset to 0!!
print('rot', o.rotationaxes)
a, b = o.rotationaxes # this is all nonsense by now.
for chunk in chunks:
for v in chunk.points:
cutterstart.x = v[0]
cutterstart.y = v[1]
cutterend.x = v[0]
cutterend.y = v[1]
chunk.startpoints.append(cutterstart.to_tuple())
chunk.endpoints.append(cutterend.to_tuple())
chunk.rotations.append(
(a, b, 0)) # TODO: this is a placeholder. It does 99.9% probably write total nonsense.
def curveToShapely(cob, use_modifiers=False):
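    """Convert a curve object to a list of Shapely polygons, optionally
    applying its modifiers first.
    """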
chunks = curveToChunks(cob, use_modifiers)
polys = chunksToShapely(chunks)
return polys
# separate function in blender, so you can offset any curve.
# FIXME: use the same algorithm as the cutout strategy, since that one respects hierarchy.
def silhoueteOffset(context, offset, style=1, mitrelimit=1.0):
bpy.context.scene.cursor.location = (0, 0, 0)
ob = bpy.context.active_object
if ob.type == 'CURVE' or ob.type == 'FONT':
silhs = curveToShapely(ob)
else:
silhs = getObjectSilhouete('OBJECTS', [ob])
polys = []
mp = shapely.ops.unary_union(silhs)
print("offset attributes:")
print(offset,style)
mp = mp.buffer(offset, cap_style = 1, join_style=style, resolution=16, mitre_limit=mitrelimit)
shapelyToCurve(ob.name +'_offset_'+str(round(offset,5)), mp, ob.location.z)
return {'FINISHED'}
def polygonBoolean(context, boolean_type):
bpy.context.scene.cursor.location = (0, 0, 0)
ob = bpy.context.active_object
obs = []
for ob1 in bpy.context.selected_objects:
if ob1 != ob:
obs.append(ob1)
plist = curveToShapely(ob)
p1 = sgeometry.asMultiPolygon(plist)
polys = []
for o in obs:
plist = curveToShapely(o)
p2 = sgeometry.asMultiPolygon(plist)
polys.append(p2)
# print(polys)
if boolean_type == 'UNION':
for p2 in polys:
p1 = p1.union(p2)
elif boolean_type == 'DIFFERENCE':
for p2 in polys:
p1 = p1.difference(p2)
elif boolean_type == 'INTERSECT':
for p2 in polys:
p1 = p1.intersection(p2)
shapelyToCurve('boolean', p1, ob.location.z)
# bpy.ops.object.convert(target='CURVE')
# bpy.context.scene.cursor_location=ob.location
# bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
return {'FINISHED'}
def polygonConvexHull(context):
coords = []
bpy.ops.object.duplicate()
bpy.ops.object.join()
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
bpy.context.active_object.name = "_tmp"
bpy.ops.object.convert(target='MESH')
obj = bpy.context.view_layer.objects.active
for v in obj.data.vertices: # extract X,Y coordinates from the vertices data
c=(v.co.x, v.co.y)
coords.append(c)
simple.removeMultiple('_tmp') # delete temporary mesh
simple.removeMultiple('ConvexHull') # delete old hull
points = sgeometry.MultiPoint(coords) # convert coordinates to shapely MultiPoint datastructure
hull = points.convex_hull
shapelyToCurve('ConvexHull', hull, 0.0)
return {'FINISHED'}
def Helix(r, np, zstart, pend, rev):
c = []
pi = math.pi
v = mathutils.Vector((r, 0, zstart))
e = mathutils.Euler((0, 0, 2.0 * pi / np))
zstep = (zstart - pend[2]) / (np * rev)
for a in range(0, int(np * rev)):
c.append((v.x + pend[0], v.y + pend[1], zstart - (a * zstep)))
v.rotate(e)
c.append((v.x + pend[0], v.y + pend[1], pend[2]))
return c
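# A usage sketch (the numbers are made-up illustration values, not taken from
# the add-on): a helical entry move of radius 2 mm, 16 points per revolution,
# descending from z = 0 to the end point's z over 3 revolutions:
# entry_points = Helix(0.002, 16, 0.0, (0.0, 0.0, -0.002), 3)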
def comparezlevel(x):
return x[5]
def overlaps(bb1, bb2): # true if bb1 is child of bb2
ch1 = bb1
ch2 = bb2
    return (ch2[1] > ch1[1] > ch1[0] > ch2[0] and
            ch2[3] > ch1[3] > ch1[2] > ch2[2])
def connectChunksLow(chunks, o):
""" connects chunks that are close to each other without lifting, sampling them 'low' """
if not o.stay_low or (o.strategy == 'CARVE' and o.carve_depth > 0):
return chunks
connectedchunks = []
chunks_to_resample = [] # for OpenCAMLib sampling
mergedist = 3 * o.dist_between_paths
    if o.strategy == 'PENCIL':  # the merge distance is larger for pencil paths: they stay on the surface cleaning up leftovers, and can connect nearby surface points without fear of plunging into the material
mergedist = 10 * o.dist_between_paths
if o.strategy == 'MEDIAL_AXIS':
mergedist = 1 * o.medial_axis_subdivision
if o.parallel_step_back:
mergedist *= 2
if o.merge_dist > 0:
mergedist = o.merge_dist
# mergedist=10
lastch = None
i = len(chunks)
pos = (0, 0, 0)
for ch in chunks:
if len(ch.points) > 0:
if lastch != None and (ch.distStart(pos, o) < mergedist):
                # CARVE should always lift when it goes below the surface...
# print(mergedist,ch.dist(pos,o))
                if o.strategy == 'PARALLEL' or o.strategy == 'CROSS' or o.strategy == 'PENCIL':  # these strategies are sorted after sampling, so the connection needs resampling
between = samplePathLow(o, lastch, ch, True)
else:
                    # print('add between')
                    between = samplePathLow(o, lastch, ch, False)  # other paths either don't use sampling or are sorted before it
if o.use_opencamlib and o.use_exact and (
o.strategy == 'PARALLEL' or o.strategy == 'CROSS' or o.strategy == 'PENCIL'):
chunks_to_resample.append(
(connectedchunks[-1], len(connectedchunks[-1].points), len(between.points)))
connectedchunks[-1].points.extend(between.points)
connectedchunks[-1].points.extend(ch.points)
else:
connectedchunks.append(ch)
lastch = ch
pos = lastch.points[-1]
if o.use_opencamlib and o.use_exact and o.strategy != 'CUTOUT' and o.strategy != 'POCKET':
oclResampleChunks(o, chunks_to_resample)
return connectedchunks
def getClosest(o, pos, chunks):
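    """Return the chunk closest to pos whose children are all sorted
    already, or None if there is none.
    """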
# ch=-1
mind = 10000
d = 100000000000
ch = None
for chtest in chunks:
cango = True
for child in chtest.children: # here was chtest.getNext==chtest, was doing recursion error and slowing down.
            if not child.sorted:
                cango = False
                break
if cango:
d = chtest.dist(pos, o)
if d < mind:
ch = chtest
mind = d
return ch
def sortChunks(chunks, o):
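    """Sort chunks to minimise travel distance, visiting children before
    their parents and picking the closest available chunk at each step.
    """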
if o.strategy != 'WATERLINE':
progress('sorting paths')
sys.setrecursionlimit(100000) # the getNext() function of CamPathChunk was running out of recursion limits.
sortedchunks = []
chunks_to_resample = []
lastch = None
i = len(chunks)
pos = (0, 0, 0)
# for ch in chunks:
# ch.getNext()#this stores the unsortedchildren properties
# print('numofchunks')
# print(len(chunks))
while len(chunks) > 0:
ch = None
if len(sortedchunks) == 0 or len(
lastch.parents) == 0: # first chunk or when there are no parents -> parents come after children here...
ch = getClosest(o, pos, chunks)
elif len(lastch.parents) > 0: # looks in parents for next candidate, recursively
# get siblings here
# siblings=[]
# for chs in lastch.parents:
# siblings.extend(chs.children)
# ch = getClosest(o,pos,siblings)
# if ch==None:
# ch = getClosest(o,pos,chunks)
for parent in lastch.parents:
ch = parent.getNextClosest(o, pos)
if ch != None:
break
if ch == None:
ch = getClosest(o, pos, chunks)
# break
# pass;
if ch is not None: # found next chunk, append it to list
# only adaptdist the chunk if it has not been sorted before
if not ch.sorted:
ch.adaptdist(pos, o)
ch.sorted = True
# print(len(ch.parents),'children')
chunks.remove(ch)
sortedchunks.append(ch)
lastch = ch
pos = lastch.points[-1]
# print(i, len(chunks))
# experimental fix for infinite loop problem
# else:
        # THIS PROBLEM WASN'T HERE AT ALL. but keeping it here, it might fix the problems somewhere else:)
# can't find chunks close enough and still some chunks left
# to be sorted. For now just move the remaining chunks over to
# the sorted list.
# This fixes an infinite loop condition that occurs sometimes.
# This is a bandaid fix: need to find the root cause of this problem
# suspect it has to do with the sorted flag?
# print("no chunks found closest. Chunks not sorted: ", len(chunks))
# sortedchunks.extend(chunks)
# chunks[:] = []
i -= 1
sys.setrecursionlimit(1000)
    if o.strategy != 'DRILL' and o.strategy != 'OUTLINEFILL':  # this should skip most strategies; connecting must be done manually there, because some strategies get sorted twice
sortedchunks = connectChunksLow(sortedchunks, o)
return sortedchunks
def getVectorRight(lastv, verts):  # right-most vector from a set, by signed angle
defa = 100
v1 = Vector(lastv[0])
v2 = Vector(lastv[1])
va = v2 - v1
for i, v in enumerate(verts):
if v != lastv[0]:
vb = Vector(v) - v2
a = va.angle_signed(Vector(vb))
# if a<=0:
# a=2*pi+a
if a < defa:
defa = a
returnvec = i
return returnvec
def cleanUpDict(ndict):
print('removing lonely points') # now it should delete all junk first, iterate over lonely verts.
# found_solitaires=True
# while found_solitaires:
found_solitaires = False
keys = []
keys.extend(ndict.keys())
removed = 0
for k in keys:
print(k)
print(ndict[k])
if len(ndict[k]) <= 1:
newcheck = [k]
while (len(newcheck) > 0):
v = newcheck.pop()
if len(ndict[v]) <= 1:
                    for v1 in ndict[v]:
                        newcheck.append(v1)  # enqueue the neighbour (v1), not v itself; v is removed just below
dictRemove(ndict, v)
removed += 1
found_solitaires = True
print(removed)
def dictRemove(dict, val):
for v in dict[val]:
dict[v].remove(val)
dict.pop(val)
def addLoop(parentloop, start, end):
added = False
for l in parentloop[2]:
if l[0] < start and l[1] > end:
addLoop(l, start, end)
return
parentloop[2].append([start, end, []])
def cutloops(csource, parentloop, loops):
copy = csource[parentloop[0]:parentloop[1]]
for li in range(len(parentloop[2]) - 1, -1, -1):
l = parentloop[2][li]
# print(l)
copy = copy[:l[0] - parentloop[0]] + copy[l[1] - parentloop[0]:]
loops.append(copy)
for l in parentloop[2]:
cutloops(csource, l, loops)
def getOperationSilhouete(operation):
"""gets silhouete for the operation
uses image thresholding for everything except curves.
"""
if operation.update_silhouete_tag:
image = None
objects = None
if operation.geometry_source == 'OBJECT' or operation.geometry_source == 'COLLECTION':
            if not operation.onlycurves:
stype = 'OBJECTS'
else:
stype = 'CURVES'
else:
stype = 'IMAGE'
totfaces = 0
if stype == 'OBJECTS':
for ob in operation.objects:
if ob.type == 'MESH':
totfaces += len(ob.data.polygons)
if (stype == 'OBJECTS' and totfaces > 200000) or stype == 'IMAGE':
print('image method')
samples = renderSampleImage(operation)
if stype == 'OBJECTS':
                i = samples > operation.minz - 0.0000001
                # (alternative: numpy.min(operation.zbuffer_image) - 0.0000001)
                # the small epsilon handles totally flat meshes, which people tend to mill instead of
                # proper pockets: otherwise the minimum was also the maximum and no contour was detected.
else:
i = samples > numpy.min(operation.zbuffer_image) # this fixes another numeric imprecision.
chunks = imageToChunks(operation, i)
operation.silhouete = chunksToShapely(chunks)
# print(operation.silhouete)
# this conversion happens because we need the silh to be oriented, for milling directions.
else:
            print('object method for retrieving silhouette')
operation.silhouete = getObjectSilhouete(stype, objects=operation.objects, use_modifiers=operation.use_modifiers)
operation.update_silhouete_tag = False
return operation.silhouete
def getObjectSilhouete(stype, objects=None, use_modifiers=False):
# o=operation
if stype == 'CURVES': # curve conversion to polygon format
allchunks = []
for ob in objects:
chunks = curveToChunks(ob)
allchunks.extend(chunks)
silhouete = chunksToShapely(allchunks)
elif stype == 'OBJECTS':
totfaces = 0
for ob in objects:
totfaces += len(ob.data.polygons)
        if totfaces < 20000000:  # the boolean polygon method originally had a 20 000 poly limit; now it is limitless, though it may become terribly slow
t = time.time()
print('shapely getting silhouette')
polys = []
for ob in objects:
if use_modifiers:
ob = ob.evaluated_get(bpy.context.evaluated_depsgraph_get())
m = ob.to_mesh()
else:
m = ob.data
mw = ob.matrix_world
mwi = mw.inverted()
r = ob.rotation_euler
m.calc_loop_triangles()
id = 0
e = 0.000001
scaleup = 100
for tri in m.loop_triangles:
n = tri.normal.copy()
n.rotate(r)
# verts=[]
# for i in f.vertices:
# verts.append(mw*m.vertices[i].co)
# n=mathutils.geometry.normal(verts[0],verts[1],verts[2])
if tri.area > 0 and n.z != 0: # n.z>0.0 and f.area>0.0 :
s = []
c = mw @ tri.center
c = c.xy
for vert_index in tri.vertices:
v = mw @ m.vertices[vert_index].co
s.append((v.x, v.y))
if len(s) > 2:
# print(s)
p = spolygon.Polygon(s)
# print(dir(p))
if p.is_valid:
# polys.append(p)
polys.append(p.buffer(e, resolution=0))
# if id==923:
# m.polygons[923].select
id += 1
if totfaces < 20000:
p = sops.unary_union(polys)
else:
print('computing in parts')
bigshapes = []
i = 1
part = 20000
while i * part < totfaces:
print(i)
ar = polys[(i - 1) * part:i * part]
bigshapes.append(sops.unary_union(ar))
i += 1
if (i - 1) * part < totfaces:
last_ar = polys[(i - 1) * part:]
bigshapes.append(sops.unary_union(last_ar))
print('joining')
p = sops.unary_union(bigshapes)
print(time.time() - t)
t = time.time()
silhouete = [p] # [polygon_utils_cam.Shapely2Polygon(p)]
return silhouete
def getAmbient(o):
if o.update_ambient_tag:
if o.ambient_cutter_restrict: # cutter stays in ambient & limit curve
m = o.cutter_diameter / 2
else:
m = 0
if o.ambient_behaviour == 'AROUND':
r = o.ambient_radius - m
o.ambient = getObjectOutline(r, o, True) # in this method we need ambient from silhouete
else:
o.ambient = spolygon.Polygon(((o.min.x + m, o.min.y + m), (o.min.x + m, o.max.y - m),
(o.max.x - m, o.max.y - m), (o.max.x - m, o.min.y + m)))
if o.use_limit_curve:
if o.limit_curve != '':
limit_curve = bpy.data.objects[o.limit_curve]
# polys=curveToPolys(limit_curve)
polys = curveToShapely(limit_curve)
o.limit_poly = shapely.ops.unary_union(polys)
# for p in polys:
# o.limit_poly+=p
if o.ambient_cutter_restrict:
o.limit_poly = o.limit_poly.buffer(o.cutter_diameter / 2, resolution=o.circle_detail)
o.ambient = o.ambient.intersection(o.limit_poly)
o.update_ambient_tag = False
def getObjectOutline(radius, o, Offset): # FIXME: make this one operation independent
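    """Offset the operation silhouette by radius: outwards when Offset is
    True, inwards otherwise. Returns a merged outline unless o.dont_merge
    is set, in which case a MultiPolygon of the individual offsets is returned.
    """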
    # circle detail, optimize, optimize threshold.
polygons = getOperationSilhouete(o)
i = 0
# print('offseting polygons')
if Offset:
offset = 1
else:
offset = -1
outlines = []
i = 0
    join = 2 if o.straight else 1
for p1 in polygons: # sort by size before this???
#print(p1.type, len(polygons))
i += 1
if radius > 0:
            p1 = p1.buffer(radius * offset, resolution=o.circle_detail, join_style=join, mitre_limit=2)
outlines.append(p1)
#print(outlines)
if o.dont_merge:
outline = sgeometry.MultiPolygon(outlines)
else:
outline = shapely.ops.unary_union(outlines)
return outline
def addOrientationObject(o):
"""the orientation object should be used to set up orientations of the object for 4 and 5 axis milling."""
name = o.name + ' orientation'
s = bpy.context.scene
if s.objects.find(name) == -1:
bpy.ops.object.empty_add(type='ARROWS', align='WORLD', location=(0, 0, 0))
ob = bpy.context.active_object
        ob.empty_display_size = 0.05  # was empty_draw_size before Blender 2.80
ob.show_name = True
ob.name = name
ob = s.objects[name]
if o.machine_axes == '4':
if o.rotary_axis_1 == 'X':
ob.lock_rotation = [False, True, True]
ob.rotation_euler[1] = 0
ob.rotation_euler[2] = 0
if o.rotary_axis_1 == 'Y':
ob.lock_rotation = [True, False, True]
ob.rotation_euler[0] = 0
ob.rotation_euler[2] = 0
if o.rotary_axis_1 == 'Z':
ob.lock_rotation = [True, True, False]
ob.rotation_euler[0] = 0
ob.rotation_euler[1] = 0
elif o.machine_axes == '5':
ob.lock_rotation = [False, False, True]
ob.rotation_euler[2] = 0 # this will be a bit hard to rotate.....
# def addCutterOrientationObject(o):
def removeOrientationObject(o): # not working
name = o.name + ' orientation'
if bpy.context.scene.objects.find(name) > -1:
ob = bpy.context.scene.objects[name]
delob(ob)
def addTranspMat(ob, mname, color, alpha):
if mname in bpy.data.materials:
mat = bpy.data.materials[mname]
else:
mat = bpy.data.materials.new(name=mname)
mat.use_nodes = True
bsdf = mat.node_tree.nodes["Principled BSDF"]
# Assign it to object
if ob.data.materials:
ob.data.materials[0] = mat
else:
ob.data.materials.append(mat)
def addMachineAreaObject():
s = bpy.context.scene
ao = bpy.context.active_object
if s.objects.get('CAM_machine') is not None:
o = s.objects['CAM_machine']
else:
oldunits = s.unit_settings.system
# need to be in metric units when adding machine mesh object
# in order for location to work properly
s.unit_settings.system = 'METRIC'
bpy.ops.mesh.primitive_cube_add(align='WORLD', enter_editmode=False, location=(1, 1, -1), rotation=(0, 0, 0))
o = bpy.context.active_object
o.name = 'CAM_machine'
o.data.name = 'CAM_machine'
bpy.ops.object.transform_apply(location=True, rotation=False, scale=False)
# o.type = 'SOLID'
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.delete(type='ONLY_FACE')
bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='EDGE', action='TOGGLE')
bpy.ops.mesh.select_all(action='TOGGLE')
bpy.ops.mesh.subdivide(number_cuts=32, smoothness=0, quadcorner='STRAIGHT_CUT', fractal=0,
fractal_along_normal=0, seed=0)
bpy.ops.mesh.select_nth(nth=2, offset=0)
bpy.ops.mesh.delete(type='EDGE')
bpy.ops.mesh.primitive_cube_add(align='WORLD', enter_editmode=False, location=(1, 1, -1), rotation=(0, 0, 0))
bpy.ops.object.editmode_toggle()
# addTranspMat(o, "violet_transparent", (0.800000, 0.530886, 0.725165), 0.1)
o.display_type = 'BOUNDS'
o.hide_render = True
o.hide_select = True
# o.select = False
s.unit_settings.system = oldunits
# bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
o.dimensions = bpy.context.scene.cam_machine.working_area
if ao is not None:
ao.select_set(True)
# else:
# bpy.context.scene.objects.active = None
def addMaterialAreaObject():
s = bpy.context.scene
operation = s.cam_operations[s.cam_active_operation]
getOperationSources(operation)
getBounds(operation)
ao = bpy.context.active_object
if s.objects.get('CAM_material') is not None:
o = s.objects['CAM_material']
else:
bpy.ops.mesh.primitive_cube_add(align='WORLD', enter_editmode=False, location=(1, 1, -1), rotation=(0, 0, 0))
o = bpy.context.active_object
o.name = 'CAM_material'
o.data.name = 'CAM_material'
bpy.ops.object.transform_apply(location=True, rotation=False, scale=False)
# addTranspMat(o, 'blue_transparent', (0.458695, 0.794658, 0.8), 0.1)
o.display_type = 'BOUNDS'
o.hide_render = True
o.hide_select = True
o.select_set(state=True, view_layer=None)
# bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
o.dimensions = bpy.context.scene.cam_machine.working_area
o.dimensions = (
operation.max.x - operation.min.x, operation.max.y - operation.min.y, operation.max.z - operation.min.z)
o.location = (operation.min.x, operation.min.y, operation.max.z)
if ao is not None:
ao.select_set(True)
# else:
# bpy.context.scene.objects.active = None
def getContainer():
s = bpy.context.scene
if s.objects.get('CAM_OBJECTS') == None:
bpy.ops.object.empty_add(type='PLAIN_AXES', align='WORLD')
container = bpy.context.active_object
container.name = 'CAM_OBJECTS'
container.location = [0, 0, 0]
        container.hide_viewport = True  # 'hide' was renamed to 'hide_viewport' in Blender 2.80
else:
container = s.objects['CAM_OBJECTS']
return container
# progress('finished')
# tools for Voronoi graphs, all copied from the delaunayVoronoi addon:
class Point:
def __init__(self, x, y, z):
self.x, self.y, self.z = x, y, z
def unique(L):
"""Return a list of unhashable elements in s, but without duplicates.
[[1, 2], [2, 3], [1, 2]] >>> [[1, 2], [2, 3]]"""
# For unhashable objects, you can sort the sequence and then scan from the end of the list, deleting duplicates as you go
nDupli = 0
nZcolinear = 0
L.sort() # sort() brings the equal elements together; then duplicates are easy to weed out in a single pass.
last = L[-1]
for i in range(len(L) - 2, -1, -1):
        if last[:2] == L[i][:2]:  # XY coordinates comparison
            if last[2] == L[i][2]:  # Z coordinate comparison
                nDupli += 1  # duplicate vertices
else: # Z colinear
nZcolinear += 1
del L[i]
else:
last = L[i]
return (nDupli,
nZcolinear) # list data type is mutable, input list will automatically update and doesn't need to be returned
def checkEqual(lst):
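    """Return True if all elements of lst are equal, e.g. checkEqual([1, 1, 1]) -> True."""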
return lst[1:] == lst[:-1]
def prepareIndexed(o):
s = bpy.context.scene
# first store objects positions/rotations
o.matrices = []
o.parents = []
for ob in o.objects:
o.matrices.append(ob.matrix_world.copy())
o.parents.append(ob.parent)
# then rotate them
for ob in o.objects:
        ob.select_set(True)  # Blender 2.80+ API (was: ob.select = True)
        bpy.context.view_layer.objects.active = ob
bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
        s.cursor.location = (0, 0, 0)  # Blender 2.80+ API (was: s.cursor_location)
oriname = o.name + ' orientation'
ori = s.objects[oriname]
o.orientation_matrix = ori.matrix_world.copy()
o.rotationaxes = rotTo2axes(ori.rotation_euler, 'CA')
    ori.select_set(True)
    bpy.context.view_layer.objects.active = ori
# we parent all objects to the orientation object
bpy.ops.object.parent_set(type='OBJECT', keep_transform=True)
for ob in o.objects:
        ob.select_set(False)
# then we move the orientation object to 0,0
bpy.ops.object.location_clear()
bpy.ops.object.rotation_clear()
    ori.select_set(False)
for ob in o.objects:
activate(ob)
bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
# rot=ori.matrix_world.inverted()
# #rot.x=-rot.x
# #rot.y=-rot.y
# #rot.z=-rot.z
# rotationaxes = rotTo2axes(ori.rotation_euler,'CA')
#
# #bpy.context.space_data.pivot_point = 'CURSOR'
# #bpy.context.space_data.pivot_point = 'CURSOR'
#
# for ob in o.objects:
# ob.rotation_euler.rotate(rot)
def cleanupIndexed(operation):
s = bpy.context.scene
    oriname = operation.name + ' orientation'  # note the space: must match the name used in addOrientationObject
ori = s.objects[oriname]
path = s.objects["cam_path_{}{}".format(operation.name)]
ori.matrix_world = operation.orientation_matrix
# set correct path location
path.location = ori.location
path.rotation_euler = ori.rotation_euler
print(ori.matrix_world, operation.orientation_matrix)
for i, ob in enumerate(operation.objects): # TODO: fix this here wrong order can cause objects out of place
ob.parent = operation.parents[i]
for i, ob in enumerate(operation.objects):
ob.matrix_world = operation.matrices[i]
def rotTo2axes(e, axescombination):
"""converts an orientation object rotation to rotation defined by 2 rotational axes on the machine - for indexed machining.
attempting to do this for all axes combinations.
"""
v = Vector((0, 0, 1))
v.rotate(e)
# if axes
if axescombination == 'CA':
v2d = Vector((v.x, v.y))
        a1base = Vector((0, -1))  # is this right? It should be the vector defining zero rotation
if v2d.length > 0:
cangle = a1base.angle_signed(v2d)
else:
return (0, 0)
v2d = Vector((v2d.length, v.z))
a2base = Vector((0, 1))
aangle = a2base.angle_signed(v2d)
print('angles', cangle, aangle)
return (cangle, aangle)
elif axescombination == 'CB':
v2d = Vector((v.x, v.y))
        a1base = Vector((1, 0))  # is this right? It should be the vector defining zero rotation
if v2d.length > 0:
cangle = a1base.angle_signed(v2d)
else:
return (0, 0)
v2d = Vector((v2d.length, v.z))
a2base = Vector((0, 1))
bangle = a2base.angle_signed(v2d)
print('angles', cangle, bangle)
return (cangle, bangle)
# v2d=((v[a[0]],v[a[1]]))
# angle1=a1base.angle(v2d)#C for ca
# print(angle1)
# if axescombination[0]=='C':
# e1=Vector((0,0,-angle1))
# elif axescombination[0]=='A':#TODO: finish this after prototyping stage
# pass;
# v.rotate(e1)
# vbase=Vector(0,1,0)
# bangle=v.angle(vzbase)
# print(v)
# print(bangle)
    return None  # unknown axis combination; the prototype code above was never finished (angle1/angle2 are undefined)
def reload_pathss(o):
oname = "cam_path_" + o.name
s = bpy.context.scene
# for o in s.objects:
ob = None
old_pathmesh = None
if oname in s.objects:
old_pathmesh = s.objects[oname].data
ob = s.objects[oname]
picklepath = getCachePath(o) + '.pickle'
f = open(picklepath, 'rb')
d = pickle.load(f)
f.close()
# passed=False
# while not passed:
# try:
# f=open(picklepath,'rb')
# d=pickle.load(f)
# f.close()
# passed=True
# except:
# print('sleep')
# time.sleep(1)
o.warnings = d['warnings']
o.duration = d['duration']
verts = d['path']
edges = []
for a in range(0, len(verts) - 1):
edges.append((a, a + 1))
oname = "cam_path_" + o.name
mesh = bpy.data.meshes.new(oname)
mesh.name = oname
mesh.from_pydata(verts, edges, [])
if oname in s.objects:
s.objects[oname].data = mesh
else:
object_utils.object_data_add(bpy.context, mesh, operator=None)
ob = bpy.context.active_object
ob.name = oname
ob = s.objects[oname]
ob.location = (0, 0, 0)
o.path_object_name = oname
o.changed = False
if old_pathmesh != None:
bpy.data.meshes.remove(old_pathmesh)
|
vilemnovak/blendercam
|
scripts/addons/cam/utils.py
|
Python
|
gpl-2.0
| 65,143
|
#!/usr/bin/env python
"""
This is a collection of classes that contain data for files from a
sosreport in the directory:
var/log/*
@author : Shane Bradley
@contact : sbradley@redhat.com
@version : 2.16
@copyright : GPLv2
"""
class VarLogMessagesMsg:
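    """Container for a single parsed message from a sosreport's var/log/messages file."""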
def __init__(self, orginalMessage, timestamp, hostname, messageSender, pid, message):
self.__orginalMessage = orginalMessage
self.__timestamp = timestamp
self.__hostname = hostname
self.__messageSender = messageSender
self.__pid = pid
self.__message = message
def __str__(self):
#return "%s | %s | %s | %s | %s" %(self.getTimestamp(), self.getHostname(), self.getMessageSender(), self.getPid(), self.getMessage())
return self.getOriginalMessage()
def getOriginalMessage(self):
return self.__orginalMessage
def getTimestamp(self):
return self.__timestamp
def getHostname(self):
return self.__hostname
def getMessageSender(self):
return self.__messageSender
def getPid(self):
return self.__pid
def getMessage(self):
return self.__message
|
sbradley7777/sx
|
lib/sx/plugins/lib/log/syslogparser.py
|
Python
|
gpl-2.0
| 1,078
|
# -*- coding: utf-8 -*-
#
# This file is part of EUDAT B2Share.
# Copyright (C) 2016 CERN.
#
# B2Share is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# B2Share is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with B2Share; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""B2share records errors."""
from jsonschema.exceptions import ValidationError
from invenio_rest.errors import RESTValidationError, FieldError
class B2ShareRecordsError(RESTValidationError):
"""B2Share records error."""
class InvalidRecordError(B2ShareRecordsError):
"""Raise when a record is invalid."""
# TODO(edima): remove this when we have support for permissions
class AlteredRecordError(B2ShareRecordsError):
"""Raise when a record update changes what is considered
immutable record data."""
class EpicPIDError(Exception):
    """Raise when an EPIC PID operation fails."""
class UnknownRecordType(B2ShareRecordsError):
"""Error raised when a record type cannot be determined.
The two main record types are "published record" and "deposit".
"""
class AnonymousDepositSearch(B2ShareRecordsError):
"""Error raised when an anonymous user tries to search for drafts."""
code = 401
description = 'Only authenticated users can search for drafts.'
def register_error_handlers(app):
@app.errorhandler(ValidationError)
def handle_validation_error(err):
field = '/'.join([str(x) for x in err.path])
if err.validator == 'required' or err.validator == 'additionalProperties':
try:
field = err.message.split('\'')[1]
except IndexError:
pass # ignore
return InvalidRecordError(errors=[
FieldError(field, err.message)
])
|
SarahBA/b2share
|
b2share/modules/records/errors.py
|
Python
|
gpl-2.0
| 2,455
|
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import Selector
from lyricwiki.items import LyricWikiItem
class LyricWikiSpider(CrawlSpider):
name = "mastakilla" #CHANGE NAME
allowed_domains = ["lyrics.wikia.com"]
start_urls = [
"http://lyrics.wikia.com/Masta_Killa", #CHANGE URL
]
rules = ( #CHANGE REGEX
Rule(SgmlLinkExtractor(allow=('/Masta_Killa.*',),restrict_xpaths=('//ol/li',)), callback='parse_item', follow=True),
)
def parse_item(self, response):
sel = Selector(response)
info = sel.xpath('//div[@class="mw-content-ltr"]')
item = LyricWikiItem()
item['title'] = sel.xpath('//header[@id="WikiaPageHeader"]/h1/text()').extract()
item['artist'] = info.xpath('b/a/text()').extract()
item['album'] = info.xpath('i/a/text()').extract()
item['lyrics'] = sel.xpath('//div[@class="lyricbox"]/text()').extract()
return item
|
elainekmao/hiphoptextanalysis
|
lyricwiki-scraper/lyricwiki/spiders/mastakilla_spider.py
|
Python
|
gpl-2.0
| 1,143
|
from .Gender import Gender
from .Profile import Profile
from .Measurement import Measurement
from .MeasurementSource import MeasurementSource
from .MeasurementType import MeasurementType
from .MeasurementUnit import MeasurementUnit
|
coco19/salud-api
|
app/mod_profiles/models/__init__.py
|
Python
|
gpl-2.0
| 232
|
#!/usr/bin/env python
__author__ = "Devin Kelly"
import pymongo
import time
import json
import re
import os
import daemon
from datetime import datetime
from tornado import httpclient, ioloop
def parseHTML(htmlData):
expr = re.compile("In current traffic: [0-9]{0,2} mins")
matches = re.finditer(expr, htmlData)
trafficData = []
for ii in matches:
tmpData = {}
s = re.sub('<[^<]+?>', '', htmlData[ii.start(0): ii.start(0) + 180])
s = re.sub("<.*$", '', s)
(travelTime, route) = s.split('mins')
route = re.sub("^\s*", "", route)
route = re.sub("\s*$", "", route)
tmpData["route"] = route
travelTime = re.sub("^.*:\s*", "", travelTime)
tmpData["time"] = travelTime
trafficData.append(tmpData)
return trafficData
def insertData(coll, data):
timestamp = time.time()
for trip in data:
coll.insert({"commuteTime": trip['time'], "timestamp": timestamp, "route": trip['route']})
def getWeekdayCommuteTimeFunction(coll, toAddr, fromAddr, startHour, endHour):
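    """Return a closure that fetches Google Maps driving times between the two
    addresses and stores the parsed results in coll, but only on weekdays
    between startHour and endHour."""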
toAddr = toAddr.replace(" ", "+")
fromAddr = fromAddr.replace(" ", "+")
url = "https://maps.google.com/maps?saddr={0}&daddr={1}&hl=en".format(toAddr, fromAddr)
def weekdayCommuteTime():
now = time.time()
dt = datetime.fromtimestamp(now)
if dt.weekday() > 4:
return
if dt.hour < startHour or dt.hour > endHour:
return
http_client = httpclient.HTTPClient()
print 'fetching'
try:
response = http_client.fetch(url)
trafficData = parseHTML(response.body)
print trafficData
insertData(coll, trafficData)
except httpclient.HTTPError as e:
print "Error:", e
http_client.close()
return weekdayCommuteTime
def main():
# Setup DB
dbName = "traffic"
cli = pymongo.MongoClient()
db = cli[dbName]
# Read Config File
with open("trafficConfig.json") as fd:
config = json.loads(fd.read())
home = config["home"]
work = config["work"]
interval = config["interval_ms"]
# Setup IO Loop
callbacks = []
io_loop = ioloop.IOLoop.instance()
# morning commute
startHour = 6
endHour = 11
coll = db["morning"]
F1 = getWeekdayCommuteTimeFunction(coll, home, work, startHour, endHour)
callbacks.append(ioloop.PeriodicCallback(F1, interval, io_loop))
# afternoon commute
startHour = 15
endHour = 23
coll = db["afternoon"]
F2 = getWeekdayCommuteTimeFunction(coll, work, home, startHour, endHour)
callbacks.append(ioloop.PeriodicCallback(F2, interval, io_loop))
# Start callbacks
[ii.start() for ii in callbacks]
# Start IO Loop
io_loop.start()
return
if __name__ == "__main__":
pwd = os.getcwd()
with daemon.DaemonContext(working_directory=pwd):
main()
|
dwwkelly/trafficDownloader
|
trafficDownloader.py
|
Python
|
gpl-2.0
| 2,828
|
#!/usr/bin/env python
from tempfile import NamedTemporaryFile
from django.contrib.sites.models import Site
from django.template.loader import render_to_string
from django.utils import translation
import re, subprocess
import cStringIO as StringIO
from django.conf import settings
WETO_REQUEST_FORMAT_NAME = getattr(settings, 'WETO_REQUEST_FORMAT_NAME', 'format')
WETO_REQUEST_FORMAT_PDF_VALUE = getattr(settings, 'WETO_REQUEST_FORMAT_PDF_VALUE', 'pdf')
WETO_LIB_PATH = getattr(settings, 'WETO_LIB_PATH', '/usr/bin/wkhtmltopdf')
WETO_OPTS = getattr(settings, 'WETO_OPTS', ["--dpi", "600", "--page-size", "A4"])
DEBUG = getattr(settings, 'DEBUG', False)
def replace_relative_with_absolute_links(site_url, content):
# replace urls with absolute urls including site and ssl/non-ssl
content = re.sub(r'href="/', r'href="%s/' % site_url, content)
content = re.sub(r'src="/', r'src="%s/' % site_url, content)
# replace relative urls with absolute urls including site and ssl/non-ssl,
# not sure if this really works this way...
content = re.sub(r'href="!http', r'href="%s/' % site_url, content)
content = re.sub(r'src="!http', r'src="%s/' % site_url, content)
return content
def transform_to_pdf(response, request):
toc = request.GET.get("toc", None)
footer = request.GET.get("footer", None)
header = request.GET.get("header", None)
pdf_name = request.GET.get("pdf_name", "report.pdf")
response['mimetype'] = 'application/pdf'
response['Content-Disposition'] = 'attachment; filename=%s.pdf' % pdf_name
content = response.content
    # TODO: Make this more stable and less of a hack
site_url = u"https://" if request.is_secure() else u"http://"
current_site = Site.objects.get_current()
site_url += current_site.domain
site_url = str(site_url)
content = replace_relative_with_absolute_links(site_url, content)
string_content = StringIO.StringIO(content)
popen_command = [WETO_LIB_PATH,] + WETO_OPTS
language = translation.get_language()
if header:
header_file = NamedTemporaryFile(suffix='.html')
header = render_to_string('weto/pdf_header.html', request)
header_file.write(replace_relative_with_absolute_links(site_url, header))
header_file.flush()
header_file.seek(0)
popen_command += ['--header-html', header_file.name]
if footer:
footer_file = NamedTemporaryFile(suffix='.html')
footer = render_to_string('weto/pdf_footer.html', request)
footer_file.write(replace_relative_with_absolute_links(site_url, footer))
footer_file.flush()
footer_file.seek(0)
popen_command += ['--footer-html', footer_file.name]
if toc:
toc_file = NamedTemporaryFile()
popen_command += ["toc"]
if toc != "default":
rendered = render_to_string('weto/toc_xsl.xml', request)
if getattr(settings, 'USE_I18N'):
toc_file.write(rendered.translate(language))
else:
toc_file.write(rendered)
toc_file.flush()
toc_file.seek(0)
popen_command += ['--xsl-style-sheet', toc_file.name]
popen_command += [ "-", "-"]
if DEBUG: # show errors on stdout
sub = subprocess.Popen(popen_command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
else:
sub = subprocess.Popen(popen_command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
string_content.flush()
string_content.seek(0)
pdf = sub.communicate(input=string_content.read())
string_content.close()
# don't know why, but I need to first remove the content, before writing to it, else it appends the content
response.content = ''
response.write(pdf[0])
if header:
header_file.close()
if toc:
toc_file.close()
if footer:
footer_file.close()
return response
class PdfMiddleware(object):
"""
Converts the response to a pdf one.
"""
def process_response(self, request, response):
format = request.GET.get(WETO_REQUEST_FORMAT_NAME, None)
if format == WETO_REQUEST_FORMAT_PDF_VALUE:
response = transform_to_pdf(response, request)
return response
|
hixi/django-weto
|
weto/middleware.py
|
Python
|
gpl-2.0
| 4,312
|
__all__ = ["pval_task", "annotation_task"]
|
eeyorkey/ipac
|
tasks/__init__.py
|
Python
|
gpl-2.0
| 42
|
#!/usr/bin/env python
#
# This is only needed for Python v2 but is harmless for Python v3.
#
import PyQt5.sip as sip
sip.setapi('QString', 2)
#
from PyQt5 import QtCore, QtGui, QtWidgets
#
import sys
import os
import sqlite3 as sqlite
#
# this is needed for me to use unpickle objects
#
sys.path.append(os.path.join(os.getcwd(), 'Generic'))
genericPath = sys.path[len(sys.path) - 1]
sys.path.append(os.path.join(genericPath, 'Kernel'))
sys.path.append(os.path.join(genericPath, 'Interface'))
#
from Interface.cadwindow import CadWindowMdi
def getPythonCAD():
app = QtWidgets.QApplication(sys.argv)
# Splash screen
splashPath = os.path.join(os.getcwd(), 'icons', 'splashScreen1.png')
splash_pix = QtGui.QPixmap(splashPath)
splash = QtWidgets.QSplashScreen(splash_pix, QtCore.Qt.WindowStaysOnTopHint)
splash.setMask(splash_pix.mask())
splash.show()
w = CadWindowMdi()
w.show()
# End of splash screen
splash.finish(w)
return w, app
if __name__ == '__main__':
w, app = getPythonCAD()
sys.exit(app.exec_())
|
Csega/PythonCAD3
|
pythoncad_qt.py
|
Python
|
gpl-2.0
| 1,067
|
# DO THIS FIRST to set project name !!!
import askapdev.sphinx
# CAN NOT contain spaces!
askapdev.sphinx.project = u'askap.parset'
from askapdev.sphinx.conf import *
version = 'current'
release = 'current'
|
ATNF/askapsdp
|
Code/Base/py-askap/current/doc/conf.py
|
Python
|
gpl-2.0
| 352
|
from django.contrib.auth.decorators import login_required, permission_required,\
user_passes_test
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from project.tramitacao.forms import FormTipoCaixa
from django.http import HttpResponseRedirect
from project.tramitacao.models import Tbcontratoservidor, Tbtipocaixa
# note (assumption): Tbtipocaixa and AuthUser are referenced below but were not
# imported in the original file; Tbtipocaixa is assumed to live in the same
# models module, while the origin of AuthUser is unclear from this file alone.
from django.contrib import messages
from project.tramitacao.admin import verificar_permissao_grupo
from django.http.response import HttpResponse
from project.tramitacao.relatorio_base import relatorio_csv_base, relatorio_ods_base,\
relatorio_ods_base_header, relatorio_pdf_base,\
relatorio_pdf_base_header_title, relatorio_pdf_base_header
from odslib import ODS
nome_relatorio = "relatorio_tipo_caixa"
response_consulta = "/sicop/restrito/tipo_caixa/consulta/"
titulo_relatorio = "Relatorio dos Tipos de Caixa"
planilha_relatorio = "Tipos de Caixa"
@permission_required('sicop.tipo_caixa_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def consulta(request):
if request.method == "POST":
nome = request.POST['nmcontrato']
lista = Tbcontratoservidor.objects.all()#.filter( nmtipocaixa__icontains=nome, tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
else:
lista = Tbcontratoservidor.objects.all()
lista = lista.order_by( 'nmcontrato' )
    # store the query result in the session, in preparation for the report/pdf
request.session['relatorio_tipo_caixa'] = lista
return render_to_response('controle/servidor/contratoservidor/consulta.html' ,{'lista':lista}, context_instance = RequestContext(request))
@permission_required('sicop.tipo_caixa_cadastro', login_url='/excecoes/permissao_negada/', raise_exception=True)
def cadastro(request):
if request.method == "POST":
next = request.GET.get('next', '/')
if validacao(request):
f_tipocaixa = Tbtipocaixa(
nmtipocaixa = request.POST['nmtipocaixa'],
desctipocaixa = request.POST['desctipocaixa'],
tbdivisao = AuthUser.objects.get( pk = request.user.id ).tbdivisao
)
f_tipocaixa.save()
if next == "/":
return HttpResponseRedirect("/sicop/restrito/tipo_caixa/consulta/")
else:
return HttpResponseRedirect( next )
return render_to_response('sicop/restrito/tipo_caixa/cadastro.html',{}, context_instance = RequestContext(request))
@permission_required('sicop.tipo_caixa_edicao', login_url='/excecoes/permissao_negada/', raise_exception=True)
def edicao(request, id):
instance = get_object_or_404(Tbtipocaixa, id=id)
if request.method == "POST":
if validacao(request):
f_tipocaixa = Tbtipocaixa(
id = instance.id,
nmtipocaixa = request.POST['nmtipocaixa'],
desctipocaixa = request.POST['desctipocaixa'],
tbdivisao = AuthUser.objects.get( pk = request.user.id ).tbdivisao
)
f_tipocaixa.save()
return HttpResponseRedirect("/sicop/restrito/tipo_caixa/edicao/"+str(id)+"/")
return render_to_response('sicop/restrito/tipo_caixa/edicao.html', {"tipocaixa":instance}, context_instance = RequestContext(request))
@permission_required('sicop.tipo_caixa_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def relatorio_pdf(request):
    # build the list object with the fields to show in the report/pdf
lista = request.session[nome_relatorio]
if lista:
response = HttpResponse(mimetype='application/pdf')
doc = relatorio_pdf_base_header(response, nome_relatorio)
elements=[]
dados = relatorio_pdf_base_header_title(titulo_relatorio)
dados.append( ('NOME','DESCRICAO') )
for obj in lista:
dados.append( ( obj.nmtipocaixa , obj.desctipocaixa ) )
return relatorio_pdf_base(response, doc, elements, dados)
else:
return HttpResponseRedirect(response_consulta)
@permission_required('sicop.tipo_caixa_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def relatorio_ods(request):
    # build the list object with the fields to show in the report/pdf
lista = request.session[nome_relatorio]
if lista:
ods = ODS()
sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, ods)
# subtitle
sheet.getCell(0, 1).setAlignHorizontal('center').stringValue( 'Nome' ).setFontSize('14pt')
sheet.getCell(1, 1).setAlignHorizontal('center').stringValue( 'Descricao' ).setFontSize('14pt')
sheet.getRow(1).setHeight('20pt')
        # SECTION CUSTOMIZED FOR EACH QUERY
        # DATA
x = 0
for obj in lista:
sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.nmtipocaixa)
sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.desctipocaixa)
x += 1
        # SECTION CUSTOMIZED FOR EACH QUERY
relatorio_ods_base(ods, planilha_relatorio)
# generating response
response = HttpResponse(mimetype=ods.mimetype.toString())
response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
ods.save(response)
return response
else:
return HttpResponseRedirect( response_consulta )
@permission_required('sicop.tipo_caixa_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def relatorio_csv(request):
    # build the list object with the fields to show in the report/pdf
lista = request.session[nome_relatorio]
if lista:
response = HttpResponse(content_type='text/csv')
writer = relatorio_csv_base(response, nome_relatorio)
writer.writerow(['Nome', 'Descricao'])
for obj in lista:
writer.writerow([obj.nmtipocaixa, obj.desctipocaixa])
return response
else:
return HttpResponseRedirect( response_consulta )
def validacao(request_form):
warning = True
if request_form.POST['nmtipocaixa'] == '':
messages.add_message(request_form,messages.WARNING,'Informe um nome para o tipo caixa')
warning = False
return warning
|
waldenilson/TerraLegal
|
project/servidor/restrito/contratoservidor.py
|
Python
|
gpl-2.0
| 6,591
|
from kivy.support import install_twisted_reactor
install_twisted_reactor()
from twisted.internet import reactor
from twisted.internet import protocol
class EchoProtocol(protocol.Protocol):
def dataReceived(self, data):
response = self.factory.app.handle_message(data)
if response:
self.transport.write(response)
class EchoFactory(protocol.Factory):
protocol = EchoProtocol
def __init__(self, app):
self.app = app
from kivy.app import App
from kivy.uix.label import Label
class TwistedServerApp(App):
def build(self):
self.label = Label(text="server started\n")
reactor.listenTCP(8000, EchoFactory(self))
return self.label
def handle_message(self, msg):
self.label.text = "received: %s\n" % msg
if msg == "ping":
msg = "pong"
if msg == "plop":
msg = "kivy rocks"
self.label.text += "responded: %s\n" % msg
return msg
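
# --- Hedged usage sketch (not part of the original example) ---
# A minimal stdlib client to exercise the echo server above; host and port
# mirror the reactor.listenTCP(8000, ...) call in build(). Python 3 is
# assumed for using the socket as a context manager.
import socket

def send_message(msg, host="127.0.0.1", port=8000):
    """Connect, send msg, and return the server's reply as a string."""
    with socket.create_connection((host, port)) as sock:
        sock.sendall(msg.encode("utf-8"))
        return sock.recv(1024).decode("utf-8")

# send_message("ping") should return "pong", per handle_message() above.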
if __name__ == '__main__':
TwistedServerApp().run()
|
cmac4603/Home-Utilities-App
|
reactor_server.py
|
Python
|
gpl-2.0
| 1,031
|
import sys
from geopy import Point
from django.apps import apps as django_apps
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style
from .geo_mixin import GeoMixin
LANDMARK_NAME = 0
LATITUDE = 2
LETTERS = list(map(chr, range(65, 91)))
LONGITUDE = 1
style = color_style()
class Mapper(GeoMixin):
center_lat = None
center_lon = None
landmarks = None # format ((name, longitude, latitude), )
map_area = None
radius = 5.5
mapper_model = None
def __init__(self):
self.name = self.map_area or f'mapper {self.__class__.__name__}'
app_config = django_apps.get_app_config('edc_map')
mapper_model = self.mapper_model or app_config.mapper_model
if not mapper_model:
raise ImproperlyConfigured(
f'Invalid mapper_model. Got None. See {repr(self)}.')
try:
self.item_model = django_apps.get_model(*mapper_model.split('.'))
except LookupError as e:
sys.stdout.write(style.WARNING(
f'\n Warning. Lookup error in mapper. See {repr(self)}. Got {e} '
'edc_map.apps.AppConfig\n'))
else:
self.item_model_cls = self.item_model
self.item_label = self.item_model._meta.verbose_name
self.load()
def __repr__(self):
return 'Mapper({0.map_area!r})'.format(self)
def __str__(self):
return '({0.map_area!r})'.format(self)
def load(self):
return None
@property
def __dict__(self):
return {
'map_area': self.map_area,
'center_lat': self.center_lat,
'center_lon': self.center_lon,
'radius': self.radius}
@property
def area_center_point(self):
return Point(self.center_lat, self.center_lon)
@property
def area_radius(self):
return self.radius
def point_in_map_area(self, point):
"""Return True if point is within mapper area radius."""
return self.point_in_radius(
point, self.area_center_point, self.area_radius)
def raise_if_not_in_map_area(self, point):
self.raise_if_not_in_radius(
point, self.area_center_point, self.area_radius,
units='km', label=self.map_area)
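
# --- Hedged usage sketch (hypothetical values, not part of the original module) ---
# A concrete subclass illustrating the expected class attributes, including the
# landmarks format documented above: ((name, longitude, latitude), ). The map
# area name, coordinates, and 'household.household' mapper_model are assumptions.
class GaboroneMapper(Mapper):
    map_area = 'gaborone'
    center_lat = -24.6282
    center_lon = 25.9231
    radius = 5.5
    mapper_model = 'household.household'
    landmarks = (('village_clinic', 25.9100, -24.6400), )

# GaboroneMapper().point_in_map_area(Point(-24.63, 25.92)) would return True
# for a point within 5.5 km of the area center.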
|
botswana-harvard/edc-map
|
edc_map/mapper.py
|
Python
|
gpl-2.0
| 2,309
|
from __future__ import division
import numpy as np
from fatiando.gravmag.euler import Classic, ExpandingWindow, MovingWindow
from fatiando.gravmag import sphere, fourier
from fatiando.mesher import Sphere
from fatiando import utils, gridder
model = None
x, y, z = None, None, None
inc, dec = None, None
struct_ind = None
base = None
pos = None
field, xderiv, yderiv, zderiv = None, None, None, None
precision = 0.01
def setup():
global model, x, y, z, inc, dec, struct_ind, field, xderiv, yderiv, \
zderiv, base, pos
inc, dec = -30, 50
pos = np.array([1000, 1000, 200])
model = Sphere(pos[0], pos[1], pos[2], 1,
#{'magnetization':utils.ang2vec(100, 25, -10)})
{'magnetization':10000})
struct_ind = 3
shape = (128, 128)
x, y, z = gridder.regular((0, 3000, 0, 3000), shape, z=-1)
base = 10
field = utils.nt2si(sphere.tf(x, y, z, [model], inc, dec)) + base
xderiv = fourier.derivx(x, y, field, shape)
yderiv = fourier.derivy(x, y, field, shape)
zderiv = fourier.derivz(x, y, field, shape)
def test_euler_classic_sphere_mag():
"gravmag.euler.Classic for sphere model and magnetic data"
euler = Classic(x, y, z, field, xderiv, yderiv, zderiv, struct_ind).fit()
assert (base - euler.baselevel_)/base <= precision, \
'baselevel: %g estimated: %g' % (base, euler.baselevel_)
assert np.all((pos - euler.estimate_)/pos <= precision), \
'position: %s estimated: %s' % (str(pos), str(euler.estimate_))
def test_euler_classic_expandingwindow_sphere_mag():
"gravmag.euler.ExpandingWindow w Classic for sphere model + magnetic data"
euler = ExpandingWindow(
Classic(x, y, z, field, xderiv, yderiv, zderiv, struct_ind),
center=[1000, 1000], sizes=np.linspace(100, 2000, 20)).fit()
assert (base - euler.baselevel_)/base <= precision, \
'baselevel: %g estimated: %g' % (base, euler.baselevel_)
assert np.all((pos - euler.estimate_)/pos <= precision), \
'position: %s estimated: %s' % (str(pos), str(euler.estimate_))
def test_euler_classic_movingwindow_sphere_mag():
"gravmag.euler.MovingWindow w Classic for sphere model + magnetic data"
euler = MovingWindow(
Classic(x, y, z, field, xderiv, yderiv, zderiv, struct_ind),
windows=[10, 10], size=(1000, 1000), keep=0.2).fit()
for b in euler.baselevel_:
assert (base - b)/base <= precision, \
'baselevel: %g estimated: %g' % (base, b)
for c in euler.estimate_:
assert np.all((pos - c)/pos <= precision), \
'position: %s estimated: %s' % (str(pos), str(c))
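
# --- Hedged usage note (assumption: nose-style test discovery) ---
# The module-level setup() above is run once by the test runner before the
# test_* functions, so the suite can be executed with, e.g.:
#   nosetests test/test_gravmag_euler.py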
|
seancug/python-example
|
fatiando-0.2/test/test_gravmag_euler.py
|
Python
|
gpl-2.0
| 2,686
|
'''
Created on Jan 6, 2013
__author__ = "Elizabeth 'pidge' Flanagan"
__copyright__ = "Copyright 2012-2013, Intel Corp."
__credits__ = ["Elizabeth Flanagan"]
__license__ = "GPL"
__version__ = "2.0"
__maintainer__ = "Elizabeth Flanagan"
__email__ = "elizabeth.flanagan@intel.com"
'''
from buildbot.steps.shell import ShellCommand
from buildbot.process.buildstep import LogLineObserver
from distutils.version import StrictVersion
import os
class BuildImages(ShellCommand):
haltOnFailure = False
flunkOnFailure = True
name = "BuildImages"
def __init__(self, factory, argdict=None, **kwargs):
self.layerversion_yoctobsp=None
self.machine=""
self.images=""
self._pendingLogObservers = []
self.factory = factory
for k, v in argdict.iteritems():
setattr(self, k, v)
# Timeout needs to be passed to LoggingBuildStep as a kwarg
self.timeout = 100000
kwargs['timeout']=self.timeout
ShellCommand.__init__(self, **kwargs)
def start(self):
self.layerversion_yoctobsp = self.getProperty("layerversion_yoctobsp")
self.layerversion_core = self.getProperty("layerversion_core")
self.machine = self.getProperty("MACHINE")
# core-image-basic rename
# See: http://git.yoctoproject.org/cgit/cgit.cgi/poky/commit/?id=b7f1cca517bbd4191828c6bae32e0c5041f1ff19
# I hate making people change their configs, so support both.
if self.layerversion_core < "4":
self.images=self.images.replace("core-image-full-cmdline", "core-image-basic")
else:
self.images=self.images.replace("core-image-basic", "core-image-full-cmdline")
if self.layerversion_yoctobsp is not None and int(self.layerversion_yoctobsp) < 2 and self.machine is not None and self.machine == "genericx86-64":
self.command = "echo 'Skipping Step.'"
else:
bitbakeflags = "-k "
# -w only exists in bitbake 1.25 and newer, use distroversion string and make sure we're on poky >1.7
if self.getProperty('bitbakeversion') and StrictVersion(self.getProperty('bitbakeversion')) >= StrictVersion("1.25"):
bitbakeflags += "-w "
self.command = ". ./oe-init-build-env; bitbake " + bitbakeflags + self.images
self.description = ["Building " + str(self.images)]
ShellCommand.start(self)
def describe(self, done=False):
description = ShellCommand.describe(self, done)
if self.layerversion_yoctobsp is not None and int(self.layerversion_yoctobsp) < 2 and self.machine is not None and self.machine == "genericx86-64":
description.append("genericx86-64 does not exist in this branch. Skipping")
return description
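
# --- Hedged usage sketch (assumed buildbot factory wiring, not original code) ---
# Entries in argdict become attributes via the setattr() loop in __init__, e.g.:
#   f = BuildFactory()
#   f.addStep(BuildImages(f, argdict={'images': 'core-image-minimal core-image-sato'}))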
|
joeythesaint/yocto-autobuilder
|
lib/python2.7/site-packages/autobuilder/buildsteps/BuildImages.py
|
Python
|
gpl-2.0
| 2,787
|
#
# commands.py - the GraalVM specific commands
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
import os, sys, shutil, zipfile, tempfile, re, time, datetime, platform, subprocess, multiprocessing
from os.path import join, exists, dirname, basename, getmtime
from argparse import ArgumentParser, REMAINDER
import mx
import sanitycheck
import itertools
import json, textwrap
# This works because when mx loads this file, it makes sure __file__ gets an absolute path
_graal_home = dirname(dirname(__file__))
""" Used to distinguish an exported GraalVM (see 'mx export'). """
_vmSourcesAvailable = exists(join(_graal_home, 'make')) and exists(join(_graal_home, 'src'))
""" The VMs that can be built and run along with an optional description. Only VMs with a
description are listed in the dialogue for setting the default VM (see _get_vm()). """
_vmChoices = {
'graal' : 'All compilation is performed with Graal. This includes bootstrapping Graal itself unless -XX:-BootstrapGraal is used.',
'server' : 'Normal compilation is performed with the tiered system (i.e., client + server), Truffle compilation is performed with Graal. Use this for optimal Truffle performance.',
'client' : None, # normal compilation with client compiler, explicit compilation (e.g., by Truffle) with Graal
'server-nograal' : None, # all compilation with tiered system (i.e., client + server), Graal omitted
'client-nograal' : None, # all compilation with client compiler, Graal omitted
'original' : None, # default VM copied from bootstrap JDK
}
""" The VM that will be run by the 'vm' command and built by default by the 'build' command.
This can be set via the global '--vm' option or the DEFAULT_VM environment variable.
It can also be temporarily set by using a VM context manager object in a 'with' statement. """
_vm = None
""" The VM builds that will be run by the 'vm' command - default is first in list """
_vmbuildChoices = ['product', 'fastdebug', 'debug', 'optimized']
""" The VM build that will be run by the 'vm' command.
This can be set via the global '--vmbuild' option.
It can also be temporarily set by using a VM context manager object in a 'with' statement. """
_vmbuild = _vmbuildChoices[0]
_jacoco = 'off'
""" The current working directory to switch to before running the VM. """
_vm_cwd = None
""" The base directory in which the JDKs cloned from $JAVA_HOME exist. """
_installed_jdks = None
""" Prefix for running the VM. """
_vm_prefix = None
_make_eclipse_launch = False
_minVersion = mx.JavaVersion('1.7.0_04')
def _get_vm():
"""
Gets the configured VM, presenting a dialogue if there is no currently configured VM.
"""
global _vm
if _vm:
return _vm
vm = mx.get_env('DEFAULT_VM')
if vm is None:
if not sys.stdout.isatty():
mx.abort('Need to specify VM with --vm option or DEFAULT_VM environment variable')
envPath = join(_graal_home, 'mx', 'env')
mx.log('Please select the VM to be executed from the following: ')
items = [k for k in _vmChoices.keys() if _vmChoices[k] is not None]
descriptions = [_vmChoices[k] for k in _vmChoices.keys() if _vmChoices[k] is not None]
vm = mx.select_items(items, descriptions, allowMultiple=False)
answer = raw_input('Persist this choice by adding "DEFAULT_VM=' + vm + '" to ' + envPath + '? [Yn]: ')
if not answer.lower().startswith('n'):
with open(envPath, 'a') as fp:
print >> fp, 'DEFAULT_VM=' + vm
_vm = vm
return vm
"""
A context manager that can be used with the 'with' statement to set the VM
used by all VM executions within the scope of the 'with' statement. For example:
with VM('server'):
dacapo(['pmd'])
"""
class VM:
def __init__(self, vm=None, build=None):
assert vm is None or vm in _vmChoices.keys()
assert build is None or build in _vmbuildChoices
self.vm = vm if vm else _vm
self.build = build if build else _vmbuild
self.previousVm = _vm
self.previousBuild = _vmbuild
def __enter__(self):
global _vm, _vmbuild
_vm = self.vm
_vmbuild = self.build
def __exit__(self, exc_type, exc_value, traceback):
global _vm, _vmbuild
_vm = self.previousVm
_vmbuild = self.previousBuild
def _chmodDir(chmodFlags, dirname, fnames):
os.chmod(dirname, chmodFlags)
for name in fnames:
os.chmod(os.path.join(dirname, name), chmodFlags)
def chmodRecursive(dirname, chmodFlags):
os.path.walk(dirname, _chmodDir, chmodFlags)
def clean(args):
"""clean the GraalVM source tree"""
opts = mx.clean(args, parser=ArgumentParser(prog='mx clean'))
if opts.native:
def rmIfExists(name):
if os.path.isdir(name):
shutil.rmtree(name)
elif os.path.isfile(name):
os.unlink(name)
rmIfExists(join(_graal_home, 'build'))
rmIfExists(join(_graal_home, 'build-nograal'))
rmIfExists(_jdksDir())
rmIfExists(mx.distribution('GRAAL').path)
def export(args):
"""create a GraalVM zip file for distribution"""
    parser = ArgumentParser(prog='mx export')
parser.add_argument('--omit-vm-build', action='store_false', dest='vmbuild', help='omit VM build step')
parser.add_argument('--omit-dist-init', action='store_false', dest='distInit', help='omit class files and IDE configurations from distribution')
parser.add_argument('zipfile', nargs=REMAINDER, metavar='zipfile')
args = parser.parse_args(args)
tmp = tempfile.mkdtemp(prefix='tmp', dir=_graal_home)
if args.vmbuild:
# Make sure the product VM binary is up to date
        with VM(build='product'):
build([])
mx.log('Copying Java sources and mx files...')
mx.run(('hg archive -I graal -I mx -I mxtool -I mx.sh ' + tmp).split())
# Copy the GraalVM JDK
mx.log('Copying GraalVM JDK...')
src = _jdk()
dst = join(tmp, basename(src))
shutil.copytree(src, dst)
zfName = join(_graal_home, 'graalvm-' + mx.get_os() + '.zip')
zf = zipfile.ZipFile(zfName, 'w')
for root, _, files in os.walk(tmp):
for f in files:
name = join(root, f)
arcname = name[len(tmp) + 1:]
zf.write(join(tmp, name), arcname)
# create class files and IDE configurations
if args.distInit:
mx.log('Creating class files...')
mx.run('mx build'.split(), cwd=tmp)
mx.log('Creating IDE configurations...')
mx.run('mx ideinit'.split(), cwd=tmp)
# clean up temp directory
mx.log('Cleaning up...')
shutil.rmtree(tmp)
mx.log('Created distribution in ' + zfName)
def _run_benchmark(args, availableBenchmarks, runBenchmark):
vmOpts, benchmarksAndOptions = _extract_VM_args(args, useDoubleDash=availableBenchmarks is None)
if availableBenchmarks is None:
harnessArgs = benchmarksAndOptions
return runBenchmark(None, harnessArgs, vmOpts)
if len(benchmarksAndOptions) == 0:
mx.abort('at least one benchmark name or "all" must be specified')
benchmarks = list(itertools.takewhile(lambda x: not x.startswith('-'), benchmarksAndOptions))
harnessArgs = benchmarksAndOptions[len(benchmarks):]
if 'all' in benchmarks:
benchmarks = availableBenchmarks
else:
for bm in benchmarks:
if bm not in availableBenchmarks:
mx.abort('unknown benchmark: ' + bm + '\nselect one of: ' + str(availableBenchmarks))
failed = []
for bm in benchmarks:
if not runBenchmark(bm, harnessArgs, vmOpts):
failed.append(bm)
if len(failed) != 0:
mx.abort('Benchmark failures: ' + str(failed))
def dacapo(args):
"""run one or more DaCapo benchmarks"""
def launcher(bm, harnessArgs, extraVmOpts):
return sanitycheck.getDacapo(bm, harnessArgs).test(_get_vm(), extraVmOpts=extraVmOpts)
_run_benchmark(args, sanitycheck.dacapoSanityWarmup.keys(), launcher)
def scaladacapo(args):
"""run one or more Scala DaCapo benchmarks"""
def launcher(bm, harnessArgs, extraVmOpts):
return sanitycheck.getScalaDacapo(bm, harnessArgs).test(_get_vm(), extraVmOpts=extraVmOpts)
_run_benchmark(args, sanitycheck.dacapoScalaSanityWarmup.keys(), launcher)
def _arch():
machine = platform.uname()[4]
if machine in ['amd64', 'AMD64', 'x86_64', 'i86pc']:
return 'amd64'
if machine in ['sun4v']:
return 'sparc'
if machine == 'i386' and mx.get_os() == 'darwin':
try:
            # Support for Snow Leopard and earlier versions of Mac OS X
if subprocess.check_output(['sysctl', '-n', 'hw.cpu64bit_capable']).strip() == '1':
return 'amd64'
except OSError:
# sysctl is not available
pass
mx.abort('unknown or unsupported architecture: os=' + mx.get_os() + ', machine=' + machine)
def _vmLibDirInJdk(jdk):
"""
Get the directory within a JDK where the server and client
subdirectories are located.
"""
if platform.system() == 'Darwin':
return join(jdk, 'jre', 'lib')
if platform.system() == 'Windows':
return join(jdk, 'jre', 'bin')
return join(jdk, 'jre', 'lib', _arch())
def _vmCfgInJdk(jdk):
"""
Get the jvm.cfg file.
"""
if platform.system() == 'Windows':
return join(jdk, 'jre', 'lib', _arch(), 'jvm.cfg')
return join(_vmLibDirInJdk(jdk), 'jvm.cfg')
def _jdksDir():
return os.path.abspath(join(_installed_jdks if _installed_jdks else _graal_home, 'jdk' + str(mx.java().version)))
def _handle_missing_VM(bld, vm):
mx.log('The ' + bld + ' ' + vm + ' VM has not been created')
if sys.stdout.isatty():
answer = raw_input('Build it now? [Yn]: ')
if not answer.lower().startswith('n'):
with VM(vm, bld):
build([])
return
    mx.abort('You need to run "mx --vm ' + vm + ' --vmbuild ' + bld + ' build" to build the selected VM')
def _jdk(build='product', vmToCheck=None, create=False, installGraalJar=True):
"""
Get the JDK into which Graal is installed, creating it first if necessary.
"""
jdk = join(_jdksDir(), build)
if create:
srcJdk = mx.java().jdk
jdkContents = ['bin', 'include', 'jre', 'lib']
if exists(join(srcJdk, 'db')):
jdkContents.append('db')
if mx.get_os() != 'windows' and exists(join(srcJdk, 'man')):
jdkContents.append('man')
if not exists(jdk):
mx.log('Creating ' + jdk + ' from ' + srcJdk)
os.makedirs(jdk)
for d in jdkContents:
src = join(srcJdk, d)
dst = join(jdk, d)
if not exists(src):
mx.abort('Host JDK directory is missing: ' + src)
shutil.copytree(src, dst)
# Make a copy of the default VM so that this JDK can be
# reliably used as the bootstrap for a HotSpot build.
jvmCfg = _vmCfgInJdk(jdk)
if not exists(jvmCfg):
mx.abort(jvmCfg + ' does not exist')
defaultVM = None
jvmCfgLines = []
with open(jvmCfg) as f:
for line in f:
if line.startswith('-') and defaultVM is None:
parts = line.split()
assert len(parts) == 2, parts
assert parts[1] == 'KNOWN', parts[1]
defaultVM = parts[0][1:]
jvmCfgLines += ['# default VM is a copy of the unmodified ' + defaultVM + ' VM\n']
jvmCfgLines += ['-original KNOWN\n']
else:
jvmCfgLines += [line]
assert defaultVM is not None, 'Could not find default VM in ' + jvmCfg
if mx.get_os() != 'windows':
chmodRecursive(jdk, 0755)
shutil.move(join(_vmLibDirInJdk(jdk), defaultVM), join(_vmLibDirInJdk(jdk), 'original'))
with open(jvmCfg, 'w') as fp:
for line in jvmCfgLines:
fp.write(line)
# Install a copy of the disassembler library
try:
hsdis([], copyToDir=_vmLibDirInJdk(jdk))
except SystemExit:
pass
else:
if not exists(jdk):
if _installed_jdks and mx._opts.verbose:
mx.log("Could not find JDK directory at " + jdk)
_handle_missing_VM(build, vmToCheck if vmToCheck else 'graal')
if installGraalJar:
_installGraalJarInJdks(mx.distribution('GRAAL'))
if vmToCheck is not None:
jvmCfg = _vmCfgInJdk(jdk)
found = False
with open(jvmCfg) as f:
for line in f:
if line.strip() == '-' + vmToCheck + ' KNOWN':
found = True
break
if not found:
_handle_missing_VM(build, vmToCheck)
return jdk
def _installGraalJarInJdks(graalDist):
graalJar = graalDist.path
graalOptions = join(_graal_home, 'graal.options')
jdks = _jdksDir()
if exists(jdks):
for e in os.listdir(jdks):
jreLibDir = join(jdks, e, 'jre', 'lib')
if exists(jreLibDir):
# do a copy and then a move to get atomic updating (on Unix) of graal.jar in the JRE
fd, tmp = tempfile.mkstemp(suffix='', prefix='graal.jar', dir=jreLibDir)
shutil.copyfile(graalJar, tmp)
os.close(fd)
shutil.move(tmp, join(jreLibDir, 'graal.jar'))
if exists(graalOptions):
shutil.copy(graalOptions, join(jreLibDir, 'graal.options'))
# run a command in the windows SDK Debug Shell
def _runInDebugShell(cmd, workingDir, logFile=None, findInOutput=None, respondTo={}):
newLine = os.linesep
STARTTOKEN = 'RUNINDEBUGSHELL_STARTSEQUENCE'
ENDTOKEN = 'RUNINDEBUGSHELL_ENDSEQUENCE'
winSDK = mx.get_env('WIN_SDK', 'C:\\Program Files\\Microsoft SDKs\\Windows\\v7.1\\')
if not exists(winSDK):
mx.abort("Could not find Windows SDK : '" + winSDK + "' does not exist")
if not exists(join(winSDK, 'Bin', 'SetEnv.cmd')):
mx.abort("Invalid Windows SDK path (" + winSDK + ") : could not find Bin/SetEnv.cmd (you can use the WIN_SDK environment variable to specify an other path)")
p = subprocess.Popen('cmd.exe /E:ON /V:ON /K ""' + winSDK + '/Bin/SetEnv.cmd" & echo ' + STARTTOKEN + '"', \
shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
stdout = p.stdout
stdin = p.stdin
if logFile:
log = open(logFile, 'w')
ret = False
while True:
        # encoding may be None on Windows platforms
if sys.stdout.encoding is None:
encoding = 'utf-8'
else:
encoding = sys.stdout.encoding
line = stdout.readline().decode(encoding)
if logFile:
log.write(line.encode('utf-8'))
line = line.strip()
mx.log(line)
if line == STARTTOKEN:
stdin.write('cd /D ' + workingDir + ' & ' + cmd + ' & echo ' + ENDTOKEN + newLine)
for regex in respondTo.keys():
match = regex.search(line)
if match:
stdin.write(respondTo[regex] + newLine)
if findInOutput:
match = findInOutput.search(line)
if match:
ret = True
if line == ENDTOKEN:
if not findInOutput:
stdin.write('echo ERRXXX%errorlevel%' + newLine)
else:
break
if line.startswith('ERRXXX'):
if line == 'ERRXXX0':
ret = True
            break
stdin.write('exit' + newLine)
if logFile:
log.close()
return ret
def jdkhome(vm=None):
"""return the JDK directory selected for the 'vm' command"""
build = _vmbuild if _vmSourcesAvailable else 'product'
return _jdk(build, installGraalJar=False)
def print_jdkhome(args, vm=None):
"""print the JDK directory selected for the 'vm' command"""
print jdkhome(vm)
def buildvars(args):
"""describe the variables that can be set by the -D option to the 'mx build' commmand"""
buildVars = {
'ALT_BOOTDIR' : 'The location of the bootstrap JDK installation (default: ' + mx.java().jdk + ')',
'ALT_OUTPUTDIR' : 'Build directory',
'HOTSPOT_BUILD_JOBS' : 'Number of CPUs used by make (default: ' + str(multiprocessing.cpu_count()) + ')',
'INSTALL' : 'Install the built VM into the JDK? (default: y)',
'ZIP_DEBUGINFO_FILES' : 'Install zipped debug symbols file? (default: 0)',
}
mx.log('HotSpot build variables that can be set by the -D option to "mx build":')
mx.log('')
for n in sorted(buildVars.iterkeys()):
mx.log(n)
mx.log(textwrap.fill(buildVars[n], initial_indent=' ', subsequent_indent=' ', width=200))
mx.log('')
mx.log('Note that these variables can be given persistent values in the file ' + join(_graal_home, 'mx', 'env') + ' (see \'mx about\').')
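
# --- Hedged illustration (not part of the original module) ---
# Example: use the -D option documented above to build without installing the
# freshly built VM into the JDK:
#   mx build -DINSTALL=n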
def build(args, vm=None):
"""build the VM binary
The global '--vm' and '--vmbuild' options select which VM type and build target to build."""
# Override to fail quickly if extra arguments are given
# at the end of the command line. This allows for a more
# helpful error message.
class AP(ArgumentParser):
def __init__(self):
ArgumentParser.__init__(self, prog='mx build')
def parse_args(self, args):
result = ArgumentParser.parse_args(self, args)
if len(result.remainder) != 0:
firstBuildTarget = result.remainder[0]
mx.abort('To specify the ' + firstBuildTarget + ' VM build target, you need to use the global "--vmbuild" option. For example:\n' +
' mx --vmbuild ' + firstBuildTarget + ' build')
return result
# Call mx.build to compile the Java sources
parser=AP()
parser.add_argument('--export-dir', help='directory to which graal.jar and graal.options will be copied', metavar='<path>')
parser.add_argument('-D', action='append', help='set a HotSpot build variable (run \'mx buildvars\' to list variables)', metavar='name=value')
opts2 = mx.build(['--source', '1.7'] + args, parser=parser)
assert len(opts2.remainder) == 0
if opts2.export_dir is not None:
if not exists(opts2.export_dir):
os.makedirs(opts2.export_dir)
else:
assert os.path.isdir(opts2.export_dir), '{} is not a directory'.format(opts2.export_dir)
shutil.copy(mx.distribution('GRAAL').path, opts2.export_dir)
graalOptions = join(_graal_home, 'graal.options')
if exists(graalOptions):
shutil.copy(graalOptions, opts2.export_dir)
if not _vmSourcesAvailable or not opts2.native:
return
builds = [_vmbuild]
if vm is None:
vm = _get_vm()
if vm == 'original':
pass
elif vm.startswith('server'):
buildSuffix = ''
elif vm.startswith('client'):
buildSuffix = '1'
else:
assert vm == 'graal', vm
buildSuffix = 'graal'
for build in builds:
if build == 'ide-build-target':
build = os.environ.get('IDE_BUILD_TARGET', None)
if build is None or len(build) == 0:
continue
jdk = _jdk(build, create=True)
if vm == 'original':
if build != 'product':
mx.log('only product build of original VM exists')
continue
vmDir = join(_vmLibDirInJdk(jdk), vm)
if not exists(vmDir):
if mx.get_os() != 'windows':
chmodRecursive(jdk, 0755)
mx.log('Creating VM directory in JDK7: ' + vmDir)
os.makedirs(vmDir)
def filterXusage(line):
if not 'Xusage.txt' in line:
sys.stderr.write(line + os.linesep)
# Check if a build really needs to be done
timestampFile = join(vmDir, '.build-timestamp')
if opts2.force or not exists(timestampFile):
mustBuild = True
else:
mustBuild = False
timestamp = os.path.getmtime(timestampFile)
sources = []
for d in ['src', 'make']:
for root, dirnames, files in os.walk(join(_graal_home, d)):
# ignore <graal>/src/share/tools
if root == join(_graal_home, 'src', 'share'):
dirnames.remove('tools')
sources += [join(root, name) for name in files]
for f in sources:
if len(f) != 0 and os.path.getmtime(f) > timestamp:
mustBuild = True
break
if not mustBuild:
mx.logv('[all files in src and make directories are older than ' + timestampFile[len(_graal_home) + 1:] + ' - skipping native build]')
continue
if platform.system() == 'Windows':
compilelogfile = _graal_home + '/graalCompile.log'
mksHome = mx.get_env('MKS_HOME', 'C:\\cygwin\\bin')
variant = {'client': 'compiler1', 'server': 'compiler2'}.get(vm, vm)
project_config = variant + '_' + build
_runInDebugShell('msbuild ' + _graal_home + r'\build\vs-amd64\jvm.vcproj /p:Configuration=' + project_config + ' /target:clean', _graal_home)
winCompileCmd = r'set HotSpotMksHome=' + mksHome + r'& set OUT_DIR=' + jdk + r'& set JAVA_HOME=' + jdk + r'& set path=%JAVA_HOME%\bin;%path%;%HotSpotMksHome%& cd /D "' +_graal_home + r'\make\windows"& call create.bat ' + _graal_home
print(winCompileCmd)
winCompileSuccess = re.compile(r"^Writing \.vcxproj file:")
if not _runInDebugShell(winCompileCmd, _graal_home, compilelogfile, winCompileSuccess):
mx.log('Error executing create command')
return
winBuildCmd = 'msbuild ' + _graal_home + r'\build\vs-amd64\jvm.vcxproj /p:Configuration=' + project_config + ' /p:Platform=x64'
if not _runInDebugShell(winBuildCmd, _graal_home, compilelogfile):
mx.log('Error building project')
return
else:
cpus = multiprocessing.cpu_count()
runCmd = [mx.gmake_cmd()]
runCmd.append(build + buildSuffix)
env = os.environ.copy()
if opts2.D:
for nv in opts2.D:
name, value = nv.split('=', 1)
env[name.strip()] = value
env.setdefault('ARCH_DATA_MODEL', '64')
env.setdefault('LANG', 'C')
env.setdefault('HOTSPOT_BUILD_JOBS', str(cpus))
env.setdefault('ALT_BOOTDIR', mx.java().jdk)
if not mx._opts.verbose:
runCmd.append('MAKE_VERBOSE=')
env['JAVA_HOME'] = jdk
if vm.endswith('nograal'):
env['INCLUDE_GRAAL'] = 'false'
env.setdefault('ALT_OUTPUTDIR', join(_graal_home, 'build-nograal', mx.get_os()))
else:
env['INCLUDE_GRAAL'] = 'true'
env.setdefault('INSTALL', 'y')
if mx.get_os() == 'solaris' :
# If using sparcWorks, setup flags to avoid make complaining about CC version
cCompilerVersion = subprocess.Popen('CC -V', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).stderr.readlines()[0]
if cCompilerVersion.startswith('CC: Sun C++') :
compilerRev = cCompilerVersion.split(' ')[3]
env.setdefault('ENFORCE_COMPILER_REV', compilerRev)
env.setdefault('ENFORCE_CC_COMPILER_REV', compilerRev)
if build == 'jvmg':
# I want ALL the symbols when I'm debugging on Solaris
                        # Some Makefile variables are overloaded by environment variables so we need to explicitly
# pass them down in the command line. This one is an example of that.
runCmd.append('STRIP_POLICY=no_strip')
# This removes the need to unzip the *.diz files before debugging in gdb
env.setdefault('ZIP_DEBUGINFO_FILES', '0')
# Clear these 2 variables as having them set can cause very confusing build problems
env.pop('LD_LIBRARY_PATH', None)
env.pop('CLASSPATH', None)
mx.run(runCmd, cwd=join(_graal_home, 'make'), err=filterXusage, env=env)
jvmCfg = _vmCfgInJdk(jdk)
if not exists(jvmCfg):
mx.abort(jvmCfg + ' does not exist')
prefix = '-' + vm + ' '
vmKnown = prefix + 'KNOWN\n'
lines = []
found = False
with open(jvmCfg) as f:
for line in f:
if line.strip() == vmKnown.strip():
found = True
lines.append(line)
if not found:
mx.log('Appending "' + prefix + 'KNOWN" to ' + jvmCfg)
if mx.get_os() != 'windows':
os.chmod(jvmCfg, 0755)
with open(jvmCfg, 'w') as f:
for line in lines:
if line.startswith(prefix):
line = vmKnown
found = True
f.write(line)
if not found:
f.write(vmKnown)
if exists(timestampFile):
os.utime(timestampFile, None)
else:
file(timestampFile, 'a')
def vmg(args):
"""run the debug build of VM selected by the '--vm' option"""
return vm(args, vmbuild='debug')
def vmfg(args):
"""run the fastdebug build of VM selected by the '--vm' option"""
return vm(args, vmbuild='fastdebug')
def vm(args, vm=None, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, vmbuild=None):
"""run the VM selected by the '--vm' option"""
if vm is None:
vm = _get_vm()
if cwd is None:
cwd = _vm_cwd
elif _vm_cwd is not None and _vm_cwd != cwd:
mx.abort("conflicting working directories: do not set --vmcwd for this command")
build = vmbuild if vmbuild is not None else _vmbuild if _vmSourcesAvailable else 'product'
jdk = _jdk(build, vmToCheck=vm, installGraalJar=False)
mx.expand_project_in_args(args)
if _make_eclipse_launch:
mx.make_eclipse_launch(args, 'graal-' + build, name=None, deps=mx.project('com.oracle.graal.hotspot').all_deps([], True))
if len([a for a in args if 'PrintAssembly' in a]) != 0:
hsdis([], copyToDir=_vmLibDirInJdk(jdk))
if mx.java().debug_port is not None:
args = ['-Xdebug', '-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=' + str(mx.java().debug_port)] + args
if _jacoco == 'on' or _jacoco == 'append':
jacocoagent = mx.library("JACOCOAGENT", True)
# Exclude all compiler tests and snippets
excludes = ['com.oracle.graal.compiler.tests.*', 'com.oracle.graal.jtt.*']
for p in mx.projects():
excludes += _find_classes_with_annotations(p, None, ['@Snippet', '@ClassSubstitution', '@Test'], includeInnerClasses=True).keys()
excludes += p.find_classes_with_matching_source_line(None, lambda line: 'JaCoCo Exclude' in line, includeInnerClasses=True).keys()
includes = ['com.oracle.graal.*']
agentOptions = {
'append' : 'true' if _jacoco == 'append' else 'false',
'bootclasspath' : 'true',
'includes' : ':'.join(includes),
'excludes' : ':'.join(excludes),
'destfile' : 'jacoco.exec'
}
args = ['-javaagent:' + jacocoagent.get_path(True) + '=' + ','.join([k + '=' + v for k, v in agentOptions.items()])] + args
if '-d64' not in args:
args = ['-d64'] + args
exe = join(jdk, 'bin', mx.exe_suffix('java'))
pfx = _vm_prefix.split() if _vm_prefix is not None else []
if '-version' in args:
ignoredArgs = args[args.index('-version')+1:]
if len(ignoredArgs) > 0:
mx.log("Warning: The following options will be ignored by the vm because they come after the '-version' argument: " + ' '.join(ignoredArgs))
return mx.run(pfx + [exe, '-' + vm] + args, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd, timeout=timeout)
def _find_classes_with_annotations(p, pkgRoot, annotations, includeInnerClasses=False):
"""
Scan the sources of project 'p' for Java source files containing a line starting with 'annotation'
(ignoring preceding whitespace) and return the fully qualified class name for each Java
source file matched in a list.
"""
matches = lambda line : len([a for a in annotations if line == a or line.startswith(a + '(')]) != 0
return p.find_classes_with_matching_source_line(pkgRoot, matches, includeInnerClasses)
def _extract_VM_args(args, allowClasspath=False, useDoubleDash=False):
"""
Partitions a command line into a leading sequence of HotSpot VM options and the rest.
"""
for i in range(0, len(args)):
if useDoubleDash:
if args[i] == '--':
vmArgs = args[:i]
remainder = args[i + 1:]
return vmArgs, remainder
else:
if not args[i].startswith('-'):
if i != 0 and (args[i - 1] == '-cp' or args[i - 1] == '-classpath'):
if not allowClasspath:
mx.abort('Cannot supply explicit class path option')
else:
continue
vmArgs = args[:i]
remainder = args[i:]
return vmArgs, remainder
return args, []
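
# --- Hedged illustration (not part of the original module) ---
# _extract_VM_args(['-Xmx1g', '-esa', 'MyClass', 'arg'])
#   -> (['-Xmx1g', '-esa'], ['MyClass', 'arg'])
# _extract_VM_args(['-Xmx1g', '--', 'pmd', '-n'], useDoubleDash=True)
#   -> (['-Xmx1g'], ['pmd', '-n'])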
def _run_tests(args, harness, annotations, testfile):
vmArgs, tests = _extract_VM_args(args)
for t in tests:
if t.startswith('-'):
mx.abort('VM option ' + t + ' must precede ' + tests[0])
def containsAny(c, substrings):
for s in substrings:
if s in c:
return True
return False
candidates = []
for p in mx.projects():
if mx.java().javaCompliance < p.javaCompliance:
continue
candidates += _find_classes_with_annotations(p, None, annotations).keys()
classes = []
if len(tests) == 0:
classes = candidates
else:
for t in tests:
found = False
for c in candidates:
if t in c:
found = True
classes.append(c)
if not found:
                mx.log('warning: no tests matched by substring "' + t + '"')
projectscp = mx.classpath([pcp.name for pcp in mx.projects() if pcp.javaCompliance <= mx.java().javaCompliance])
if len(classes) != 0:
f_testfile = open(testfile, 'w')
for c in classes:
f_testfile.write(c + '\n')
f_testfile.close()
harness(projectscp, vmArgs)
def _unittest(args, annotations):
mxdir = dirname(__file__)
name = 'JUnitWrapper'
javaSource = join(mxdir, name + '.java')
javaClass = join(mxdir, name + '.class')
testfile = os.environ.get('MX_TESTFILE', None)
if testfile is None:
(_, testfile) = tempfile.mkstemp(".testclasses", "graal")
os.close(_)
def harness(projectscp, vmArgs):
if not exists(javaClass) or getmtime(javaClass) < getmtime(javaSource):
subprocess.check_call([mx.java().javac, '-cp', projectscp, '-d', mxdir, javaSource])
if not isGraalEnabled(_get_vm()):
prefixArgs = ['-esa', '-ea']
else:
prefixArgs = ['-XX:-BootstrapGraal', '-esa', '-ea']
with open(testfile) as fp:
testclasses = [l.rstrip() for l in fp.readlines()]
if len(testclasses) == 1:
# Execute Junit directly when one test is being run. This simplifies
# replaying the VM execution in a native debugger (e.g., gdb).
vm(prefixArgs + vmArgs + ['-cp', projectscp, 'org.junit.runner.JUnitCore'] + testclasses)
else:
vm(prefixArgs + vmArgs + ['-cp', projectscp + os.pathsep + mxdir, name] + [testfile])
try:
_run_tests(args, harness, annotations, testfile)
finally:
if os.environ.get('MX_TESTFILE') is None:
os.remove(testfile)
_unittestHelpSuffix = """
If filters are supplied, only tests whose fully qualified name
includes a filter as a substring are run.
For example, this command line:
mx unittest -G:Dump= -G:MethodFilter=BC_aload.* -G:+PrintCFG BC_aload
will run all JUnit test classes that contain 'BC_aload' in their
fully qualified name and will pass these options to the VM:
-G:Dump= -G:MethodFilter=BC_aload.* -G:+PrintCFG
To get around command line length limitations on some OSes, the
JUnit class names to be executed are written to a file that a
custom JUnit wrapper reads and passes onto JUnit proper. The
MX_TESTFILE environment variable can be set to specify a
file which will not be deleted once the unittests are done
(unlike the temporary file otherwise used).
As with all other commands, using the global '-v' before 'unittest'
command will cause mx to show the complete command line
it uses to run the VM.
"""
def unittest(args):
"""run the JUnit tests (all testcases){0}"""
_unittest(args, ['@Test', '@LongTest', '@Parameters'])
def shortunittest(args):
"""run the JUnit tests (short testcases only){0}"""
_unittest(args, ['@Test'])
def longunittest(args):
"""run the JUnit tests (long testcases only){0}"""
_unittest(args, ['@LongTest', '@Parameters'])
def buildvms(args):
"""build one or more VMs in various configurations"""
vmsDefault = ','.join(_vmChoices.keys())
vmbuildsDefault = ','.join(_vmbuildChoices)
    parser = ArgumentParser(prog='mx buildvms')
parser.add_argument('--vms', help='a comma separated list of VMs to build (default: ' + vmsDefault + ')', metavar='<args>', default=vmsDefault)
parser.add_argument('--builds', help='a comma separated list of build types (default: ' + vmbuildsDefault + ')', metavar='<args>', default=vmbuildsDefault)
parser.add_argument('-n', '--no-check', action='store_true', help='omit running "java -version" after each build')
parser.add_argument('-c', '--console', action='store_true', help='send build output to console instead of log file')
args = parser.parse_args(args)
vms = args.vms.split(',')
builds = args.builds.split(',')
allStart = time.time()
for v in vms:
for vmbuild in builds:
if v == 'original' and vmbuild != 'product':
continue
if not args.console:
logFile = join(v + '-' + vmbuild + '.log')
log = open(join(_graal_home, logFile), 'wb')
start = time.time()
mx.log('BEGIN: ' + v + '-' + vmbuild + '\t(see: ' + logFile + ')')
# Run as subprocess so that output can be directed to a file
subprocess.check_call([sys.executable, '-u', join('mxtool', 'mx.py'), '--vm', v, '--vmbuild', vmbuild, 'build'], cwd=_graal_home, stdout=log, stderr=subprocess.STDOUT)
duration = datetime.timedelta(seconds=time.time() - start)
mx.log('END: ' + v + '-' + vmbuild + '\t[' + str(duration) + ']')
else:
with VM(v, vmbuild):
build([])
if not args.no_check:
vmargs = ['-version']
if v == 'graal':
vmargs.insert(0, '-XX:-BootstrapGraal')
vm(vmargs, vm=v, vmbuild=vmbuild)
allDuration = datetime.timedelta(seconds=time.time() - allStart)
mx.log('TOTAL TIME: ' + '[' + str(allDuration) + ']')
def gate(args):
"""run the tests used to validate a push
If this command exits with a 0 exit code, then the source code is in
a state that would be accepted for integration into the main repository."""
class Task:
def __init__(self, title):
self.start = time.time()
self.title = title
self.end = None
self.duration = None
mx.log(time.strftime('gate: %d %b %Y %H:%M:%S: BEGIN: ') + title)
def stop(self):
self.end = time.time()
self.duration = datetime.timedelta(seconds=self.end - self.start)
mx.log(time.strftime('gate: %d %b %Y %H:%M:%S: END: ') + self.title + ' [' + str(self.duration) + ']')
return self
def abort(self, codeOrMessage):
self.end = time.time()
self.duration = datetime.timedelta(seconds=self.end - self.start)
mx.log(time.strftime('gate: %d %b %Y %H:%M:%S: ABORT: ') + self.title + ' [' + str(self.duration) + ']')
mx.abort(codeOrMessage)
return self
    parser = ArgumentParser(prog='mx gate')
parser.add_argument('-j', '--omit-java-clean', action='store_false', dest='cleanJava', help='omit cleaning Java native code')
parser.add_argument('-n', '--omit-native-clean', action='store_false', dest='cleanNative', help='omit cleaning and building native code')
parser.add_argument('-g', '--only-build-graalvm', action='store_false', dest='buildNonGraal', help='only build the Graal VM')
parser.add_argument('--jacocout', help='specify the output directory for jacoco report')
args = parser.parse_args(args)
global _jacoco
tasks = []
total = Task('Gate')
try:
t = Task('Clean')
cleanArgs = []
if not args.cleanNative:
cleanArgs.append('--no-native')
if not args.cleanJava:
cleanArgs.append('--no-java')
clean(cleanArgs)
tasks.append(t.stop())
t = Task('IDEConfigCheck')
mx.ideclean([])
mx.ideinit([])
tasks.append(t.stop())
eclipse_exe = os.environ.get('ECLIPSE_EXE')
if eclipse_exe is not None:
t = Task('CodeFormatCheck')
if mx.eclipseformat(['-e', eclipse_exe]) != 0:
t.abort('Formatter modified files - run "mx eclipseformat", check in changes and repush')
tasks.append(t.stop())
t = Task('Canonicalization Check')
mx.log(time.strftime('%d %b %Y %H:%M:%S - Ensuring mx/projects files are canonicalized...'))
if mx.canonicalizeprojects([]) != 0:
t.abort('Rerun "mx canonicalizeprojects" and check-in the modified mx/projects files.')
tasks.append(t.stop())
t = Task('BuildJava')
build(['--no-native', '--jdt-warning-as-error'])
tasks.append(t.stop())
t = Task('Checkstyle')
if mx.checkstyle([]) != 0:
t.abort('Checkstyle warnings were found')
tasks.append(t.stop())
if exists('jacoco.exec'):
os.unlink('jacoco.exec')
if args.jacocout is not None:
_jacoco = 'append'
else:
_jacoco = 'off'
t = Task('BuildHotSpotGraal: fastdebug,product')
buildvms(['--vms', 'graal,server', '--builds', 'fastdebug,product'])
tasks.append(t.stop())
with VM('graal', 'fastdebug'):
t = Task('BootstrapWithSystemAssertions:fastdebug')
vm(['-esa', '-version'])
tasks.append(t.stop())
with VM('graal', 'product'):
t = Task('BootstrapWithGCVerification:product')
vm(['-XX:+UnlockDiagnosticVMOptions', '-XX:+VerifyBeforeGC', '-XX:+VerifyAfterGC', '-version'])
tasks.append(t.stop())
with VM('graal', 'product'):
t = Task('BootstrapWithG1GCVerification:product')
vm(['-XX:+UnlockDiagnosticVMOptions', '-XX:-UseSerialGC','-XX:+UseG1GC','-XX:+UseNewCode','-XX:+VerifyBeforeGC', '-XX:+VerifyAfterGC', '-version'])
tasks.append(t.stop())
with VM('graal', 'product'):
t = Task('BootstrapWithRegisterPressure:product')
vm(['-G:RegisterPressure=rbx,r11,r10,r14,xmm3,xmm11,xmm14', '-esa', '-version'])
tasks.append(t.stop())
with VM('graal', 'product'):
t = Task('BootstrapWithAOTConfiguration:product')
vm(['-G:+AOTCompilation', '-G:+VerifyPhases', '-esa', '-version'])
tasks.append(t.stop())
with VM('server', 'product'): # hosted mode
t = Task('UnitTests:hosted-product')
unittest([])
tasks.append(t.stop())
for vmbuild in ['fastdebug', 'product']:
for test in sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel=vmbuild):
t = Task(str(test) + ':' + vmbuild)
if not test.test('graal'):
t.abort(test.name + ' Failed')
tasks.append(t.stop())
if args.jacocout is not None:
jacocoreport([args.jacocout])
_jacoco = 'off'
t = Task('CleanAndBuildGraalVisualizer')
mx.run(['ant', '-f', join(_graal_home, 'visualizer', 'build.xml'), '-q', 'clean', 'build'])
tasks.append(t.stop())
# Prevent Graal modifications from breaking the standard builds
if args.buildNonGraal:
t = Task('BuildHotSpotVarieties')
buildvms(['--vms', 'client,server', '--builds', 'fastdebug,product'])
buildvms(['--vms', 'server-nograal', '--builds', 'product'])
buildvms(['--vms', 'server-nograal', '--builds', 'optimized'])
tasks.append(t.stop())
for vmbuild in ['product', 'fastdebug']:
for theVm in ['client', 'server']:
with VM(theVm, vmbuild):
t = Task('DaCapo_pmd:' + theVm + ':' + vmbuild)
dacapo(['pmd'])
tasks.append(t.stop())
t = Task('UnitTests:' + theVm + ':' + vmbuild)
unittest(['-XX:CompileCommand=exclude,*::run*', 'graal.api'])
tasks.append(t.stop())
except KeyboardInterrupt:
total.abort(1)
except BaseException as e:
import traceback
traceback.print_exc()
total.abort(str(e))
total.stop()
mx.log('Gate task times:')
for t in tasks:
mx.log(' ' + str(t.duration) + '\t' + t.title)
mx.log(' =======')
mx.log(' ' + str(total.duration))
def deoptalot(args):
"""bootstrap a fastdebug Graal VM with DeoptimizeALot and VerifyOops on
If the first argument is a number, the process will be repeated
this number of times. All other arguments are passed to the VM."""
count = 1
if len(args) > 0 and args[0].isdigit():
count = int(args[0])
del args[0]
for _ in range(count):
if not vm(['-XX:+DeoptimizeALot', '-XX:+VerifyOops'] + args + ['-version'], vmbuild='fastdebug') == 0:
mx.abort("Failed")
def longtests(args):
deoptalot(['15', '-Xmx48m'])
dacapo(['100', 'eclipse', '-esa'])
def gv(args):
"""run the Graal Visualizer"""
with open(join(_graal_home, '.graal_visualizer.log'), 'w') as fp:
mx.logv('[Graal Visualizer log is in ' + fp.name + ']')
if not exists(join(_graal_home, 'visualizer', 'build.xml')):
mx.logv('[This initial execution may take a while as the NetBeans platform needs to be downloaded]')
mx.run(['ant', '-f', join(_graal_home, 'visualizer', 'build.xml'), '-l', fp.name, 'run'])
def igv(args):
"""run the Ideal Graph Visualizer"""
with open(join(_graal_home, '.ideal_graph_visualizer.log'), 'w') as fp:
mx.logv('[Ideal Graph Visualizer log is in ' + fp.name + ']')
if not exists(join(_graal_home, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'nbplatform')):
mx.logv('[This initial execution may take a while as the NetBeans platform needs to be downloaded]')
mx.run(['ant', '-f', join(_graal_home, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'build.xml'), '-l', fp.name, 'run'])
def bench(args):
"""run benchmarks and parse their output for results
    Results are JSON formatted: {group : {benchmark : score}}."""
resultFile = None
if '-resultfile' in args:
index = args.index('-resultfile')
if index + 1 < len(args):
resultFile = args[index + 1]
del args[index]
del args[index]
else:
mx.abort('-resultfile must be followed by a file name')
vm = _get_vm()
    if len(args) == 0:
args = ['all']
vmArgs = [arg for arg in args if arg.startswith('-')]
def benchmarks_in_group(group):
prefix = group + ':'
return [a[len(prefix):] for a in args if a.startswith(prefix)]
results = {}
benchmarks = []
#DaCapo
if ('dacapo' in args or 'all' in args):
benchmarks += sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
else:
dacapos = benchmarks_in_group('dacapo')
for dacapo in dacapos:
if dacapo not in sanitycheck.dacapoSanityWarmup.keys():
mx.abort('Unknown DaCapo : ' + dacapo)
iterations = sanitycheck.dacapoSanityWarmup[dacapo][sanitycheck.SanityCheckLevel.Benchmark]
if (iterations > 0):
benchmarks += [sanitycheck.getDacapo(dacapo, iterations)]
if ('scaladacapo' in args or 'all' in args):
benchmarks += sanitycheck.getScalaDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
else:
scaladacapos = benchmarks_in_group('scaladacapo')
for scaladacapo in scaladacapos:
if scaladacapo not in sanitycheck.dacapoScalaSanityWarmup.keys():
mx.abort('Unknown Scala DaCapo : ' + scaladacapo)
iterations = sanitycheck.dacapoScalaSanityWarmup[scaladacapo][sanitycheck.SanityCheckLevel.Benchmark]
if (iterations > 0):
benchmarks += [sanitycheck.getScalaDacapo(scaladacapo, ['-n', str(iterations)])]
#Bootstrap
if ('bootstrap' in args or 'all' in args):
benchmarks += sanitycheck.getBootstraps()
#SPECjvm2008
if ('specjvm2008' in args or 'all' in args):
benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120'])]
else:
specjvms = benchmarks_in_group('specjvm2008')
for specjvm in specjvms:
benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120', specjvm])]
if ('specjbb2005' in args or 'all' in args):
benchmarks += [sanitycheck.getSPECjbb2005()]
if ('specjbb2013' in args): # or 'all' in args //currently not in default set
benchmarks += [sanitycheck.getSPECjbb2013()]
if ('ctw-full' in args):
benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.Full))
if ('ctw-noinline' in args):
benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.NoInline))
if ('ctw-nocomplex' in args):
benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.NoComplex))
for test in benchmarks:
for (groupName, res) in test.bench(vm, extraVmOpts=vmArgs).items():
group = results.setdefault(groupName, {})
group.update(res)
mx.log(json.dumps(results))
if resultFile:
with open(resultFile, 'w') as f:
f.write(json.dumps(results))
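
# --- Hedged illustration (not part of the original module) ---
# Per the docstring above, the logged JSON is shaped {group: {benchmark: score}},
# e.g. (hypothetical names and numbers): {"dacapo": {"pmd": 4500.0}}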
def specjvm2008(args):
"""run one or more SPECjvm2008 benchmarks"""
def launcher(bm, harnessArgs, extraVmOpts):
return sanitycheck.getSPECjvm2008(harnessArgs + [bm]).bench(_get_vm(), extraVmOpts=extraVmOpts)
availableBenchmarks = set(sanitycheck.specjvm2008Names)
for name in sanitycheck.specjvm2008Names:
parts = name.rsplit('.', 1)
if len(parts) > 1:
assert len(parts) == 2
group = parts[0]
print group
availableBenchmarks.add(group)
_run_benchmark(args, sorted(availableBenchmarks), launcher)
def specjbb2013(args):
"""runs the composite SPECjbb2013 benchmark"""
def launcher(bm, harnessArgs, extraVmOpts):
assert bm is None
return sanitycheck.getSPECjbb2013(harnessArgs).bench(_get_vm(), extraVmOpts=extraVmOpts)
_run_benchmark(args, None, launcher)
def specjbb2005(args):
"""runs the composite SPECjbb2005 benchmark"""
def launcher(bm, harnessArgs, extraVmOpts):
assert bm is None
return sanitycheck.getSPECjbb2005(harnessArgs).bench(_get_vm(), extraVmOpts=extraVmOpts)
_run_benchmark(args, None, launcher)
def hsdis(args, copyToDir=None):
"""download the hsdis library
This is needed to support HotSpot's assembly dumping features.
By default it downloads the Intel syntax version, use the 'att' argument to install AT&T syntax."""
flavor = 'intel'
if 'att' in args:
flavor = 'att'
lib = mx.add_lib_suffix('hsdis-' + _arch())
path = join(_graal_home, 'lib', lib)
if not exists(path):
mx.download(path, ['http://lafo.ssw.uni-linz.ac.at/hsdis/' + flavor + "/" + lib])
if copyToDir is not None and exists(copyToDir):
shutil.copy(path, copyToDir)
def hcfdis(args):
"""disassemble HexCodeFiles embedded in text files
Run a tool over the input files to convert all embedded HexCodeFiles
to a disassembled format."""
    parser = ArgumentParser(prog='mx hcfdis')
parser.add_argument('-m', '--map', help='address to symbol map applied to disassembler output')
parser.add_argument('files', nargs=REMAINDER, metavar='files...')
args = parser.parse_args(args)
path = join(_graal_home, 'lib', 'hcfdis-1.jar')
if not exists(path):
mx.download(path, ['http://lafo.ssw.uni-linz.ac.at/hcfdis-1.jar'])
mx.run_java(['-jar', path] + args.files)
if args.map is not None:
addressRE = re.compile(r'0[xX]([A-Fa-f0-9]+)')
with open(args.map) as fp:
lines = fp.read().splitlines()
symbols = dict()
for l in lines:
addressAndSymbol = l.split(' ', 1)
if len(addressAndSymbol) == 2:
                address, symbol = addressAndSymbol
if address.startswith('0x'):
address = long(address, 16)
symbols[address] = symbol
for f in args.files:
with open(f) as fp:
lines = fp.read().splitlines()
updated = False
for i in range(0, len(lines)):
l = lines[i]
for m in addressRE.finditer(l):
sval = m.group(0)
val = long(sval, 16)
sym = symbols.get(val)
if sym:
l = l.replace(sval, sym)
updated = True
lines[i] = l
if updated:
mx.log('updating ' + f)
with open('new_' + f, "w") as fp:
for l in lines:
print >> fp, l
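
# --- Hedged illustration (assumed map-file format, not part of the original module) ---
# The --map file is parsed above as '<0xaddress> <symbol>' pairs split on the
# first space, e.g.:
#   0x00007f3e4c001000 StubRoutines::call_stub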
def jacocoreport(args):
"""create a JaCoCo coverage report
Creates the report from the 'jacoco.exec' file in the current directory.
Default output directory is 'coverage', but an alternative can be provided as an argument."""
jacocoreport = mx.library("JACOCOREPORT", True)
out = 'coverage'
if len(args) == 1:
out = args[0]
elif len(args) > 1:
mx.abort('jacocoreport takes only one argument : an output directory')
mx.run_java(['-jar', jacocoreport.get_path(True), '-in', 'jacoco.exec', '-g', join(_graal_home, 'graal'), out])
def isGraalEnabled(vm):
return vm != 'original' and not vm.endswith('nograal')
def site(args):
"""create a website containing javadoc and the project dependency graph"""
return mx.site(['--name', 'Graal',
'--jd', '@-tag', '--jd', '@test:X',
'--jd', '@-tag', '--jd', '@run:X',
'--jd', '@-tag', '--jd', '@bug:X',
'--jd', '@-tag', '--jd', '@summary:X',
'--jd', '@-tag', '--jd', '@vmoption:X',
'--overview', join(_graal_home, 'graal', 'overview.html'),
'--title', 'Graal OpenJDK Project Documentation',
'--dot-output-base', 'projects'] + args)
def mx_init(suite):
commands = {
'build': [build, ''],
'buildvars': [buildvars, ''],
'buildvms': [buildvms, '[-options]'],
'clean': [clean, ''],
'hsdis': [hsdis, '[att]'],
'hcfdis': [hcfdis, ''],
'igv' : [igv, ''],
'jdkhome': [print_jdkhome, ''],
'dacapo': [dacapo, '[VM options] benchmarks...|"all" [DaCapo options]'],
'scaladacapo': [scaladacapo, '[VM options] benchmarks...|"all" [Scala DaCapo options]'],
'specjvm2008': [specjvm2008, '[VM options] benchmarks...|"all" [SPECjvm2008 options]'],
'specjbb2013': [specjbb2013, '[VM options] [-- [SPECjbb2013 options]]'],
'specjbb2005': [specjbb2005, '[VM options] [-- [SPECjbb2005 options]]'],
'gate' : [gate, '[-options]'],
'gv' : [gv, ''],
'bench' : [bench, '[-resultfile file] [all(default)|dacapo|specjvm2008|bootstrap]'],
'unittest' : [unittest, '[VM options] [filters...]', _unittestHelpSuffix],
'longunittest' : [longunittest, '[VM options] [filters...]', _unittestHelpSuffix],
'shortunittest' : [shortunittest, '[VM options] [filters...]', _unittestHelpSuffix],
'jacocoreport' : [jacocoreport, '[output directory]'],
'site' : [site, '[-options]'],
'vm': [vm, '[-options] class [args...]'],
'vmg': [vmg, '[-options] class [args...]'],
'vmfg': [vmfg, '[-options] class [args...]'],
'deoptalot' : [deoptalot, '[n]'],
'longtests' : [longtests, '']
}
mx.add_argument('--jacoco', help='instruments com.oracle.* classes using JaCoCo', default='off', choices=['off', 'on', 'append'])
mx.add_argument('--vmcwd', dest='vm_cwd', help='current directory will be changed to <path> before the VM is executed', default=None, metavar='<path>')
mx.add_argument('--installed-jdks', help='the base directory in which the JDKs cloned from $JAVA_HOME exist. ' +
'The VM selected by --vm and --vmbuild options is under this directory (i.e., ' +
join('<path>', '<vmbuild>', 'jre', 'lib', '<vm>', mx.add_lib_prefix(mx.add_lib_suffix('jvm'))) + ')', default=None, metavar='<path>')
if (_vmSourcesAvailable):
mx.add_argument('--vm', action='store', dest='vm', choices=_vmChoices.keys(), help='the VM type to build/run')
mx.add_argument('--vmbuild', action='store', dest='vmbuild', choices=_vmbuildChoices, help='the VM build to build/run (default: ' + _vmbuildChoices[0] +')')
mx.add_argument('--ecl', action='store_true', dest='make_eclipse_launch', help='create launch configuration for running VM execution(s) in Eclipse')
mx.add_argument('--vmprefix', action='store', dest='vm_prefix', help='prefix for running the VM (e.g. "/usr/bin/gdb --args")', metavar='<prefix>')
mx.add_argument('--gdb', action='store_const', const='/usr/bin/gdb --args', dest='vm_prefix', help='alias for --vmprefix "/usr/bin/gdb --args"')
commands.update({
'export': [export, '[-options] [zipfile]'],
})
mx.update_commands(suite, commands)
def mx_post_parse_cmd_line(opts):
    # TODO _minVersion check could probably be part of a Suite in mx?
    if mx.java().version < _minVersion:
mx.abort('Requires Java version ' + str(_minVersion) + ' or greater, got version ' + str(mx.java().version))
if (_vmSourcesAvailable):
if hasattr(opts, 'vm') and opts.vm is not None:
global _vm
_vm = opts.vm
if hasattr(opts, 'vmbuild') and opts.vmbuild is not None:
global _vmbuild
_vmbuild = opts.vmbuild
global _make_eclipse_launch
_make_eclipse_launch = getattr(opts, 'make_eclipse_launch', False)
global _jacoco
_jacoco = opts.jacoco
global _vm_cwd
_vm_cwd = opts.vm_cwd
global _installed_jdks
_installed_jdks = opts.installed_jdks
global _vm_prefix
_vm_prefix = opts.vm_prefix
mx.distribution('GRAAL').add_update_listener(_installGraalJarInJdks)
|
kevinmcain/graal
|
mx/commands.py
|
Python
|
gpl-2.0
| 58,280
|
#!/usr/bin/python3 -Es
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# selinux gui is a tool for examining and modifying SELinux policy
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307 USA
#
# author: Ryan Hallisey rhallisey@redhat.com
# author: Dan Walsh dwalsh@redhat.com
# author: Miroslav Grepl mgrepl@redhat.com
#
#
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GLib
from sepolicy.sedbus import SELinuxDBus
import sys
import sepolicy
import selinux
from selinux import DISABLED, PERMISSIVE, ENFORCING
import sepolicy.network
import sepolicy.manpage
import dbus
import time
import os
import re
import gettext
import unicodedata
PROGNAME = "policycoreutils"
gettext.bindtextdomain(PROGNAME, "/usr/share/locale")
gettext.textdomain(PROGNAME)
try:
gettext.install(PROGNAME,
unicode=True,
codeset='utf-8')
except TypeError:
# Failover to python3 install
gettext.install(PROGNAME,
codeset='utf-8')
except IOError:
import builtins
builtins.__dict__['_'] = str
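# Invert sepolicy's file-type table so type names can be looked up by their
# human-readable description.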
reverse_file_type_str = {}
for f in sepolicy.file_type_str:
reverse_file_type_str[sepolicy.file_type_str[f]] = f
enabled = [_("No"), _("Yes")]
action = [_("Disable"), _("Enable")]
import distutils.sysconfig
ADVANCED_LABEL = (_("Advanced >>"), _("Advanced <<"))
ADVANCED_SEARCH_LABEL = (_("Advanced Search >>"), _("Advanced Search <<"))
OUTBOUND_PAGE = 0
INBOUND_PAGE = 1
TRANSITIONS_FROM_PAGE = 0
TRANSITIONS_TO_PAGE = 1
TRANSITIONS_FILE_PAGE = 2
EXE_PAGE = 0
WRITABLE_PAGE = 1
APP_PAGE = 2
BOOLEANS_PAGE = 0
FILES_PAGE = 1
NETWORK_PAGE = 2
TRANSITIONS_PAGE = 3
LOGIN_PAGE = 4
USER_PAGE = 5
LOCKDOWN_PAGE = 6
SYSTEM_PAGE = 7
FILE_EQUIV_PAGE = 8
START_PAGE = 9
keys = ["boolean", "fcontext", "fcontext-equiv", "port", "login", "user", "module", "node", "interface"]
DISABLED_TEXT = _("""<small>
To change from Disabled to Enforcing mode
- Change the system mode from Disabled to Permissive
- Reboot, so that the system can relabel
- Once the system is working as planned
* Change the system mode to Enforcing</small>
""")
class SELinuxGui():
def __init__(self, app=None, test=False):
self.finish_init = False
self.advanced_init = True
self.opage = START_PAGE
self.dbus = SELinuxDBus()
try:
customized = self.dbus.customized()
except dbus.exceptions.DBusException as e:
print(e)
self.quit()
self.init_cur()
self.application = app
self.filter_txt = ""
builder = Gtk.Builder() # BUILDER OBJ
self.code_path = distutils.sysconfig.get_python_lib(plat_specific=True) + "/sepolicy/"
glade_file = self.code_path + "sepolicy.glade"
builder.add_from_file(glade_file)
self.outer_notebook = builder.get_object("outer_notebook")
self.window = builder.get_object("SELinux_window")
self.main_selection_window = builder.get_object("Main_selection_menu")
self.main_advanced_label = builder.get_object("main_advanced_label")
self.popup = 0
self.applications_selection_button = builder.get_object("applications_selection_button")
self.revert_button = builder.get_object("Revert_button")
self.busy_cursor = Gdk.Cursor(Gdk.CursorType.WATCH)
self.ready_cursor = Gdk.Cursor(Gdk.CursorType.LEFT_PTR)
self.initialtype = selinux.selinux_getpolicytype()[1]
self.current_popup = None
self.import_export = None
self.clear_entry = True
self.files_add = False
self.network_add = False
self.all_domains = []
self.installed_list = []
self.previously_modified = {}
# file dialog
self.file_dialog = builder.get_object("add_path_dialog")
# Error check ***************************************
self.error_check_window = builder.get_object("error_check_window")
self.error_check_label = builder.get_object("error_check_label")
self.invalid_entry = False
# Advanced search window ****************************
self.advanced_search_window = builder.get_object("advanced_search_window")
self.advanced_search_filter = builder.get_object("advanced_filter")
self.advanced_search_filter.set_visible_func(self.filter_the_data)
self.advanced_search_sort = builder.get_object("advanced_sort")
self.advanced_filter_entry = builder.get_object("advanced_filter_entry")
self.advanced_search_treeview = builder.get_object("advanced_search_treeview")
self.advanced_search = False
# Login Items **************************************
self.login_label = builder.get_object("Login_label")
self.login_seuser_combobox = builder.get_object("login_seuser_combobox")
self.login_seuser_combolist = builder.get_object("login_seuser_liststore")
self.login_name_entry = builder.get_object("login_name_entry")
self.login_mls_label = builder.get_object("login_mls_label")
self.login_mls_entry = builder.get_object("login_mls_entry")
self.login_radio_button = builder.get_object("Login_button")
self.login_treeview = builder.get_object("login_treeview")
self.login_liststore = builder.get_object("login_liststore")
self.login_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.login_filter = builder.get_object("login_filter")
self.login_filter.set_visible_func(self.filter_the_data)
self.login_popup_window = builder.get_object("login_popup_window")
self.login_delete_liststore = builder.get_object("login_delete_liststore")
self.login_delete_window = builder.get_object("login_delete_window")
# Users Items **************************************
self.user_popup_window = builder.get_object("user_popup_window")
self.user_radio_button = builder.get_object("User_button")
self.user_liststore = builder.get_object("user_liststore")
self.user_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.user_filter = builder.get_object("user_filter")
self.user_filter.set_visible_func(self.filter_the_data)
self.user_treeview = builder.get_object("user_treeview")
self.user_roles_combobox = builder.get_object("user_roles_combobox")
self.user_roles_combolist = builder.get_object("user_roles_liststore")
self.user_label = builder.get_object("User_label")
self.user_name_entry = builder.get_object("user_name_entry")
self.user_mls_label = builder.get_object("user_mls_label")
self.user_mls_level_entry = builder.get_object("user_mls_level_entry")
self.user_mls_entry = builder.get_object("user_mls_entry")
self.user_combobox = builder.get_object("selinux_user_combobox")
self.user_delete_liststore = builder.get_object("user_delete_liststore")
self.user_delete_window = builder.get_object("user_delete_window")
# File Equiv Items **************************************
self.file_equiv_label = builder.get_object("file_equiv_label")
self.file_equiv_source_entry = builder.get_object("file_equiv_source_entry")
self.file_equiv_dest_entry = builder.get_object("file_equiv_dest_entry")
self.file_equiv_radio_button = builder.get_object("file_equiv_button")
self.file_equiv_treeview = builder.get_object("file_equiv_treeview")
self.file_equiv_liststore = builder.get_object("file_equiv_liststore")
self.file_equiv_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.file_equiv_popup_window = builder.get_object("file_equiv_popup_window")
self.file_equiv_treefilter = builder.get_object("file_equiv_filter")
self.file_equiv_treefilter.set_visible_func(self.filter_the_data)
self.file_equiv_delete_liststore = builder.get_object("file_equiv_delete_liststore")
self.file_equiv_delete_window = builder.get_object("file_equiv_delete_window")
# System Items **************************************
self.app_system_button = builder.get_object("app_system_button")
self.system_radio_button = builder.get_object("System_button")
self.lockdown_radio_button = builder.get_object("Lockdown_button")
self.systems_box = builder.get_object("Systems_box")
self.relabel_button = builder.get_object("Relabel_button")
self.relabel_button_no = builder.get_object("Relabel_button_no")
self.advanced_system = builder.get_object("advanced_system")
self.outer_notebook_frame = builder.get_object("outer_notebook_frame")
self.system_policy_label = builder.get_object("system_policy_type_label")
# Browse Items **************************************
self.select_button_browse = builder.get_object("select_button_browse")
self.cancel_button_browse = builder.get_object("cancel_button_browse")
# More types window items ***************************
self.moreTypes_window_files = builder.get_object("moreTypes_window_files")
self.more_types_files_liststore = builder.get_object("more_types_file_liststore")
self.moreTypes_treeview = builder.get_object("moreTypes_treeview_files")
# System policy type ********************************
self.system_policy_type_liststore = builder.get_object("system_policy_type_liststore")
self.system_policy_type_combobox = builder.get_object("system_policy_type_combobox")
self.policy_list = []
if self.populate_system_policy() < 2:
self.advanced_system.set_visible(False)
self.system_policy_label.set_visible(False)
self.system_policy_type_combobox.set_visible(False)
self.enforcing_button_default = builder.get_object("Enforcing_button_default")
self.permissive_button_default = builder.get_object("Permissive_button_default")
self.disabled_button_default = builder.get_object("Disabled_button_default")
self.initialize_system_default_mode()
# Lockdown Window *********************************
self.enable_unconfined_button = builder.get_object("enable_unconfined")
self.disable_unconfined_button = builder.get_object("disable_unconfined")
self.enable_permissive_button = builder.get_object("enable_permissive")
self.disable_permissive_button = builder.get_object("disable_permissive")
self.enable_ptrace_button = builder.get_object("enable_ptrace")
self.disable_ptrace_button = builder.get_object("disable_ptrace")
# Help Window *********************************
self.help_window = builder.get_object("help_window")
self.help_text = builder.get_object("help_textv")
self.info_text = builder.get_object("info_text")
self.help_image = builder.get_object("help_image")
self.forward_button = builder.get_object("forward_button")
self.back_button = builder.get_object("back_button")
# Update menu items *********************************
self.update_window = builder.get_object("update_window")
self.update_treeview = builder.get_object("update_treeview")
self.update_treestore = builder.get_object("Update_treestore")
self.apply_button = builder.get_object("apply_button")
self.update_button = builder.get_object("Update_button")
# Add button objects ********************************
self.add_button = builder.get_object("Add_button")
self.delete_button = builder.get_object("Delete_button")
self.files_path_entry = builder.get_object("files_path_entry")
self.network_ports_entry = builder.get_object("network_ports_entry")
self.files_popup_window = builder.get_object("files_popup_window")
self.network_popup_window = builder.get_object("network_popup_window")
self.popup_network_label = builder.get_object("Network_label")
self.popup_files_label = builder.get_object("files_label")
self.recursive_path_toggle = builder.get_object("make_path_recursive")
self.files_type_combolist = builder.get_object("files_type_combo_store")
self.files_class_combolist = builder.get_object("files_class_combo_store")
self.files_type_combobox = builder.get_object("files_type_combobox")
self.files_class_combobox = builder.get_object("files_class_combobox")
self.files_mls_label = builder.get_object("files_mls_label")
self.files_mls_entry = builder.get_object("files_mls_entry")
self.advanced_text_files = builder.get_object("Advanced_text_files")
self.files_cancel_button = builder.get_object("cancel_delete_files")
self.network_tcp_button = builder.get_object("tcp_button")
self.network_udp_button = builder.get_object("udp_button")
self.network_port_type_combolist = builder.get_object("network_type_combo_store")
self.network_port_type_combobox = builder.get_object("network_type_combobox")
self.network_mls_label = builder.get_object("network_mls_label")
self.network_mls_entry = builder.get_object("network_mls_entry")
self.advanced_text_network = builder.get_object("Advanced_text_network")
self.network_cancel_button = builder.get_object("cancel_network_delete")
# Add button objects ********************************
# Modify items **************************************
self.show_mislabeled_files_only = builder.get_object("Show_mislabeled_files")
self.mislabeled_files_label = builder.get_object("mislabeled_files_label")
self.warning_files = builder.get_object("warning_files")
self.modify_button = builder.get_object("Modify_button")
self.modify_button.set_sensitive(False)
# Modify items **************************************
# Fix label *****************************************
self.fix_label_window = builder.get_object("fix_label_window")
self.fixlabel_label = builder.get_object("fixlabel_label")
self.fix_label_cancel = builder.get_object("fix_label_cancel")
# Fix label *****************************************
# Delete items **************************************
self.files_delete_window = builder.get_object("files_delete_window")
self.files_delete_treeview = builder.get_object("files_delete_treeview")
self.files_delete_liststore = builder.get_object("files_delete_liststore")
self.network_delete_window = builder.get_object("network_delete_window")
self.network_delete_treeview = builder.get_object("network_delete_treeview")
self.network_delete_liststore = builder.get_object("network_delete_liststore")
# Delete items **************************************
# Progress bar **************************************
self.progress_bar = builder.get_object("progress_bar")
# Progress bar **************************************
# executable_files items ****************************
self.executable_files_treeview = builder.get_object("Executable_files_treeview") # Get the executable files tree view
self.executable_files_filter = builder.get_object("executable_files_filter")
self.executable_files_filter.set_visible_func(self.filter_the_data)
self.executable_files_tab = builder.get_object("Executable_files_tab")
self.executable_files_tab_tooltip_txt = self.executable_files_tab.get_tooltip_text()
self.executable_files_liststore = builder.get_object("executable_files_treestore")
self.executable_files_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.files_radio_button = builder.get_object("files_button")
self.files_button_tooltip_txt = self.files_radio_button.get_tooltip_text()
# executable_files items ****************************
# writable files items ******************************
self.writable_files_treeview = builder.get_object("Writable_files_treeview") # Get the Writable files tree view
self.writable_files_liststore = builder.get_object("writable_files_treestore") # Contains the tree with File Path, SELinux File Label, Class
self.writable_files_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.writable_files_filter = builder.get_object("writable_files_filter")
self.writable_files_filter.set_visible_func(self.filter_the_data)
self.writable_files_tab = builder.get_object("Writable_files_tab")
self.writable_files_tab_tooltip_txt = self.writable_files_tab.get_tooltip_text()
# writable files items ******************************
# Application File Types ****************************
self.application_files_treeview = builder.get_object("Application_files_treeview") # Get the Application files tree view
self.application_files_filter = builder.get_object("application_files_filter") # Contains the tree with File Path, Description, Class
self.application_files_filter.set_visible_func(self.filter_the_data)
self.application_files_liststore = builder.get_object("application_files_treestore")
self.application_files_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.application_files_tab = builder.get_object("Application_files_tab")
self.application_files_tab_tooltip_txt = self.application_files_tab.get_tooltip_text()
# Application File Type *****************************
# network items *************************************
self.network_radio_button = builder.get_object("network_button")
self.network_button_tooltip_txt = self.network_radio_button.get_tooltip_text()
self.network_out_treeview = builder.get_object("outbound_treeview")
self.network_out_liststore = builder.get_object("network_out_liststore")
self.network_out_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.network_out_filter = builder.get_object("network_out_filter")
self.network_out_filter.set_visible_func(self.filter_the_data)
self.network_out_tab = builder.get_object("network_out_tab")
self.network_out_tab_tooltip_txt = self.network_out_tab.get_tooltip_text()
self.network_in_treeview = builder.get_object("inbound_treeview")
self.network_in_liststore = builder.get_object("network_in_liststore")
self.network_in_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.network_in_filter = builder.get_object("network_in_filter")
self.network_in_filter.set_visible_func(self.filter_the_data)
self.network_in_tab = builder.get_object("network_in_tab")
self.network_in_tab_tooltip_txt = self.network_in_tab.get_tooltip_text()
# network items *************************************
# boolean items ************************************
self.boolean_treeview = builder.get_object("Boolean_treeview") # Get the booleans tree list
self.boolean_liststore = builder.get_object("boolean_liststore")
self.boolean_liststore.set_sort_column_id(2, Gtk.SortType.ASCENDING)
self.boolean_filter = builder.get_object("boolean_filter")
self.boolean_filter.set_visible_func(self.filter_the_data)
self.boolean_more_detail_window = builder.get_object("booleans_more_detail_window")
self.boolean_more_detail_treeview = builder.get_object("booleans_more_detail_treeview")
self.boolean_more_detail_tree_data_set = builder.get_object("booleans_more_detail_liststore")
self.boolean_radio_button = builder.get_object("Booleans_button")
self.active_button = self.boolean_radio_button
self.boolean_button_tooltip_txt = self.boolean_radio_button.get_tooltip_text()
# boolean items ************************************
# transitions items ************************************
self.transitions_into_treeview = builder.get_object("transitions_into_treeview") # Get the transitions tree list Enabled, source, Executable File
self.transitions_into_liststore = builder.get_object("transitions_into_liststore") # Contains the tree with
self.transitions_into_liststore.set_sort_column_id(1, Gtk.SortType.ASCENDING)
self.transitions_into_filter = builder.get_object("transitions_into_filter")
self.transitions_into_filter.set_visible_func(self.filter_the_data)
self.transitions_into_tab = builder.get_object("Transitions_into_tab")
self.transitions_into_tab_tooltip_txt = self.transitions_into_tab.get_tooltip_text()
self.transitions_radio_button = builder.get_object("Transitions_button")
self.transitions_button_tooltip_txt = self.transitions_radio_button.get_tooltip_text()
self.transitions_from_treeview = builder.get_object("transitions_from_treeview") # Get the transitions tree list
self.transitions_from_treestore = builder.get_object("transitions_from_treestore") # Contains the tree with Enabled, Executable File Type, Transtype
self.transitions_from_treestore.set_sort_column_id(2, Gtk.SortType.ASCENDING)
self.transitions_from_filter = builder.get_object("transitions_from_filter")
self.transitions_from_filter.set_visible_func(self.filter_the_data)
self.transitions_from_tab = builder.get_object("Transitions_from_tab")
self.transitions_from_tab_tooltip_txt = self.transitions_from_tab.get_tooltip_text()
self.transitions_file_treeview = builder.get_object("file_transitions_treeview") # Get the transitions tree list
self.transitions_file_liststore = builder.get_object("file_transitions_liststore") # Contains the tree with Enabled, Executable File Type, Transtype
self.transitions_file_liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.transitions_file_filter = builder.get_object("file_transitions_filter")
self.transitions_file_filter.set_visible_func(self.filter_the_data)
self.transitions_file_tab = builder.get_object("file_transitions")
        self.transitions_file_tab_tooltip_txt = self.transitions_file_tab.get_tooltip_text()
# transitions items ************************************
# Combobox and Entry items **************************
self.combobox_menu = builder.get_object("combobox_org") # This is the combobox box object, aka the arrow next to the entry text bar
self.application_liststore = builder.get_object("application_liststore")
self.completion_entry = builder.get_object("completion_entry") # self.combobox_menu.get_child()
self.entrycompletion_obj = builder.get_object("entrycompletion_obj")
#self.entrycompletion_obj = Gtk.EntryCompletion()
self.entrycompletion_obj.set_minimum_key_length(0)
self.entrycompletion_obj.set_text_column(0)
self.entrycompletion_obj.set_match_func(self.match_func, None)
self.completion_entry.set_completion(self.entrycompletion_obj)
self.completion_entry.set_icon_from_stock(0, Gtk.STOCK_FIND)
# Combobox and Entry items **************************
# Modify buttons ************************************
self.show_modified_only = builder.get_object("Show_modified_only_toggle")
# Modify button *************************************
# status bar *****************************************
self.current_status_label = builder.get_object("Enforcing_label")
self.current_status_enforcing = builder.get_object("Enforcing_button")
self.current_status_permissive = builder.get_object("Permissive_button")
self.status_bar = builder.get_object("status_bar")
self.context_id = self.status_bar.get_context_id("SELinux status")
# filters *********************************************
self.filter_entry = builder.get_object("filter_entry")
self.filter_box = builder.get_object("filter_box")
self.add_modify_delete_box = builder.get_object("add_modify_delete_box")
# Get_model() sets the tree model filter to be the parent of the tree model (tree model has all the data in it)
# Toggle button ****************************************
self.cell = builder.get_object("activate")
self.del_cell_files = builder.get_object("files_toggle_delete")
self.del_cell_files.connect("toggled", self.on_toggle_update, self.files_delete_liststore)
self.del_cell_files_equiv = builder.get_object("file_equiv_toggle_delete1")
self.del_cell_files_equiv.connect("toggled", self.on_toggle_update, self.file_equiv_delete_liststore)
self.del_cell_user = builder.get_object("user_toggle_delete")
self.del_cell_user.connect("toggled", self.on_toggle_update, self.user_delete_liststore)
self.del_cell_login = builder.get_object("login_toggle_delete")
self.del_cell_login.connect("toggled", self.on_toggle_update, self.login_delete_liststore)
self.del_cell_network = builder.get_object("network_toggle_delete")
self.del_cell_network.connect("toggled", self.on_toggle_update, self.network_delete_liststore)
self.update_cell = builder.get_object("toggle_update")
# Notebook items ***************************************
self.outer_notebook = builder.get_object("outer_notebook")
self.inner_notebook_files = builder.get_object("files_inner_notebook")
self.inner_notebook_network = builder.get_object("network_inner_notebook")
self.inner_notebook_transitions = builder.get_object("transitions_inner_notebook")
# logind gui ***************************************
loading_gui = builder.get_object("loading_gui")
self.update_cell.connect("toggled", self.on_toggle_update, self.update_treestore)
self.all_entries = []
# Need to connect button on code because the tree view model is a treeviewsort
self.cell.connect("toggled", self.on_toggle, self.boolean_liststore)
self.loading = 1
path = None
if test:
self.all_domains = ["httpd_t", "abrt_t"]
if app and app not in self.all_domains:
self.all_domains.append(app)
else:
self.all_domains = sepolicy.get_all_domains()
self.all_domains.sort(key=str.lower)
if app and app not in self.all_domains:
                self.error(_("%s is not a valid domain") % app)
self.quit()
loading_gui.show()
length = len(self.all_domains)
for domain in self.all_domains:
# After the user selects a path in the drop down menu call
# get_init_entrypoint_target(entrypoint) to get the transtype
# which will give you the application
self.combo_box_add(domain, domain)
            self.percentage = float(self.loading) / float(length)
self.progress_bar.set_fraction(self.percentage)
self.progress_bar.set_pulse_step(self.percentage)
self.idle_func()
entrypoint = sepolicy.get_init_entrypoint(domain)
if entrypoint:
path = sepolicy.find_entrypoint_path(entrypoint)
if path:
self.combo_box_add(path, domain)
self.installed_list.append(path)
self.loading += 1
loading_gui.hide()
self.entrycompletion_obj.set_model(self.application_liststore)
self.advanced_search_treeview.set_model(self.advanced_search_sort)
dic = {
"on_combo_button_clicked": self.open_combo_menu,
"on_disable_ptrace_toggled": self.on_disable_ptrace,
"on_SELinux_window_configure_event": self.hide_combo_menu,
"on_entrycompletion_obj_match_selected": self.set_application_label,
"on_filter_changed": self.get_filter_data,
"on_save_changes_file_equiv_clicked": self.update_to_file_equiv,
"on_save_changes_login_clicked": self.update_to_login,
"on_save_changes_user_clicked": self.update_to_user,
"on_save_changes_files_clicked": self.update_to_files,
"on_save_changes_network_clicked": self.update_to_network,
"on_Advanced_text_files_button_press_event": self.reveal_advanced,
"item_in_tree_selected": self.cursor_changed,
"on_Application_file_types_treeview_configure_event": self.resize_wrap,
"on_save_delete_clicked": self.on_save_delete_clicked,
"on_moreTypes_treeview_files_row_activated": self.populate_type_combo,
"on_retry_button_files_clicked": self.invalid_entry_retry,
"on_make_path_recursive_toggled": self.recursive_path,
"on_files_path_entry_button_press_event": self.highlight_entry_text,
"on_files_path_entry_changed": self.autofill_add_files_entry,
"on_select_type_files_clicked": self.select_type_more,
"on_choose_file": self.on_browse_select,
"on_Enforcing_button_toggled": self.set_enforce,
"on_confirmation_close": self.confirmation_close,
"on_column_clicked": self.column_clicked,
"on_tab_switch": self.clear_filters,
"on_file_equiv_button_clicked": self.show_file_equiv_page,
"on_app/system_button_clicked": self.system_interface,
"on_app/users_button_clicked": self.users_interface,
"on_show_advanced_search_window": self.on_show_advanced_search_window,
"on_Show_mislabeled_files_toggled": self.show_mislabeled_files,
"on_Browse_button_files_clicked": self.browse_for_files,
"on_cancel_popup_clicked": self.close_popup,
"on_treeview_cursor_changed": self.cursor_changed,
"on_login_seuser_combobox_changed": self.login_seuser_combobox_change,
"on_user_roles_combobox_changed": self.user_roles_combobox_change,
"on_cancel_button_browse_clicked": self.close_config_window,
"on_apply_button_clicked": self.apply_changes_button_press,
"on_Revert_button_clicked": self.update_or_revert_changes,
"on_Update_button_clicked": self.update_or_revert_changes,
"on_advanced_filter_entry_changed": self.get_advanced_filter_data,
"on_advanced_search_treeview_row_activated": self.advanced_item_selected,
"on_Select_advanced_search_clicked": self.advanced_item_button_push,
"on_info_button_button_press_event": self.on_help_button,
"on_back_button_clicked": self.on_help_back_clicked,
"on_forward_button_clicked": self.on_help_forward_clicked,
"on_Boolean_treeview_columns_changed": self.resize_columns,
"on_completion_entry_changed": self.application_selected,
"on_Add_button_clicked": self.add_button_clicked,
"on_Delete_button_clicked": self.delete_button_clicked,
"on_Modify_button_clicked": self.modify_button_clicked,
"on_Show_modified_only_toggled": self.on_show_modified_only,
"on_cancel_button_config_clicked": self.close_config_window,
"on_Import_button_clicked": self.import_config_show,
"on_Export_button_clicked": self.export_config_show,
"on_enable_unconfined_toggled": self.unconfined_toggle,
"on_enable_permissive_toggled": self.permissive_toggle,
"on_system_policy_type_combobox_changed": self.change_default_policy,
"on_Enforcing_button_default_toggled": self.change_default_mode,
"on_Permissive_button_default_toggled": self.change_default_mode,
"on_Disabled_button_default_toggled": self.change_default_mode,
"on_Relabel_button_toggled_cb": self.relabel_on_reboot,
"on_advanced_system_button_press_event": self.reveal_advanced_system,
"on_files_type_combobox_changed": self.show_more_types,
"on_filter_row_changed": self.filter_the_data,
"on_button_toggled": self.tab_change,
"gtk_main_quit": self.closewindow
}
self.previously_modified_initialize(customized)
builder.connect_signals(dic)
self.window.show() # Show the gui to the screen
GLib.timeout_add_seconds(5, self.selinux_status)
self.selinux_status()
self.lockdown_inited = False
self.add_modify_delete_box.hide()
self.filter_box.hide()
if self.status == DISABLED:
self.show_system_page()
else:
if self.application:
self.applications_selection_button.set_label(self.application)
self.completion_entry.set_text(self.application)
self.show_applications_page()
self.tab_change()
else:
self.clearbuttons()
self.outer_notebook.set_current_page(START_PAGE)
self.reinit()
self.finish_init = True
Gtk.main()
def init_cur(self):
self.cur_dict = {}
for k in keys:
self.cur_dict[k] = {}
def remove_cur(self, ctr):
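        # 'ctr' is the row index in the flattened view of cur_dict; iterate in
        # the same order to locate and delete the matching pending change.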
i = 0
for k in self.cur_dict:
for j in self.cur_dict[k]:
if i == ctr:
del(self.cur_dict[k][j])
return
i += 1
def selinux_status(self):
try:
self.status = selinux.security_getenforce()
except OSError:
self.status = DISABLED
if self.status == DISABLED:
self.current_status_label.set_sensitive(False)
self.current_status_enforcing.set_sensitive(False)
self.current_status_permissive.set_sensitive(False)
self.enforcing_button_default.set_sensitive(False)
self.status_bar.push(self.context_id, _("System Status: Disabled"))
self.info_text.set_label(DISABLED_TEXT)
else:
self.set_enforce_text(self.status)
if os.path.exists('/.autorelabel'):
self.relabel_button.set_active(True)
else:
self.relabel_button_no.set_active(True)
policytype = selinux.selinux_getpolicytype()[1]
mode = selinux.selinux_getenforcemode()[1]
if mode == ENFORCING:
self.enforcing_button_default.set_active(True)
if mode == PERMISSIVE:
self.permissive_button_default.set_active(True)
if mode == DISABLED:
self.disabled_button_default.set_active(True)
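        # Return True so the GLib.timeout_add_seconds() poll keeps rescheduling
        # this status refresh.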
return True
def lockdown_init(self):
if self.lockdown_inited:
return
self.wait_mouse()
self.lockdown_inited = True
self.disable_ptrace_button.set_active(selinux.security_get_boolean_active("deny_ptrace"))
self.module_dict = {}
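        # Each semodule_list() row looks like "<priority> <name> ..." with a
        # trailing field when the module is disabled; key the dict by module name.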
for m in self.dbus.semodule_list().split("\n"):
mod = m.split()
if len(mod) < 3:
continue
            self.module_dict[mod[1]] = {"priority": mod[0], "Disabled": (len(mod) > 3)}
self.enable_unconfined_button.set_active(not self.module_dict["unconfined"]["Disabled"])
self.enable_permissive_button.set_active(not self.module_dict["permissivedomains"]["Disabled"])
self.ready_mouse()
def column_clicked(self, treeview, treepath, treecol, *args):
iter = self.get_selected_iter()
if not iter:
return
if self.opage == BOOLEANS_PAGE:
if treecol.get_name() == "more_detail_col":
self.display_more_detail(self.window, treepath)
if self.opage == FILES_PAGE:
visible = self.liststore.get_value(iter, 3)
# If visible is true then fix mislabeled will be visible
if treecol.get_name() == "restorecon_col" and visible:
self.fix_mislabeled(self.liststore.get_value(iter, 0))
if self.opage == TRANSITIONS_PAGE:
bool_name = self.liststore.get_value(iter, 1)
if bool_name:
self.boolean_radio_button.clicked()
self.filter_entry.set_text(bool_name)
def idle_func(self):
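        # Drain pending GTK events so the UI (e.g. the loading progress bar)
        # repaints while long initialization loops run.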
while Gtk.events_pending():
Gtk.main_iteration()
def match_func(self, completion, key_string, iter, func_data):
try:
if self.application_liststore.get_value(iter, 0).find(key_string) != -1:
return True
return False
        except AttributeError:
            return False
def help_show_page(self):
self.back_button.set_sensitive(self.help_page != 0)
self.forward_button.set_sensitive(self.help_page < (len(self.help_list) - 1))
        try:
            with open("%shelp/%s.txt" % (self.code_path, self.help_list[self.help_page]), "r") as fd:
                buf = fd.read()
        except IOError:
            buf = ""
help_text = self.help_text.get_buffer()
help_text.set_text(buf % {"APP": self.application})
self.help_text.set_buffer(help_text)
self.help_image.set_from_file("%shelp/%s.png" % (self.code_path, self.help_list[self.help_page]))
self.show_popup(self.help_window)
def on_help_back_clicked(self, *args):
self.help_page -= 1
self.help_show_page()
def on_help_forward_clicked(self, *args):
self.help_page += 1
self.help_show_page()
def on_help_button(self, *args):
self.help_page = 0
self.help_list = []
if self.opage == START_PAGE:
self.help_window.set_title(_("Help: Start Page"))
self.help_list = ["start"]
if self.opage == BOOLEANS_PAGE:
self.help_window.set_title(_("Help: Booleans Page"))
self.help_list = ["booleans", "booleans_toggled", "booleans_more", "booleans_more_show"]
if self.opage == FILES_PAGE:
ipage = self.inner_notebook_files.get_current_page()
if ipage == EXE_PAGE:
self.help_window.set_title(_("Help: Executable Files Page"))
self.help_list = ["files_exec"]
if ipage == WRITABLE_PAGE:
self.help_window.set_title(_("Help: Writable Files Page"))
self.help_list = ["files_write"]
if ipage == APP_PAGE:
self.help_window.set_title(_("Help: Application Types Page"))
self.help_list = ["files_app"]
if self.opage == NETWORK_PAGE:
ipage = self.inner_notebook_network.get_current_page()
if ipage == OUTBOUND_PAGE:
self.help_window.set_title(_("Help: Outbound Network Connections Page"))
self.help_list = ["ports_outbound"]
if ipage == INBOUND_PAGE:
self.help_window.set_title(_("Help: Inbound Network Connections Page"))
self.help_list = ["ports_inbound"]
if self.opage == TRANSITIONS_PAGE:
ipage = self.inner_notebook_transitions.get_current_page()
if ipage == TRANSITIONS_FROM_PAGE:
self.help_window.set_title(_("Help: Transition from application Page"))
self.help_list = ["transition_from", "transition_from_boolean", "transition_from_boolean_1", "transition_from_boolean_2"]
if ipage == TRANSITIONS_TO_PAGE:
self.help_window.set_title(_("Help: Transition into application Page"))
self.help_list = ["transition_to"]
if ipage == TRANSITIONS_FILE_PAGE:
self.help_window.set_title(_("Help: Transition application file Page"))
self.help_list = ["transition_file"]
if self.opage == SYSTEM_PAGE:
self.help_window.set_title(_("Help: Systems Page"))
self.help_list = ["system", "system_boot_mode", "system_current_mode", "system_export", "system_policy_type", "system_relabel"]
if self.opage == LOCKDOWN_PAGE:
self.help_window.set_title(_("Help: Lockdown Page"))
self.help_list = ["lockdown", "lockdown_unconfined", "lockdown_permissive", "lockdown_ptrace"]
if self.opage == LOGIN_PAGE:
self.help_window.set_title(_("Help: Login Page"))
self.help_list = ["login", "login_default"]
if self.opage == USER_PAGE:
self.help_window.set_title(_("Help: SELinux User Page"))
self.help_list = ["users"]
if self.opage == FILE_EQUIV_PAGE:
self.help_window.set_title(_("Help: File Equivalence Page"))
self.help_list = ["file_equiv"]
return self.help_show_page()
def open_combo_menu(self, *args):
if self.popup == 0:
self.popup = 1
location = self.window.get_position()
self.main_selection_window.move(location[0] + 2, location[1] + 65)
self.main_selection_window.show()
else:
self.main_selection_window.hide()
self.popup = 0
def hide_combo_menu(self, *args):
self.main_selection_window.hide()
self.popup = 0
def set_application_label(self, *args):
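        # Rebinds the instance attribute over the method name; the signal handler
        # was already bound in connect_signals(), so this simply records a flag.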
self.set_application_label = True
def resize_wrap(self, *args):
print(args)
def initialize_system_default_mode(self):
self.enforce_mode = selinux.selinux_getenforcemode()[1]
if self.enforce_mode == ENFORCING:
self.enforce_button = self.enforcing_button_default
if self.enforce_mode == PERMISSIVE:
self.enforce_button = self.permissive_button_default
if self.enforce_mode == DISABLED:
self.enforce_button = self.disabled_button_default
def populate_system_policy(self):
selinux_path = selinux.selinux_path()
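        # os.walk() yields (dirpath, dirnames, filenames); keep the immediate
        # subdirectories of the SELinux config root, one per installed policy type.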
types = [x[1] for x in os.walk(selinux_path) if x[0] == selinux_path][0]
types.sort()
ctr = 0
for item in types:
iter = self.system_policy_type_liststore.append()
self.system_policy_type_liststore.set_value(iter, 0, item)
if item == self.initialtype:
self.system_policy_type_combobox.set_active(ctr)
self.typeHistory = ctr
ctr += 1
return ctr
def filter_the_data(self, list, iter, *args):
# When there is no txt in the box show all items in the tree
if self.filter_txt == "":
return True
        try:
            for x in range(0, list.get_n_columns()):
                try:
                    val = list.get_value(iter, x)
                    # Skip boolean and empty columns
                    if isinstance(val, bool) or val is None:
                        continue
                    # Returns true if filter_txt exists within the val
                    if val.find(self.filter_txt) != -1 or val.lower().find(self.filter_txt) != -1:
                        return True
                except (AttributeError, TypeError):
                    pass
        except ValueError:
            pass
return False
def net_update(self, app, netd, protocol, direction, model):
for k in list(netd.keys()):
for t, ports in netd[k]:
pkey = (",".join(ports), protocol)
if pkey in self.cur_dict["port"]:
if self.cur_dict["port"][pkey]["action"] == "-d":
continue
if t != self.cur_dict["port"][pkey]["type"]:
continue
self.network_initial_data_insert(model, ", ".join(ports), t, protocol)
def file_equiv_initialize(self):
self.wait_mouse()
edict = sepolicy.get_file_equiv()
self.file_equiv_liststore.clear()
for f in edict:
iter = self.file_equiv_liststore.append()
if edict[f]["modify"]:
name = self.markup(f)
equiv = self.markup(edict[f]["equiv"])
else:
name = f
equiv = edict[f]["equiv"]
self.file_equiv_liststore.set_value(iter, 0, name)
self.file_equiv_liststore.set_value(iter, 1, equiv)
self.file_equiv_liststore.set_value(iter, 2, edict[f]["modify"])
self.ready_mouse()
def user_initialize(self):
self.wait_mouse()
self.user_liststore.clear()
for u in sepolicy.get_selinux_users():
iter = self.user_liststore.append()
self.user_liststore.set_value(iter, 0, str(u["name"]))
roles = u["roles"]
if "object_r" in roles:
roles.remove("object_r")
self.user_liststore.set_value(iter, 1, ", ".join(roles))
self.user_liststore.set_value(iter, 2, u["level"])
self.user_liststore.set_value(iter, 3, u["range"])
self.user_liststore.set_value(iter, 4, True)
self.ready_mouse()
def login_initialize(self):
self.wait_mouse()
self.login_liststore.clear()
for u in sepolicy.get_login_mappings():
iter = self.login_liststore.append()
self.login_liststore.set_value(iter, 0, u["name"])
self.login_liststore.set_value(iter, 1, u["seuser"])
self.login_liststore.set_value(iter, 2, u["mls"])
self.login_liststore.set_value(iter, 3, True)
self.ready_mouse()
def network_initialize(self, app):
netd = sepolicy.network.get_network_connect(app, "tcp", "name_connect", check_bools=True)
self.net_update(app, netd, "tcp", OUTBOUND_PAGE, self.network_out_liststore)
netd = sepolicy.network.get_network_connect(app, "tcp", "name_bind", check_bools=True)
self.net_update(app, netd, "tcp", INBOUND_PAGE, self.network_in_liststore)
netd = sepolicy.network.get_network_connect(app, "udp", "name_bind", check_bools=True)
self.net_update(app, netd, "udp", INBOUND_PAGE, self.network_in_liststore)
def network_initial_data_insert(self, model, ports, portType, protocol):
iter = model.append()
model.set_value(iter, 0, ports)
model.set_value(iter, 1, protocol)
model.set_value(iter, 2, portType)
model.set_value(iter, 4, True)
def combo_set_active_text(self, combobox, val):
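        # Select 'val' in the combobox; if absent, insert it just before a
        # trailing "More..." sentinel row, or append it at the end.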
ctr = 0
liststore = combobox.get_model()
for i in liststore:
if i[0] == val:
combobox.set_active(ctr)
return
ctr += 1
niter = liststore.get_iter(ctr - 1)
if liststore.get_value(niter, 0) == _("More..."):
iter = liststore.insert_before(niter)
ctr = ctr - 1
else:
iter = liststore.append()
liststore.set_value(iter, 0, val)
combobox.set_active(ctr)
def combo_get_active_text(self, combobox):
liststore = combobox.get_model()
index = combobox.get_active()
if index < 0:
return None
iter = liststore.get_iter(index)
return liststore.get_value(iter, 0)
def combo_box_add(self, val, val1):
        if val is None:
return
iter = self.application_liststore.append()
self.application_liststore.set_value(iter, 0, val)
self.application_liststore.set_value(iter, 1, val1)
def select_type_more(self, *args):
app = self.moreTypes_treeview.get_selection()
iter = app.get_selected()[1]
        if iter is None:
return
app = self.more_types_files_liststore.get_value(iter, 0)
self.combo_set_active_text(self.files_type_combobox, app)
self.closewindow(self.moreTypes_window_files)
def advanced_item_button_push(self, *args):
row = self.advanced_search_treeview.get_selection()
model, iter = row.get_selected()
iter = model.convert_iter_to_child_iter(iter)
iter = self.advanced_search_filter.convert_iter_to_child_iter(iter)
app = self.application_liststore.get_value(iter, 1)
        if app is None:
return
self.advanced_filter_entry.set_text('')
self.advanced_search_window.hide()
self.reveal_advanced(self.main_advanced_label)
self.completion_entry.set_text(app)
def advanced_item_selected(self, treeview, path, *args):
iter = self.advanced_search_filter.get_iter(path)
iter = self.advanced_search_filter.convert_iter_to_child_iter(iter)
app = self.application_liststore.get_value(iter, 1)
self.advanced_filter_entry.set_text('')
self.advanced_search_window.hide()
self.reveal_advanced(self.main_advanced_label)
self.completion_entry.set_text(app)
self.application_selected()
def find_application(self, app):
if app and len(app) > 0:
for items in self.application_liststore:
if app == items[0]:
return True
return False
def application_selected(self, *args):
self.show_mislabeled_files_only.set_visible(False)
self.mislabeled_files_label.set_visible(False)
self.warning_files.set_visible(False)
self.filter_entry.set_text('')
app = self.completion_entry.get_text()
if not self.find_application(app):
return
self.show_applications_page()
self.add_button.set_sensitive(True)
self.delete_button.set_sensitive(True)
        # Clear the trees to prepare for a new selection; otherwise data piles up
        # every time the user selects a new item from the drop-down menu.
        self.executable_files_liststore.clear()
self.network_in_liststore.clear()
self.network_out_liststore.clear()
self.boolean_liststore.clear()
self.transitions_into_liststore.clear()
self.transitions_from_treestore.clear()
self.application_files_liststore.clear()
self.writable_files_liststore.clear()
self.transitions_file_liststore.clear()
try:
if app[0] == '/':
app = sepolicy.get_init_transtype(app)
if not app:
return
self.application = app
except IndexError:
pass
self.wait_mouse()
self.previously_modified_initialize(self.dbus.customized())
self.reinit()
self.boolean_initialize(app)
self.mislabeled_files = False
self.executable_files_initialize(app)
self.network_initialize(app)
self.writable_files_initialize(app)
self.transitions_into_initialize(app)
self.transitions_from_initialize(app)
self.application_files_initialize(app)
self.transitions_files_initialize(app)
        self.executable_files_tab.set_tooltip_text(_("File path used to enter the '%s' domain.") % app)
        self.writable_files_tab.set_tooltip_text(_("Files to which the '%s' domain can write.") % app)
        self.network_out_tab.set_tooltip_text(_("Network Ports to which the '%s' is allowed to connect.") % app)
        self.network_in_tab.set_tooltip_text(_("Network Ports to which the '%s' is allowed to listen.") % app)
        self.application_files_tab.set_tooltip_text(_("File Types defined for the '%s'.") % app)
        self.boolean_radio_button.set_tooltip_text(_("Display boolean information that can be used to modify the policy for the '%s'.") % app)
        self.files_radio_button.set_tooltip_text(_("Display file type information that can be used by the '%s'.") % app)
        self.network_radio_button.set_tooltip_text(_("Display network ports to which the '%s' can connect or listen to.") % app)
        self.transitions_into_tab.set_label(_("Application Transitions Into '%s'") % app)
        self.transitions_from_tab.set_label(_("Application Transitions From '%s'") % app)
        self.transitions_file_tab.set_label(_("File Transitions From '%s'") % app)
        self.transitions_into_tab.set_tooltip_text(_("Executables which will transition to the '%s', when executing the selected domain's entrypoint.") % app)
        self.transitions_from_tab.set_tooltip_text(_("Executables which will transition to a different domain, when the '%s' executes them.") % app)
        self.transitions_file_tab.set_tooltip_text(_("Files created by the '%s' domain will transition to a different label.") % app)
        self.transitions_radio_button.set_tooltip_text(_("Display applications that can transition into or out of the '%s'.") % app)
self.application = app
self.applications_selection_button.set_label(self.application)
self.ready_mouse()
def reinit(self):
sepolicy.reinit()
self.fcdict = sepolicy.get_fcdict()
self.local_file_paths = sepolicy.get_local_file_paths()
def previously_modified_initialize(self, buf):
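        # 'buf' is the customization dump from self.dbus.customized(); each line
        # is a semanage-style record, indexed here by its record type.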
self.cust_dict = {}
for i in buf.split("\n"):
rec = i.split()
if len(rec) == 0:
continue
if rec[1] == "-D":
continue
if rec[0] not in self.cust_dict:
self.cust_dict[rec[0]] = {}
if rec[0] == "boolean":
self.cust_dict["boolean"][rec[-1]] = {"active": rec[2] == "-1"}
if rec[0] == "login":
self.cust_dict["login"][rec[-1]] = {"seuser": rec[3], "range": rec[5]}
if rec[0] == "interface":
self.cust_dict["interface"][rec[-1]] = {"type": rec[3]}
if rec[0] == "user":
self.cust_dict["user"][rec[-1]] = {"level": "s0", "range": rec[3], "role": rec[5]}
if rec[0] == "port":
self.cust_dict["port"][(rec[-1], rec[-2])] = {"type": rec[3]}
if rec[0] == "node":
self.cust_dict["node"][rec[-1]] = {"mask": rec[3], "protocol": rec[5], "type": rec[7]}
if rec[0] == "fcontext":
if rec[2] == "-e":
if "fcontext-equiv" not in self.cust_dict:
self.cust_dict["fcontext-equiv"] = {}
self.cust_dict["fcontext-equiv"][(rec[-1])] = {"equiv": rec[3]}
else:
self.cust_dict["fcontext"][(rec[-1], rec[3])] = {"type": rec[5]}
if rec[0] == "module":
self.cust_dict["module"][rec[-1]] = {"enabled": rec[2] != "-d"}
for i in keys:
if i not in self.cust_dict:
self.cust_dict.update({i: {}})
if not self.cust_dict["module"]:
return
for semodule, button in [("unconfined", self.disable_unconfined_button), ("permissivedomains", self.disable_permissive_button)]:
if semodule in self.cust_dict["module"]:
button.set_active(self.cust_dict["module"][semodule]["enabled"])
def executable_files_initialize(self, application):
self.entrypoints = sepolicy.get_entrypoints(application)
for exe in list(self.entrypoints.keys()):
if len(self.entrypoints[exe]) == 0:
continue
file_class = self.entrypoints[exe][1]
for path in self.entrypoints[exe][0]:
if (path, file_class) in self.cur_dict["fcontext"]:
if self.cur_dict["fcontext"][(path, file_class)]["action"] == "-d":
continue
if exe != self.cur_dict["fcontext"][(path, file_class)]["type"]:
continue
self.files_initial_data_insert(self.executable_files_liststore, path, exe, file_class)
def mislabeled(self, path):
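        # A path is mislabeled when its on-disk label (getfilecon) differs from
        # the label the policy expects for it (matchpathcon).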
try:
con = selinux.matchpathcon(path, 0)[1]
cur = selinux.getfilecon(path)[1]
return con != cur
except OSError:
return False
def set_mislabeled(self, tree, path, iter, niter):
if not self.mislabeled(path):
return
con = selinux.matchpathcon(path, 0)[1]
cur = selinux.getfilecon(path)[1]
self.mislabeled_files = True
# Set visibility of label
tree.set_value(niter, 3, True)
# Has a mislabel
tree.set_value(iter, 4, True)
tree.set_value(niter, 4, True)
tree.set_value(iter, 5, con.split(":")[2])
tree.set_value(iter, 6, cur.split(":")[2])
def writable_files_initialize(self, application):
# Traversing the dictionary data struct
self.writable_files = sepolicy.get_writable_files(application)
for write in list(self.writable_files.keys()):
if len(self.writable_files[write]) < 2:
self.files_initial_data_insert(self.writable_files_liststore, None, write, _("all files"))
continue
file_class = self.writable_files[write][1]
for path in self.writable_files[write][0]:
if (path, file_class) in self.cur_dict["fcontext"]:
if self.cur_dict["fcontext"][(path, file_class)]["action"] == "-d":
continue
if write != self.cur_dict["fcontext"][(path, file_class)]["type"]:
continue
self.files_initial_data_insert(self.writable_files_liststore, path, write, file_class)
def files_initial_data_insert(self, liststore, path, seLinux_label, file_class):
iter = liststore.append(None)
        if path is None:
path = _("MISSING FILE PATH")
modify = False
else:
modify = (path, file_class) in self.local_file_paths
for p in sepolicy.find_file(path):
niter = liststore.append(iter)
liststore.set_value(niter, 0, p)
self.set_mislabeled(liststore, p, iter, niter)
if modify:
path = self.markup(path)
                seLinux_label = self.markup(seLinux_label)
                file_class = self.markup(file_class)
liststore.set_value(iter, 0, path)
liststore.set_value(iter, 1, seLinux_label)
liststore.set_value(iter, 2, file_class)
liststore.set_value(iter, 7, modify)
def markup(self, f):
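        # Bold markup flags rows that are backed by local policy customizations.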
return "<b>%s</b>" % f
def unmarkup(self, f):
if f:
return re.sub("</b>$", "", re.sub("^<b>", "", f))
return None
def application_files_initialize(self, application):
self.file_types = sepolicy.get_file_types(application)
for app in list(self.file_types.keys()):
if len(self.file_types[app]) == 0:
continue
file_class = self.file_types[app][1]
for path in self.file_types[app][0]:
desc = sepolicy.get_description(app, markup=self.markup)
if (path, file_class) in self.cur_dict["fcontext"]:
if self.cur_dict["fcontext"][(path, file_class)]["action"] == "-d":
continue
if app != self.cur_dict["fcontext"][(path, file_class)]["type"]:
continue
self.files_initial_data_insert(self.application_files_liststore, path, desc, file_class)
def modified(self):
        for k in self.cur_dict:
            if len(self.cur_dict[k]) > 0:
                return True
        return False
def boolean_initialize(self, application):
for blist in sepolicy.get_bools(application):
for b, active in blist:
if b in self.cur_dict["boolean"]:
active = self.cur_dict["boolean"][b]['active']
desc = sepolicy.boolean_desc(b)
self.boolean_initial_data_insert(b, desc, active)
def boolean_initial_data_insert(self, val, desc, active):
# Insert data from data source into tree
iter = self.boolean_liststore.append()
self.boolean_liststore.set_value(iter, 0, active)
self.boolean_liststore.set_value(iter, 1, desc)
self.boolean_liststore.set_value(iter, 2, val)
self.boolean_liststore.set_value(iter, 3, _('More...'))
def transitions_into_initialize(self, application):
for x in sepolicy.get_transitions_into(application):
active = None
executable = None
source = None
if "boolean" in x:
active = x["boolean"]
if "target" in x:
executable = x["target"]
if "source" in x:
source = x["source"]
self.transitions_into_initial_data_insert(active, executable, source)
def transitions_into_initial_data_insert(self, active, executable, source):
iter = self.transitions_into_liststore.append()
        if active is not None:
self.transitions_into_liststore.set_value(iter, 0, enabled[active[0][1]]) # active[0][1] is either T or F (enabled is all the way at the top)
else:
self.transitions_into_liststore.set_value(iter, 0, "Default")
self.transitions_into_liststore.set_value(iter, 2, executable)
self.transitions_into_liststore.set_value(iter, 1, source)
def transitions_from_initialize(self, application):
for x in sepolicy.get_transitions(application):
active = None
            executable_type = None
transtype = None
if "boolean" in x:
active = x["boolean"]
if "target" in x:
executable_type = x["target"]
if "transtype" in x:
transtype = x["transtype"]
self.transitions_from_initial_data_insert(active, executable_type, transtype)
try:
for executable in self.fcdict[executable_type]["regex"]:
self.transitions_from_initial_data_insert(active, executable, transtype)
except KeyError:
pass
def transitions_from_initial_data_insert(self, active, executable, transtype):
iter = self.transitions_from_treestore.append(None)
        if active is None:
self.transitions_from_treestore.set_value(iter, 0, "Default")
self.transitions_from_treestore.set_value(iter, 5, False)
else:
niter = self.transitions_from_treestore.append(iter)
# active[0][1] is either T or F (enabled is all the way at the top)
self.transitions_from_treestore.set_value(iter, 0, enabled[active[0][1]])
markup = '<span foreground="blue"><u>%s</u></span>'
            if active[0][1]:
                self.transitions_from_treestore.set_value(niter, 2, _("To disable this transition, go to the ") + markup % _("Boolean section."))
            else:
                self.transitions_from_treestore.set_value(niter, 2, _("To enable this transition, go to the ") + markup % _("Boolean section."))
# active[0][0] is the Bool Name
self.transitions_from_treestore.set_value(niter, 1, active[0][0])
self.transitions_from_treestore.set_value(niter, 5, True)
self.transitions_from_treestore.set_value(iter, 2, executable)
self.transitions_from_treestore.set_value(iter, 3, transtype)
def transitions_files_initialize(self, application):
for i in sepolicy.get_file_transitions(application):
if 'filename' in i:
filename = i['filename']
else:
filename = None
            self.transitions_files_initial_data_insert(i['target'], i['class'], i['transtype'], filename)
    def transitions_files_initial_data_insert(self, path, tclass, dest, name):
iter = self.transitions_file_liststore.append()
self.transitions_file_liststore.set_value(iter, 0, path)
self.transitions_file_liststore.set_value(iter, 1, tclass)
self.transitions_file_liststore.set_value(iter, 2, dest)
        if name is None:
name = '*'
self.transitions_file_liststore.set_value(iter, 3, name)
def tab_change(self, *args):
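        # Reset per-tab state, then show the page for the active radio button and
        # rewire self.treeview/treesort/treefilter/liststore for that tab.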
self.clear_filters()
self.treeview = None
self.treesort = None
self.treefilter = None
self.liststore = None
self.modify_button.set_sensitive(False)
self.add_modify_delete_box.hide()
self.show_modified_only.set_visible(False)
self.show_mislabeled_files_only.set_visible(False)
self.mislabeled_files_label.set_visible(False)
self.warning_files.set_visible(False)
if self.boolean_radio_button.get_active():
self.outer_notebook.set_current_page(BOOLEANS_PAGE)
self.treeview = self.boolean_treeview
self.show_modified_only.set_visible(True)
if self.files_radio_button.get_active():
self.show_popup(self.add_modify_delete_box)
self.show_modified_only.set_visible(True)
self.show_mislabeled_files_only.set_visible(self.mislabeled_files)
self.mislabeled_files_label.set_visible(self.mislabeled_files)
self.warning_files.set_visible(self.mislabeled_files)
self.outer_notebook.set_current_page(FILES_PAGE)
if args[0] == self.inner_notebook_files:
ipage = args[2]
else:
ipage = self.inner_notebook_files.get_current_page()
if ipage == EXE_PAGE:
self.treeview = self.executable_files_treeview
category = _("executable")
elif ipage == WRITABLE_PAGE:
self.treeview = self.writable_files_treeview
category = _("writable")
elif ipage == APP_PAGE:
self.treeview = self.application_files_treeview
category = _("application")
self.add_button.set_tooltip_text(_("Add new %(TYPE)s file path for '%(DOMAIN)s' domains.") % {"TYPE": category, "DOMAIN": self.application})
self.delete_button.set_tooltip_text(_("Delete %(TYPE)s file paths for '%(DOMAIN)s' domain.") % {"TYPE": category, "DOMAIN": self.application})
self.modify_button.set_tooltip_text(_("Modify %(TYPE)s file path for '%(DOMAIN)s' domain. Only bolded items in the list can be selected, this indicates they were modified previously.") % {"TYPE": category, "DOMAIN": self.application})
if self.network_radio_button.get_active():
self.add_modify_delete_box.show()
self.show_modified_only.set_visible(True)
self.outer_notebook.set_current_page(NETWORK_PAGE)
if args[0] == self.inner_notebook_network:
ipage = args[2]
else:
ipage = self.inner_notebook_network.get_current_page()
if ipage == OUTBOUND_PAGE:
self.treeview = self.network_out_treeview
category = _("connect")
if ipage == INBOUND_PAGE:
self.treeview = self.network_in_treeview
category = _("listen for inbound connections")
self.add_button.set_tooltip_text(_("Add new port definition to which the '%(APP)s' domain is allowed to %(PERM)s.") % {"APP": self.application, "PERM": category})
self.delete_button.set_tooltip_text(_("Delete modified port definitions to which the '%(APP)s' domain is allowed to %(PERM)s.") % {"APP": self.application, "PERM": category})
self.modify_button.set_tooltip_text(_("Modify port definitions to which the '%(APP)s' domain is allowed to %(PERM)s.") % {"APP": self.application, "PERM": category})
if self.transitions_radio_button.get_active():
self.outer_notebook.set_current_page(TRANSITIONS_PAGE)
if args[0] == self.inner_notebook_transitions:
ipage = args[2]
else:
ipage = self.inner_notebook_transitions.get_current_page()
if ipage == TRANSITIONS_FROM_PAGE:
self.treeview = self.transitions_from_treeview
if ipage == TRANSITIONS_TO_PAGE:
self.treeview = self.transitions_into_treeview
if ipage == TRANSITIONS_FILE_PAGE:
self.treeview = self.transitions_file_treeview
if self.system_radio_button.get_active():
self.outer_notebook.set_current_page(SYSTEM_PAGE)
self.filter_box.hide()
if self.lockdown_radio_button.get_active():
self.lockdown_init()
self.outer_notebook.set_current_page(LOCKDOWN_PAGE)
self.filter_box.hide()
if self.user_radio_button.get_active():
self.outer_notebook.set_current_page(USER_PAGE)
self.add_modify_delete_box.show()
self.show_modified_only.set_visible(True)
self.treeview = self.user_treeview
self.add_button.set_tooltip_text(_("Add new SELinux User/Role definition."))
self.delete_button.set_tooltip_text(_("Delete modified SELinux User/Role definitions."))
self.modify_button.set_tooltip_text(_("Modify selected modified SELinux User/Role definitions."))
if self.login_radio_button.get_active():
self.outer_notebook.set_current_page(LOGIN_PAGE)
self.add_modify_delete_box.show()
self.show_modified_only.set_visible(True)
self.treeview = self.login_treeview
self.add_button.set_tooltip_text(_("Add new Login Mapping definition."))
self.delete_button.set_tooltip_text(_("Delete modified Login Mapping definitions."))
self.modify_button.set_tooltip_text(_("Modify selected modified Login Mapping definitions."))
if self.file_equiv_radio_button.get_active():
self.outer_notebook.set_current_page(FILE_EQUIV_PAGE)
self.add_modify_delete_box.show()
self.show_modified_only.set_visible(True)
self.treeview = self.file_equiv_treeview
self.add_button.set_tooltip_text(_("Add new File Equivalence definition."))
self.delete_button.set_tooltip_text(_("Delete modified File Equivalence definitions."))
self.modify_button.set_tooltip_text(_("Modify selected modified File Equivalence definitions. Only bolded items in the list can be selected, this indicates they were modified previously."))
self.opage = self.outer_notebook.get_current_page()
if self.treeview:
self.filter_box.show()
self.treesort = self.treeview.get_model()
self.treefilter = self.treesort.get_model()
self.liststore = self.treefilter.get_model()
for x in range(0, self.liststore.get_n_columns()):
col = self.treeview.get_column(x)
if col:
cell = col.get_cells()[0]
if isinstance(cell, Gtk.CellRendererText):
self.liststore.set_sort_func(x, self.stripsort, None)
self.treeview.get_selection().unselect_all()
self.modify_button.set_sensitive(False)
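# GtkTreeModel sort function: compare two rows on the active sort column
# with any Pango markup stripped, so "<b>foo</b>" and "foo" sort together.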
def stripsort(self, model, row1, row2, user_data):
sort_column, sort_order = model.get_sort_column_id()  # avoid rebinding gettext's _
val1 = self.unmarkup(model.get_value(row1, sort_column))
if val1 is None:
val1 = ""
val2 = self.unmarkup(model.get_value(row2, sort_column))
if val2 is None:
val2 = ""
return (val1 > val2) - (val1 < val2)
def display_more_detail(self, windows, path):
it = self.boolean_filter.get_iter(path)
it = self.boolean_filter.convert_iter_to_child_iter(it)
self.boolean_more_detail_tree_data_set.clear()
self.boolean_more_detail_window.set_title(_("Boolean %s Allow Rules") % self.boolean_liststore.get_value(it, 2))
blist = sepolicy.get_boolean_rules(self.application, self.boolean_liststore.get_value(it, 2))
for b in blist:
self.display_more_detail_init(b["source"], b["target"], b["class"], b["permlist"])
self.show_popup(self.boolean_more_detail_window)
def display_more_detail_init(self, source, target, class_type, permission):
iter = self.boolean_more_detail_tree_data_set.append()
self.boolean_more_detail_tree_data_set.set_value(iter, 0, "allow %s %s:%s { %s };" % (source, target, class_type, " ".join(permission)))
def add_button_clicked(self, *args):
self.modify = False
if self.opage == NETWORK_PAGE:
self.popup_network_label.set_text((_("Add Network Port for %s. Ports will be created when update is applied.")) % self.application)
self.network_popup_window.set_title((_("Add Network Port for %s")) % self.application)
self.init_network_dialog(args)
return
if self.opage == FILES_PAGE:
self.popup_files_label.set_text((_("Add File Labeling for %s. File labels will be created when update is applied.")) % self.application)
self.files_popup_window.set_title((_("Add File Labeling for %s")) % self.application)
self.init_files_dialog(args)
ipage = self.inner_notebook_files.get_current_page()
if ipage == EXE_PAGE:
self.files_path_entry.set_text("ex: /usr/sbin/Foobar")
else:
self.files_path_entry.set_text("ex: /var/lib/Foobar")
self.clear_entry = True
if self.opage == LOGIN_PAGE:
self.login_label.set_text((_("Add Login Mapping. User Mapping will be created when Update is applied.")))
self.login_popup_window.set_title(_("Add Login Mapping"))
self.login_init_dialog(args)
self.clear_entry = True
if self.opage == USER_PAGE:
self.user_label.set_text((_("Add SELinux User Role. SELinux user roles will be created when update is applied.")))
self.user_popup_window.set_title(_("Add SELinux Users"))
self.user_init_dialog(args)
self.clear_entry = True
if self.opage == FILE_EQUIV_PAGE:
self.file_equiv_source_entry.set_text("")
self.file_equiv_dest_entry.set_text("")
self.file_equiv_label.set_text((_("Add File Equivalency Mapping. Mapping will be created when update is applied.")))
self.file_equiv_popup_window.set_title(_("Add SELinux File Equivalency"))
self.clear_entry = True
self.show_popup(self.file_equiv_popup_window)
self.new_updates()
def show_popup(self, window):
self.current_popup = window
window.show()
def close_popup(self, *args):
self.current_popup.hide()
self.window.set_sensitive(True)
return True
def modify_button_clicked(self, *args):
iter = None
if self.treeview:
iter = self.get_selected_iter()
if not iter:
self.modify_button.set_sensitive(False)
return
self.modify = True
if self.opage == NETWORK_PAGE:
self.modify_button_network_clicked(args)
if self.opage == FILES_PAGE:
self.popup_files_label.set_text((_("Modify File Labeling for %s. File labels will be created when update is applied.")) % self.application)
self.files_popup_window.set_title((_("Add File Labeling for %s")) % self.application)
self.delete_old_item = None
self.init_files_dialog(args)
self.modify = True
operation = "Modify"
mls = 1
ipage = self.inner_notebook_files.get_current_page()
if ipage == EXE_PAGE:
iter = self.executable_files_filter.convert_iter_to_child_iter(iter)
self.delete_old_item = iter
path = self.executable_files_liststore.get_value(iter, 0)
self.files_path_entry.set_text(path)
ftype = self.executable_files_liststore.get_value(iter, 1)
if ftype != None:
self.combo_set_active_text(self.files_type_combobox, ftype)
tclass = self.executable_files_liststore.get_value(iter, 2)
if tclass != None:
self.combo_set_active_text(self.files_class_combobox, tclass)
if ipage == WRITABLE_PAGE:
iter = self.writable_files_filter.convert_iter_to_child_iter(iter)
self.delete_old_item = iter
path = self.writable_files_liststore.get_value(iter, 0)
self.files_path_entry.set_text(path)
ftype = self.writable_files_liststore.get_value(iter, 1)
if ftype != None:
self.combo_set_active_text(self.files_type_combobox, ftype)
tclass = self.writable_files_liststore.get_value(iter, 2)
if tclass != None:
self.combo_set_active_text(self.files_class_combobox, tclass)
if ipage == APP_PAGE:
iter = self.application_files_filter.convert_iter_to_child_iter(iter)
self.delete_old_item = iter
path = self.application_files_liststore.get_value(iter, 0)
self.files_path_entry.set_text(path)
tclass = None
try:
# The file class is stored with Pango markup, e.g. "<b>all files</b>".
markup = self.application_files_liststore.get_value(iter, 1)
tclass = markup.split("<b>")[1].split("</b>")[0]
except (AttributeError, IndexError):
pass
ftype = self.application_files_liststore.get_value(iter, 2)
if ftype != None:
self.combo_set_active_text(self.files_type_combobox, ftype)
if tclass != None:
self.combo_set_active_text(self.files_class_combobox, tclass)
if self.opage == USER_PAGE:
self.user_init_dialog(args)
self.user_name_entry.set_text(self.user_liststore.get_value(iter, 0))
self.user_mls_level_entry.set_text(self.user_liststore.get_value(iter, 2))
self.user_mls_entry.set_text(self.user_liststore.get_value(iter, 3))
self.combo_set_active_text(self.user_roles_combobox, self.user_liststore.get_value(iter, 1))
self.user_label.set_text((_("Modify SELinux User Role. SELinux user roles will be modified when update is applied.")))
self.user_popup_window.set_title(_("Modify SELinux Users"))
self.show_popup(self.user_popup_window)
if self.opage == LOGIN_PAGE:
self.login_init_dialog(args)
self.login_name_entry.set_text(self.login_liststore.get_value(iter, 0))
self.login_mls_entry.set_text(self.login_liststore.get_value(iter, 2))
self.combo_set_active_text(self.login_seuser_combobox, self.login_liststore.get_value(iter, 1))
self.login_label.set_text((_("Modify Login Mapping. Login Mapping will be modified when Update is applied.")))
self.login_popup_window.set_title(_("Modify Login Mapping"))
self.show_popup(self.login_popup_window)
if self.opage == FILE_EQUIV_PAGE:
self.file_equiv_source_entry.set_text(self.file_equiv_liststore.get_value(iter, 0))
self.file_equiv_dest_entry.set_text(self.file_equiv_liststore.get_value(iter, 1))
self.file_equiv_label.set_text((_("Modify File Equivalency Mapping. Mapping will be created when update is applied.")))
self.file_equiv_popup_window.set_title(_("Modify SELinux File Equivalency"))
self.clear_entry = True
self.show_popup(self.file_equiv_popup_window)
def populate_type_combo(self, tree, loc, *args):
iter = self.more_types_files_liststore.get_iter(loc)
ftype = self.more_types_files_liststore.get_value(iter, 0)
self.combo_set_active_text(self.files_type_combobox, ftype)
self.show_popup(self.files_popup_window)
self.moreTypes_window_files.hide()
def strip_domain(self, domain):
if domain == None:
return
if domain.endswith("_script_t"):
split_char = "_script_t"
else:
split_char = "_t"
return domain.split(split_char)[0]
def exclude_type(self, type, exclude_list):
for e in exclude_list:
if type.startswith(e):
return True
return False
def init_files_dialog(self, *args):
exclude_list = []
self.files_class_combobox.set_sensitive(True)
self.show_popup(self.files_popup_window)
ipage = self.inner_notebook_files.get_current_page()
self.files_type_combolist.clear()
self.files_class_combolist.clear()
compare = self.strip_domain(self.application)
for d in self.application_liststore:
if d[0].startswith(compare) and d[0] != self.application and not d[0].startswith("httpd_sys"):
exclude_list.append(self.strip_domain(d[0]))
self.more_types_files_liststore.clear()
try:
for files in sepolicy.file_type_str:
iter = self.files_class_combolist.append()
self.files_class_combolist.set_value(iter, 0, sepolicy.file_type_str[files])
if ipage == EXE_PAGE and self.entrypoints != None:
for exe in list(self.entrypoints.keys()):
if exe.startswith(compare):
iter = self.files_type_combolist.append()
self.files_type_combolist.set_value(iter, 0, exe)
iter = self.more_types_files_liststore.append()
self.more_types_files_liststore.set_value(iter, 0, exe)
self.files_class_combobox.set_active(4)
self.files_class_combobox.set_sensitive(False)
elif ipage == WRITABLE_PAGE and self.writable_files != None:
for write in list(self.writable_files.keys()):
if write.startswith(compare) and not self.exclude_type(write, exclude_list) and write in self.file_types:
iter = self.files_type_combolist.append()
self.files_type_combolist.set_value(iter, 0, write)
iter = self.more_types_files_liststore.append()
self.more_types_files_liststore.set_value(iter, 0, write)
self.files_class_combobox.set_active(0)
elif ipage == APP_PAGE and self.file_types != None:
for app in sepolicy.get_all_file_types():
if app.startswith(compare) and not self.exclude_type(app, exclude_list):
iter = self.files_type_combolist.append()
self.files_type_combolist.set_value(iter, 0, app)
iter = self.more_types_files_liststore.append()
self.more_types_files_liststore.set_value(iter, 0, app)
self.files_class_combobox.set_active(0)
except AttributeError:
print("error")
pass
self.files_type_combobox.set_active(0)
self.files_mls_entry.set_text("s0")
iter = self.files_type_combolist.append()
self.files_type_combolist.set_value(iter, 0, _('More...'))
def modify_button_network_clicked(self, *args):
iter = self.get_selected_iter()
if not iter:
self.modify_button.set_sensitive(False)
return
self.popup_network_label.set_text((_("Modify Network Port for %s. Ports will be created when update is applied.")) % self.application)
self.network_popup_window.set_title((_("Modify Network Port for %s")) % self.application)
self.delete_old_item = None
self.init_network_dialog(args)
operation = "Modify"
mls = 1
self.modify = True
iter = self.get_selected_iter()
port = self.liststore.get_value(iter, 0)
self.network_ports_entry.set_text(port)
protocol = self.liststore.get_value(iter, 1)
if protocol == "tcp":
self.network_tcp_button.set_active(True)
elif protocol == "udp":
self.network_udp_button.set_active(True)
setype = self.liststore.get_value(iter, 2)
if setype != None:
self.combo_set_active_text(self.network_port_type_combobox, setype)
self.delete_old_item = iter
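# Prepare the Add/Modify Network Port dialog: clear the entries and fill
# the port-type combo from the policy, pre-selecting the type whose name
# matches the current domain's prefix.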
def init_network_dialog(self, *args):
self.show_popup(self.network_popup_window)
ipage = self.inner_notebook_network.get_current_page()
self.network_port_type_combolist.clear()
self.network_ports_entry.set_text("")
try:
if ipage == OUTBOUND_PAGE:
netd = sepolicy.network.get_network_connect(self.application, "tcp", "name_connect", check_bools=True)
elif ipage == INBOUND_PAGE:
netd = sepolicy.network.get_network_connect(self.application, "tcp", "name_bind", check_bools=True)
netd += sepolicy.network.get_network_connect(self.application, "udp", "name_bind", check_bools=True)
port_types = []
for k in list(netd.keys()):
for t, ports in netd[k]:
if t not in port_types + ["port_t", "unreserved_port_t"]:
if t.endswith("_type"):
continue
port_types.append(t)
port_types.sort()
short_domain = self.strip_domain(self.application)
if short_domain[-1] == "d":
short_domain = short_domain[:-1]
short_domain = short_domain + "_"
ctr = 0
found = 0
for t in port_types:
if t.startswith(short_domain):
found = ctr
iter = self.network_port_type_combolist.append()
self.network_port_type_combolist.set_value(iter, 0, t)
ctr += 1
self.network_port_type_combobox.set_active(found)
except AttributeError:
pass
self.network_tcp_button.set_active(True)
self.network_mls_entry.set_text("s0")
def login_seuser_combobox_change(self, combo, *args):
seuser = self.combo_get_active_text(combo)
if self.login_mls_entry.get_text() == "":
for u in sepolicy.get_selinux_users():
if seuser == u['name']:
self.login_mls_entry.set_text(u['range'])
def user_roles_combobox_change(self, combo, *args):
serole = self.combo_get_active_text(combo)
if self.user_mls_entry.get_text() == "":
for u in sepolicy.get_all_roles():
if serole == u['name']:
self.user_mls_entry.set_text(u['range'])
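# Map the tree selection back through the sort and filter models to an
# iter that is valid on the underlying liststore.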
def get_selected_iter(self):
iter = None
if not self.treeview:
return None
row = self.treeview.get_selection()
if not row:
return None
treesort, iter = row.get_selected()
if iter:
iter = treesort.convert_iter_to_child_iter(iter)
if iter:
iter = self.treefilter.convert_iter_to_child_iter(iter)
return iter
def cursor_changed(self, *args):
self.modify_button.set_sensitive(False)
iter = self.get_selected_iter()
if iter == None:
self.modify_button.set_sensitive(False)
return
if not self.liststore[iter] or not self.liststore[iter][-1]:
return
self.modify_button.set_sensitive(self.liststore[iter][-1])
def login_init_dialog(self, *args):
self.show_popup(self.login_popup_window)
self.login_seuser_combolist.clear()
users = sepolicy.get_all_users()
users.sort()
for u in users:
iter = self.login_seuser_combolist.append()
self.login_seuser_combolist.set_value(iter, 0, str(u))
self.login_name_entry.set_text("")
self.login_mls_entry.set_text("")
def user_init_dialog(self, *args):
self.show_popup(self.user_popup_window)
self.user_roles_combolist.clear()
roles = sepolicy.get_all_roles()
roles.sort()
for r in roles:
iter = self.user_roles_combolist.append()
self.user_roles_combolist.set_value(iter, 0, str(r))
self.user_name_entry.set_text("")
self.user_mls_entry.set_text("")
def on_disable_ptrace(self, checkbutton):
if self.finish_init:
update_buffer = "boolean -m -%d deny_ptrace" % checkbutton.get_active()
self.wait_mouse()
try:
self.dbus.semanage(update_buffer)
except dbus.exceptions.DBusException as e:
self.error(e)
self.ready_mouse()
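# When "show modified only" is toggled on, keep just the rows recorded in
# self.cust_dict for the current page; toggling it off re-runs the full
# initialize for that page.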
def on_show_modified_only(self, checkbutton):
length = self.liststore.get_n_columns()
def dup_row(row):
l = []
for i in range(0, length):
l.append(row[i])
return l
append_list = []
if self.opage == BOOLEANS_PAGE:
if not checkbutton.get_active():
return self.boolean_initialize(self.application)
for row in self.liststore:
if row[2] in self.cust_dict["boolean"]:
append_list.append(dup_row(row))
if self.opage == FILES_PAGE:
ipage = self.inner_notebook_files.get_current_page()
if not checkbutton.get_active():
if ipage == EXE_PAGE:
return self.executable_files_initialize(self.application)
if ipage == WRITABLE_PAGE:
return self.writable_files_initialize(self.application)
if ipage == APP_PAGE:
return self.application_files_initialize(self.application)
for row in self.liststore:
if (row[0], row[2]) in self.cust_dict["fcontext"]:
append_list.append(dup_row(row))
if self.opage == NETWORK_PAGE:
if not checkbutton.get_active():
return self.network_initialize(self.application)
for row in self.liststore:
if (row[0], row[1]) in self.cust_dict["port"]:
append_list.append(dup_row(row))
if self.opage == FILE_EQUIV_PAGE:
if not checkbutton.get_active():
return self.file_equiv_initialize()
for row in self.liststore:
if row[0] in self.cust_dict["fcontext-equiv"]:
append_list.append(dup_row(row))
if self.opage == USER_PAGE:
if not checkbutton.get_active():
return self.user_initialize()
for row in self.liststore:
if row[0] in self.cust_dict["user"]:
append_list.append(dup_row(row))
if self.opage == LOGIN_PAGE:
if not checkbutton.get_active():
return self.login_initialize()
for row in self.liststore:
if row[0] in self.cust_dict["login"]:
append_list.append(dup_row(row))
self.liststore.clear()
for row in append_list:
iter = self.liststore.append()
for i in range(0, length):
self.liststore.set_value(iter, i, row[i])
def init_modified_files_liststore(self, tree, app, ipage, operation, path, fclass, ftype):
iter = tree.append(None)
tree.set_value(iter, 0, path)
tree.set_value(iter, 1, ftype)
tree.set_value(iter, 2, fclass)
def restore_to_default(self, *args):
print("restore to defualt clicked...")
def invalid_entry_retry(self, *args):
self.closewindow(self.error_check_window)
self.files_popup_window.set_sensitive(True)
self.network_popup_window.set_sensitive(True)
def error_check_files(self, insert_txt):
if len(insert_txt) == 0 or insert_txt[0] != '/':
self.error_check_window.show()
self.files_popup_window.set_sensitive(False)
self.network_popup_window.set_sensitive(False)
self.error_check_label.set_text((_("The entry '%s' is not a valid path. Paths must begin with a '/'.")) % insert_txt)
return True
return False
def error_check_network(self, port):
try:
pnum = int(port)
if pnum < 1 or pnum > 65535:
raise ValueError
except ValueError:
self.error_check_window.show()
self.files_popup_window.set_sensitive(False)
self.network_popup_window.set_sensitive(False)
self.error_check_label.set_text((_("Port number must be between 1 and 65536")))
return True
return False
def show_more_types(self, *args):
if self.finish_init:
if self.combo_get_active_text(self.files_type_combobox) == _('More...'):
self.files_popup_window.hide()
self.moreTypes_window_files.show()
def update_to_login(self, *args):
self.close_popup()
seuser = self.combo_get_active_text(self.login_seuser_combobox)
mls_range = self.login_mls_entry.get_text()
name = self.login_name_entry.get_text()
if self.modify:
iter = self.get_selected_iter()
oldname = self.login_liststore.get_value(iter, 0)
oldseuser = self.login_liststore.get_value(iter, 1)
oldrange = self.login_liststore.get_value(iter, 2)
self.liststore.set_value(iter, 0, oldname)
self.liststore.set_value(iter, 1, oldseuser)
self.liststore.set_value(iter, 2, oldrange)
self.cur_dict["login"][name] = {"action": "-m", "range": mls_range, "seuser": seuser, "oldrange": oldrange, "oldseuser": oldseuser, "oldname": oldname}
else:
iter = self.liststore.append(None)
self.cur_dict["login"][name] = {"action": "-a", "range": mls_range, "seuser": seuser}
self.liststore.set_value(iter, 0, name)
self.liststore.set_value(iter, 1, seuser)
self.liststore.set_value(iter, 2, mls_range)
self.new_updates()
def update_to_user(self, *args):
self.close_popup()
roles = self.combo_get_active_text(self.user_roles_combobox)
level = self.user_mls_level_entry.get_text()
mls_range = self.user_mls_entry.get_text()
name = self.user_name_entry.get_text()
if self.modify:
iter = self.get_selected_iter()
oldname = self.user_liststore.get_value(iter, 0)
oldroles = self.user_liststore.get_value(iter, 1)
oldlevel = self.user_liststore.get_value(iter, 2)
oldrange = self.user_liststore.get_value(iter, 3)
self.liststore.set_value(iter, 0, oldname)
self.liststore.set_value(iter, 1, oldroles)
self.liststore.set_value(iter, 2, oldlevel)
self.liststore.set_value(iter, 3, oldrange)
self.cur_dict["user"][name] = {"action": "-m", "range": mls_range, "level": level, "role": roles, "oldrange": oldrange, "oldlevel": oldlevel, "oldroles": oldroles, "oldname": oldname}
else:
iter = self.liststore.append(None)
self.cur_dict["user"][name] = {"action": "-a", "range": mls_range, "level": level, "role": roles}
self.liststore.set_value(iter, 0, name)
self.liststore.set_value(iter, 1, roles)
self.liststore.set_value(iter, 2, level)
self.liststore.set_value(iter, 3, mls_range)
self.new_updates()
def update_to_file_equiv(self, *args):
self.close_popup()
dest = self.file_equiv_dest_entry.get_text()
src = self.file_equiv_source_entry.get_text()
if self.modify:
iter = self.get_selected_iter()
olddest = self.unmarkup(self.liststore.get_value(iter, 0))
oldsrc = self.unmarkup(self.liststore.get_value(iter, 1))
self.cur_dict["fcontext-equiv"][dest] = {"action": "-m", "src": src, "oldsrc": oldsrc, "olddest": olddest}
else:
iter = self.liststore.append(None)
self.cur_dict["fcontext-equiv"][dest] = {"action": "-a", "src": src}
self.liststore.set_value(iter, 0, self.markup(dest))
self.liststore.set_value(iter, 1, self.markup(src))
def update_to_files(self, *args):
self.close_popup()
self.files_add = True
# Insert Function will be used in the future
path = self.files_path_entry.get_text()
if self.error_check_files(path):
return
setype = self.combo_get_active_text(self.files_type_combobox)
mls = self.files_mls_entry.get_text()
tclass = self.combo_get_active_text(self.files_class_combobox)
if self.modify:
iter = self.get_selected_iter()
oldpath = self.unmarkup(self.liststore.get_value(iter, 0))
oldsetype = self.unmarkup(self.liststore.get_value(iter, 1))
oldtclass = self.unmarkup(self.liststore.get_value(iter, 2))
# No separate old-mls column exists in the liststore; the dialog entry
# still shows the selected row's level, so reuse it as the old value.
self.cur_dict["fcontext"][(path, tclass)] = {"action": "-m", "type": setype, "oldtype": oldsetype, "oldmls": mls, "oldclass": oldtclass}
else:
iter = self.liststore.append(None)
self.cur_dict["fcontext"][(path, tclass)] = {"action": "-a", "type": setype}
self.liststore.set_value(iter, 0, self.markup(path))
self.liststore.set_value(iter, 1, self.markup(setype))
self.liststore.set_value(iter, 2, self.markup(tclass))
self.files_add = False
self.recursive_path_toggle.set_active(False)
self.new_updates()
def update_to_network(self, *args):
self.network_add = True
ports = self.network_ports_entry.get_text()
if self.error_check_network(ports):
return
if self.network_tcp_button.get_active():
protocol = "tcp"
else:
protocol = "udp"
setype = self.combo_get_active_text(self.network_port_type_combobox)
mls = self.network_mls_entry.get_text()
if self.modify:
iter = self.get_selected_iter()
oldports = self.liststore.get_value(iter, 0)
oldprotocol = self.liststore.get_value(iter, 1)
oldsetype = self.liststore.get_value(iter, 2)
# No old-mls column exists in the liststore; the entry still shows the
# selected row's level, so reuse it as the old value.
self.cur_dict["port"][(ports, protocol)] = {"action": "-m", "type": setype, "mls": mls, "oldtype": oldsetype, "oldmls": mls, "oldprotocol": oldprotocol, "oldports": oldports}
else:
iter = self.liststore.append(None)
self.cur_dict["port"][(ports, protocol)] = {"action": "-a", "type": setype, "mls": mls}
self.liststore.set_value(iter, 0, ports)
self.liststore.set_value(iter, 1, protocol)
self.liststore.set_value(iter, 2, setype)
self.network_add = False
self.network_popup_window.hide()
self.window.set_sensitive(True)
self.new_updates()
def delete_button_clicked(self, *args):
operation = "Add"
self.window.set_sensitive(False)
if self.opage == NETWORK_PAGE:
self.network_delete_liststore.clear()
port_dict = self.cust_dict["port"]
for ports, protocol in port_dict:
setype = port_dict[(ports, protocol)]["type"]
iter = self.network_delete_liststore.append()
self.network_delete_liststore.set_value(iter, 1, ports)
self.network_delete_liststore.set_value(iter, 2, protocol)
self.network_delete_liststore.set_value(iter, 3, setype)
self.show_popup(self.network_delete_window)
return
if self.opage == FILES_PAGE:
self.files_delete_liststore.clear()
fcontext_dict = self.cust_dict["fcontext"]
for path, tclass in fcontext_dict:
setype = fcontext_dict[(path, tclass)]["type"]
iter = self.files_delete_liststore.append()
self.files_delete_liststore.set_value(iter, 1, path)
self.files_delete_liststore.set_value(iter, 2, setype)
self.files_delete_liststore.set_value(iter, 3, sepolicy.file_type_str[tclass])
self.show_popup(self.files_delete_window)
return
if self.opage == USER_PAGE:
self.user_delete_liststore.clear()
user_dict = self.cust_dict["user"]
for user in user_dict:
roles = user_dict[user]["role"]
mls = user_dict[user]["range"]
level = user_dict[user]["level"]
iter = self.user_delete_liststore.append()
self.user_delete_liststore.set_value(iter, 1, user)
self.user_delete_liststore.set_value(iter, 2, roles)
self.user_delete_liststore.set_value(iter, 3, level)
self.user_delete_liststore.set_value(iter, 4, mls)
self.show_popup(self.user_delete_window)
return
if self.opage == LOGIN_PAGE:
self.login_delete_liststore.clear()
login_dict = self.cust_dict["login"]
for login in login_dict:
seuser = login_dict[login]["seuser"]
mls = login_dict[login]["range"]
iter = self.login_delete_liststore.append()
self.login_delete_liststore.set_value(iter, 1, seuser)
self.login_delete_liststore.set_value(iter, 2, login)
self.login_delete_liststore.set_value(iter, 3, mls)
self.show_popup(self.login_delete_window)
return
if self.opage == FILE_EQUIV_PAGE:
self.file_equiv_delete_liststore.clear()
for items in self.file_equiv_liststore:
if items[2]:
iter = self.file_equiv_delete_liststore.append()
self.file_equiv_delete_liststore.set_value(iter, 1, self.unmarkup(items[0]))
self.file_equiv_delete_liststore.set_value(iter, 2, self.unmarkup(items[1]))
self.show_popup(self.file_equiv_delete_window)
return
def on_save_delete_clicked(self, *args):
self.close_popup()
if self.opage == NETWORK_PAGE:
for delete in self.network_delete_liststore:
if delete[0]:
self.cur_dict["port"][(delete[1], delete[2])] = {"action": "-d", "type": delete[3]}
if self.opage == FILES_PAGE:
for delete in self.files_delete_liststore:
if delete[0]:
self.cur_dict["fcontext"][(delete[1], reverse_file_type_str[delete[3]])] = {"action": "-d", "type": delete[2]}
if self.opage == USER_PAGE:
for delete in self.user_delete_liststore:
if delete[0]:
self.cur_dict["user"][delete[1]] = {"action": "-d", "role": delete[2], "range": delete[4]}
if self.opage == LOGIN_PAGE:
for delete in self.login_delete_liststore:
if delete[0]:
self.cur_dict["login"][delete[2]] = {"action": "-d", "login": delete[2], "seuser": delete[1], "range": delete[3]}
if self.opage == FILE_EQUIV_PAGE:
for delete in self.file_equiv_delete_liststore:
if delete[0]:
self.cur_dict["fcontext-equiv"][delete[1]] = {"action": "-d", "src": delete[2]}
self.new_updates()
def on_save_delete_file_equiv_clicked(self, *args):
for delete in self.files_delete_liststore:
print(delete[0], delete[1], delete[2])
def on_toggle_update(self, cell, path, model):
model[path][0] = not model[path][0]
def ipage_delete(self, liststore, key):
ctr = 0
for items in liststore:
if items[0] == key[0] and items[2] == key[1]:
iter = liststore.get_iter(ctr)
liststore.remove(iter)
return
ctr += 1
def on_toggle(self, cell, path, model):
if not path:
return
iter = self.boolean_filter.get_iter(path)
iter = self.boolean_filter.convert_iter_to_child_iter(iter)
name = model.get_value(iter, 2)
model.set_value(iter, 0, not model.get_value(iter, 0))
active = model.get_value(iter, 0)
if name in self.cur_dict["boolean"]:
del(self.cur_dict["boolean"][name])
else:
self.cur_dict["boolean"][name] = {"active": active}
self.new_updates()
def get_advanced_filter_data(self, entry, *args):
self.filter_txt = entry.get_text()
self.advanced_search_filter.refilter()
def get_filter_data(self, windows, *args):
# Search for the desired item.
# The text that the user inputs into the filter is stored in filter_txt.
self.filter_txt = windows.get_text()
self.treefilter.refilter()
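# Build the Update/Revert confirmation treestore: one checkable parent row
# per pending change, with non-selectable child rows carrying the details.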
def update_gui(self, *args):
self.update = True
self.update_treestore.clear()
for bools in self.cur_dict["boolean"]:
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, True)
self.update_treestore.set_value(iter, 1, sepolicy.boolean_desc(bools))
self.update_treestore.set_value(iter, 2, action[self.cur_dict["boolean"][bools]['active']])
self.update_treestore.set_value(iter, 3, True)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 1, (_("SELinux name: %s")) % bools)
self.update_treestore.set_value(niter, 3, False)
for path, tclass in self.cur_dict["fcontext"]:
operation = self.cur_dict["fcontext"][(path, tclass)]["action"]
setype = self.cur_dict["fcontext"][(path, tclass)]["type"]
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, True)
self.update_treestore.set_value(iter, 2, operation)
self.update_treestore.set_value(iter, 3, True)
if operation == "-a":
self.update_treestore.set_value(iter, 1, (_("Add file labeling for %s")) % self.application)
if operation == "-d":
self.update_treestore.set_value(iter, 1, (_("Delete file labeling for %s")) % self.application)
if operation == "-m":
self.update_treestore.set_value(iter, 1, (_("Modify file labeling for %s")) % self.application)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("File path: %s")) % path)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("File class: %s")) % sepolicy.file_type_str[tclass])
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("SELinux file type: %s")) % setype)
for port, protocol in self.cur_dict["port"]:
operation = self.cur_dict["port"][(port, protocol)]["action"]
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, True)
self.update_treestore.set_value(iter, 2, operation)
self.update_treestore.set_value(iter, 3, True)
if operation == "-a":
self.update_treestore.set_value(iter, 1, (_("Add ports for %s")) % self.application)
if operation == "-d":
self.update_treestore.set_value(iter, 1, (_("Delete ports for %s")) % self.application)
if operation == "-m":
self.update_treestore.set_value(iter, 1, (_("Modify ports for %s")) % self.application)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 1, (_("Network ports: %s")) % port)
self.update_treestore.set_value(niter, 3, False)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 1, (_("Network protocol: %s")) % protocol)
self.update_treestore.set_value(niter, 3, False)
setype = self.cur_dict["port"][(port, protocol)]["type"]
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("SELinux file type: %s")) % setype)
for user in self.cur_dict["user"]:
operation = self.cur_dict["user"][user]["action"]
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, True)
self.update_treestore.set_value(iter, 2, operation)
self.update_treestore.set_value(iter, 3, True)
if operation == "-a":
self.update_treestore.set_value(iter, 1, _("Add user"))
if operation == "-d":
self.update_treestore.set_value(iter, 1, _("Delete user"))
if operation == "-m":
self.update_treestore.set_value(iter, 1, _("Modify user"))
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 1, (_("SELinux User : %s")) % user)
self.update_treestore.set_value(niter, 3, False)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
roles = self.cur_dict["user"][user]["role"]
self.update_treestore.set_value(niter, 1, (_("Roles: %s")) % roles)
mls = self.cur_dict["user"][user]["range"]
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, _("MLS/MCS Range: %s") % mls)
for login in self.cur_dict["login"]:
operation = self.cur_dict["login"][login]["action"]
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, True)
self.update_treestore.set_value(iter, 2, operation)
self.update_treestore.set_value(iter, 3, True)
if operation == "-a":
self.update_treestore.set_value(iter, 1, _("Add login mapping"))
if operation == "-d":
self.update_treestore.set_value(iter, 1, _("Delete login mapping"))
if operation == "-m":
self.update_treestore.set_value(iter, 1, _("Modify login mapping"))
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("Login Name : %s")) % login)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
seuser = self.cur_dict["login"][login]["seuser"]
self.update_treestore.set_value(niter, 1, (_("SELinux User: %s")) % seuser)
mls = self.cur_dict["login"][login]["range"]
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, _("MLS/MCS Range: %s") % mls)
for path in self.cur_dict["fcontext-equiv"]:
operation = self.cur_dict["fcontext-equiv"][path]["action"]
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, True)
self.update_treestore.set_value(iter, 2, operation)
self.update_treestore.set_value(iter, 3, True)
if operation == "-a":
self.update_treestore.set_value(iter, 1, (_("Add file equiv labeling.")))
if operation == "-d":
self.update_treestore.set_value(iter, 1, (_("Delete file equiv labeling.")))
if operation == "-m":
self.update_treestore.set_value(iter, 1, (_("Modify file equiv labeling.")))
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("File path : %s")) % path)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
src = self.cur_dict["fcontext-equiv"][path]["src"]
self.update_treestore.set_value(niter, 1, (_("Equivalence: %s")) % src)
self.show_popup(self.update_window)
def set_active_application_button(self):
if self.boolean_radio_button.get_active():
self.active_button = self.boolean_radio_button
if self.files_radio_button.get_active():
self.active_button = self.files_radio_button
if self.transitions_radio_button.get_active():
self.active_button = self.transitions_radio_button
if self.network_radio_button.get_active():
self.active_button = self.network_radio_button
def clearbuttons(self, clear=True):
self.main_selection_window.hide()
self.boolean_radio_button.set_visible(False)
self.files_radio_button.set_visible(False)
self.network_radio_button.set_visible(False)
self.transitions_radio_button.set_visible(False)
self.system_radio_button.set_visible(False)
self.lockdown_radio_button.set_visible(False)
self.user_radio_button.set_visible(False)
self.login_radio_button.set_visible(False)
if clear:
self.completion_entry.set_text("")
def show_system_page(self):
self.clearbuttons()
self.system_radio_button.set_visible(True)
self.lockdown_radio_button.set_visible(True)
self.applications_selection_button.set_label(_("System"))
self.system_radio_button.set_active(True)
self.tab_change()
self.idle_func()
def show_file_equiv_page(self, *args):
self.clearbuttons()
self.file_equiv_initialize()
self.file_equiv_radio_button.set_active(True)
self.applications_selection_button.set_label(_("File Equivalence"))
self.tab_change()
self.idle_func()
self.add_button.set_sensitive(True)
self.delete_button.set_sensitive(True)
def show_users_page(self):
self.clearbuttons()
self.login_radio_button.set_visible(True)
self.user_radio_button.set_visible(True)
self.applications_selection_button.set_label(_("Users"))
self.login_radio_button.set_active(True)
self.tab_change()
self.user_initialize()
self.login_initialize()
self.idle_func()
self.add_button.set_sensitive(True)
self.delete_button.set_sensitive(True)
def show_applications_page(self):
self.clearbuttons(False)
self.boolean_radio_button.set_visible(True)
self.files_radio_button.set_visible(True)
self.network_radio_button.set_visible(True)
self.transitions_radio_button.set_visible(True)
self.boolean_radio_button.set_active(True)
self.tab_change()
self.idle_func()
def system_interface(self, *args):
self.show_system_page()
def users_interface(self, *args):
self.show_users_page()
def show_mislabeled_files(self, checkbutton, *args):
iterlist = []
ctr = 0
ipage = self.inner_notebook_files.get_current_page()
if checkbutton.get_active():
for items in self.liststore:
iter = self.treesort.get_iter(ctr)
iter = self.treesort.convert_iter_to_child_iter(iter)
iter = self.treefilter.convert_iter_to_child_iter(iter)
if iter != None:
if self.liststore.get_value(iter, 4) == False:
iterlist.append(iter)
ctr += 1
for iters in iterlist:
self.liststore.remove(iters)
elif self.application != None:
self.liststore.clear()
if ipage == EXE_PAGE:
self.executable_files_initialize(self.application)
elif ipage == WRITABLE_PAGE:
self.writable_files_initialize(self.application)
elif ipage == APP_PAGE:
self.application_files_initialize(self.application)
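# Compare a file's actual SELinux type with the default from matchpathcon
# and offer to run restorecon over DBus when they differ.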
def fix_mislabeled(self, path):
cur = selinux.getfilecon(path)[1].split(":")[2]
con = selinux.matchpathcon(path, 0)[1].split(":")[2]
if self.verify(_("Run restorecon on %(PATH)s to change its type from %(CUR_CONTEXT)s to the default %(DEF_CONTEXT)s?") % {"PATH": path, "CUR_CONTEXT": cur, "DEF_CONTEXT": con}, title="restorecon dialog") == Gtk.ResponseType.YES:
try:
self.dbus.restorecon(path)
self.application_selected()
except dbus.exceptions.DBusException as e:
self.error(e)
def new_updates(self, *args):
self.update_button.set_sensitive(self.modified())
self.revert_button.set_sensitive(self.modified())
def update_or_revert_changes(self, button, *args):
self.update_gui()
self.update = (button.get_label() == _("Update"))
if self.update:
self.update_window.set_title(_("Update Changes"))
else:
self.update_window.set_title(_("Revert Changes"))
def apply_changes_button_press(self, *args):
self.close_popup()
if self.update:
self.update_the_system()
else:
self.revert_data()
self.finish_init = False
self.previously_modified_initialize(self.dbus.customized())
self.finish_init = True
self.clear_filters()
self.application_selected()
self.new_updates()
self.update_treestore.clear()
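# Apply the pending changes: format_update() turns self.cur_dict into a
# semanage transaction buffer, which is handed to the DBus backend.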
def update_the_system(self, *args):
self.close_popup()
update_buffer = self.format_update()
self.wait_mouse()
try:
self.dbus.semanage(update_buffer)
except dbus.exceptions.DBusException as e:
self.error(e)
self.ready_mouse()
self.init_cur()
def ipage_value_lookup(self, lookup):
ipage_values = {"Executable Files": 0, "Writable Files": 1, "Application File Type": 2, "Inbound": 1, "Outbound": 0}
for value in ipage_values:
if value == lookup:
return ipage_values[value]
return "Booleans"
def get_attributes_update(self, attribute):
attribute = attribute.split(": ")[1]
bool_id = attribute.split(": ")[0]
if bool_id == "SELinux name":
self.bool_revert = attribute
else:
return attribute
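# Serialize self.cur_dict into a newline-separated buffer of semanage
# subcommands that self.dbus.semanage() applies as one transaction.
# A hypothetical buffer might look like:
#   boolean -m -1 deny_ptrace
#   port -a -t http_port_t -p tcp 8080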
def format_update(self):
self.revert_data()
update_buffer = ""
for k in self.cur_dict:
if k in "boolean":
for b in self.cur_dict[k]:
update_buffer += "boolean -m -%d %s\n" % (self.cur_dict[k][b]["active"], b)
if k in "login":
for l in self.cur_dict[k]:
if self.cur_dict[k][l]["action"] == "-d":
update_buffer += "login -d %s\n" % l
else:
update_buffer += "login %s -s %s -r %s %s\n" % (self.cur_dict[k][l]["action"], self.cur_dict[k][l]["seuser"], self.cur_dict[k][l]["range"], l)
if k in "user":
for u in self.cur_dict[k]:
if self.cur_dict[k][u]["action"] == "-d":
update_buffer += "user -d %s\n" % u
else:
update_buffer += "user %s -L %s -r %s -R %s %s\n" % (self.cur_dict[k][u]["action"], self.cur_dict[k][u]["level"], self.cur_dict[k][u]["range"], self.cur_dict[k][u]["role"], u)
if k in "fcontext-equiv":
for f in self.cur_dict[k]:
if self.cur_dict[k][f]["action"] == "-d":
update_buffer += "fcontext -d %s\n" % f
else:
update_buffer += "fcontext %s -e %s %s\n" % (self.cur_dict[k][f]["action"], self.cur_dict[k][f]["src"], f)
if k in "fcontext":
for f in self.cur_dict[k]:
if self.cur_dict[k][f]["action"] == "-d":
update_buffer += "fcontext -d %s\n" % f
else:
update_buffer += "fcontext %s -t %s -f %s %s\n" % (self.cur_dict[k][f]["action"], self.cur_dict[k][f]["type"], self.cur_dict[k][f]["class"], f)
if k in "port":
for port, protocol in self.cur_dict[k]:
if self.cur_dict[k][(port, protocol)]["action"] == "-d":
update_buffer += "port -d -p %s %s\n" % (protocol, port)
else:
update_buffer += "port %s -t %s -p %s %s\n" % (self.cur_dict[k][f]["action"], self.cur_dict[k][f]["type"], procotol, port)
return update_buffer
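# Drop every top-level row the user unchecked in the update treestore from
# self.cur_dict, walking the indices in reverse so removals stay valid.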
def revert_data(self):
ctr = 0
remove_list = []
update_buffer = ""
for items in self.update_treestore:
if not self.update_treestore[ctr][0]:
remove_list.append(ctr)
ctr += 1
remove_list.reverse()
for ctr in remove_list:
self.remove_cur(ctr)
def reveal_advanced_system(self, label, *args):
advanced = label.get_text() == ADVANCED_LABEL[0]
if advanced:
label.set_text(ADVANCED_LABEL[1])
else:
label.set_text(ADVANCED_LABEL[0])
self.system_policy_label.set_visible(advanced)
self.system_policy_type_combobox.set_visible(advanced)
def reveal_advanced(self, label, *args):
advanced = label.get_text() == ADVANCED_LABEL[0]
if advanced:
label.set_text(ADVANCED_LABEL[1])
else:
label.set_text(ADVANCED_LABEL[0])
self.files_mls_label.set_visible(advanced)
self.files_mls_entry.set_visible(advanced)
self.network_mls_label.set_visible(advanced)
self.network_mls_entry.set_visible(advanced)
def on_show_advanced_search_window(self, label, *args):
if label.get_text() == ADVANCED_SEARCH_LABEL[1]:
label.set_text(ADVANCED_SEARCH_LABEL[0])
self.close_popup()
else:
label.set_text(ADVANCED_SEARCH_LABEL[1])
self.show_popup(self.advanced_search_window)
def set_enforce_text(self, value):
if value:
self.status_bar.push(self.context_id, _("System Status: Enforcing"))
self.current_status_enforcing.set_active(True)
else:
self.status_bar.push(self.context_id, _("System Status: Permissive"))
self.current_status_permissive.set_active(True)
def set_enforce(self, button):
if not self.finish_init:
return
try:
self.dbus.setenforce(button.get_active())
self.set_enforce_text(button.get_active())
except dbus.exceptions.DBusException as e:
self.error(e)
def on_browse_select(self, *args):
filename = self.file_dialog.get_filename()
if filename == None:
return
self.clear_entry = False
self.file_dialog.hide()
self.files_path_entry.set_text(filename)
if self.import_export == 'Import':
self.import_config(filename)
elif self.import_export == 'Export':
self.export_config(filename)
def recursive_path(self, *args):
path = self.files_path_entry.get_text()
if self.recursive_path_toggle.get_active():
if not path.endswith("(/.*)?"):
self.files_path_entry.set_text(path + "(/.*)?")
elif path.endswith("(/.*)?"):
path = path.split("(/.*)?")[0]
self.files_path_entry.set_text(path)
def highlight_entry_text(self, entry_obj, *args):
txt = entry_obj.get_text()
if self.clear_entry:
entry_obj.set_text('')
self.clear_entry = False
def autofill_add_files_entry(self, entry):
text = entry.get_text()
if text == '':
return
if text.endswith("(/.*)?"):
self.recursive_path_toggle.set_active(True)
for d in sepolicy.DEFAULT_DIRS:
if text.startswith(d):
for t in self.files_type_combolist:
if t[0].endswith(sepolicy.DEFAULT_DIRS[d]):
self.combo_set_active_text(self.files_type_combobox, t[0])
def resize_columns(self, *args):
self.boolean_column_1 = self.boolean_treeview.get_column(1)
width = self.boolean_column_1.get_width()
renderer = self.boolean_column_1.get_cells()
def browse_for_files(self, *args):
self.file_dialog.show()
def close_config_window(self, *args):
self.file_dialog.hide()
def change_default_policy(self, *args):
if self.typeHistory == self.system_policy_type_combobox.get_active():
return
if self.verify(_("Changing the policy type will cause a relabel of the entire file system on the next boot. Relabeling takes a long time depending on the size of the file system. Do you wish to continue?")) == Gtk.ResponseType.NO:
self.system_policy_type_combobox.set_active(self.typeHistory)
return None
try:
self.dbus.change_default_policy(self.combo_get_active_text(self.system_policy_type_combobox))
self.dbus.relabel_on_boot(True)
self.typeHistory = self.system_policy_type_combobox.get_active()
except dbus.exceptions.DBusException as e:
self.error(e)
def change_default_mode(self, button):
if not self.finish_init:
return
self.enabled_changed(button)
if button.get_active():
try:
self.dbus.change_default_mode(button.get_label().lower())
except dbus.exceptions.DBusException as e:
self.error(e)
def import_config_show(self, *args):
self.file_dialog.set_action(Gtk.FileChooserAction.OPEN)
self.file_dialog.set_title("Import Configuration")
self.file_dialog.show()
#self.file_dialog.set_uri('/tmp')
self.import_export = 'Import'
def export_config_show(self, *args):
self.file_dialog.set_action(Gtk.FileChooserAction.SAVE)
self.file_dialog.set_title("Export Configuration")
self.file_dialog.show()
self.import_export = 'Export'
def export_config(self, filename):
self.wait_mouse()
buf = self.dbus.customized()
with open(filename, 'w') as fd:
fd.write(buf)
self.ready_mouse()
def import_config(self, filename):
fd = open(filename, "r")
buf = fd.read()
fd.close()
self.wait_mouse()
try:
self.dbus.semanage(buf)
except OSError:
pass
self.ready_mouse()
def init_dictionary(self, dic, app, ipage, operation, p, q, ftype, mls, changed, old):
if (app, ipage, operation) not in dic:
dic[app, ipage, operation] = {}
if (p, q) not in dic[app, ipage, operation]:
dic[app, ipage, operation][p, q] = {'type': ftype, 'mls': mls, 'changed': changed, 'old': old}
def translate_bool(self, b):
b = b.split('-')[1]
if b == '0':
return False
if b == '1':
return True
def relabel_on_reboot(self, *args):
active = self.relabel_button.get_active()
exists = os.path.exists("/.autorelabel")
if active and exists:
return
if not active and not exists:
return
try:
self.dbus.relabel_on_boot(active)
except dbus.exceptions.DBusException as e:
self.error(e)
def closewindow(self, window, *args):
window.hide()
self.recursive_path_toggle.set_active(False)
self.window.set_sensitive(True)
if self.moreTypes_window_files == window:
self.show_popup(self.files_popup_window)
if self.combo_get_active_text(self.files_type_combobox) == _('More...'):
self.files_type_combobox.set_active(0)
if self.error_check_window == window:
if self.files_add:
self.show_popup(self.files_popup_window)
elif self.network_add:
self.show_popup(self.network_popup_window)
if self.files_mls_label.get_visible() or self.network_mls_label.get_visible():
self.advanced_text_files.set_visible(True)
self.files_mls_label.set_visible(False)
self.files_mls_entry.set_visible(False)
self.advanced_text_network.set_visible(True)
self.network_mls_label.set_visible(False)
self.network_mls_entry.set_visible(False)
if self.main_advanced_label.get_text() == ADVANCED_SEARCH_LABEL[1]:
self.main_advanced_label.set_text(ADVANCED_SEARCH_LABEL[0])
return True
def wait_mouse(self):
self.window.get_window().set_cursor(self.busy_cursor)
self.idle_func()
def ready_mouse(self):
self.window.get_window().set_cursor(self.ready_cursor)
self.idle_func()
def verify(self, message, title=""):
dlg = Gtk.MessageDialog(None, 0, Gtk.MessageType.INFO,
Gtk.ButtonsType.YES_NO,
message)
dlg.set_title(title)
dlg.set_position(Gtk.WindowPosition.MOUSE)
dlg.show_all()
rc = dlg.run()
dlg.destroy()
return rc
def error(self, message):
dlg = Gtk.MessageDialog(None, 0, Gtk.MessageType.ERROR,
Gtk.ButtonsType.CLOSE,
message)
dlg.set_position(Gtk.WindowPosition.MOUSE)
dlg.show_all()
dlg.run()
dlg.destroy()
def enabled_changed(self, radio):
if not radio.get_active():
return
label = radio.get_label()
if label == 'Disabled' and self.enforce_mode != DISABLED:
if self.verify(_("Changing to SELinux disabled requires a reboot. It is not recommended. If you later decide to turn SELinux back on, the system will be required to relabel. If you just want to see if SELinux is causing a problem on your system, you can go to permissive mode which will only log errors and not enforce SELinux policy. Permissive mode does not require a reboot. Do you wish to continue?")) == Gtk.ResponseType.NO:
self.enforce_button.set_active(True)
if label != 'Disabled' and self.enforce_mode == DISABLED:
if self.verify(_("Changing to SELinux enabled will cause a relabel of the entire file system on the next boot. Relabeling takes a long time depending on the size of the file system. Do you wish to continue?")) == Gtk.ResponseType.NO:
self.enforce_button.set_active(True)
self.enforce_button = radio
def clear_filters(self, *args):
self.filter_entry.set_text('')
self.show_modified_only.set_active(False)
def unconfined_toggle(self, *args):
if not self.finish_init:
return
self.wait_mouse()
if self.enable_unconfined_button.get_active():
self.dbus.semanage("module -e unconfined")
else:
self.dbus.semanage("module -d unconfined")
self.ready_mouse()
def permissive_toggle(self, *args):
if not self.finish_init:
return
self.wait_mouse()
try:
if self.enable_permissive_button.get_active():
self.dbus.semanage("module -e permissivedomains")
else:
self.dbus.semanage("module -d permissivedomains")
except dbus.exceptions.DBusException as e:
self.error(e)
self.ready_mouse()
def confirmation_close(self, button, *args):
if len(self.update_treestore) > 0:
if self.verify(_("You are attempting to close the application without applying your changes.\n * To apply changes you have made during this session, click No and click Update.\n * To leave the application without applying your changes, click Yes. All changes that you have made during this session will be lost."), _("Loss of data Dialog")) == Gtk.ResponseType.NO:
return True
self.quit()
def quit(self, *args):
sys.exit(0)
if __name__ == '__main__':
start = SELinuxGui()
|
rhatdan/selinux
|
policycoreutils/sepolicy/sepolicy/gui.py
|
Python
|
gpl-2.0
| 133,940
|
# coding: utf-8"
"""
Example demonstrating how to use the ``input_label`` to send images between
blocks.
This mechanism is useful for performing fake tests with generated images
instead of reading images from a camera.
Required hardware:
- Any camera
"""
import crappy
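# 'input_label' tells the DISCorrel block to read images from the incoming
# link instead of opening a camera itself; for the fake-test use case in the
# docstring, the Camera block would be swapped for an image generator
# feeding the same label.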
if __name__ == "__main__":
cam1 = crappy.blocks.Camera('Webcam')
dis = crappy.blocks.DISCorrel('', input_label='frame')
crappy.link(cam1, dis)
graph = crappy.blocks.Grapher(('t(s)', 'x(pix)'))
crappy.link(dis, graph)
crappy.start()
|
LaboratoireMecaniqueLille/crappy
|
Examples/sending_img_through_links.py
|
Python
|
gpl-2.0
| 523
|
#!/usr/bin/python -tt
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2005 Duke University
"""
The Yum RPM software updater.
"""
import os
import os.path
import rpm
import sys
def _rpm_ver_atleast(vertup):
""" Check if rpm is at least the current vertup. Can return False/True/None
as rpm hasn't had version info for a long time. """
if not hasattr(rpm, '__version_info__'):
return None
try:
# 4.8.x rpm used strings for the tuple members, so convert.
vi = tuple([ int(num) for num in rpm.__version_info__])
return vi >= vertup
except:
return None # Something went wrong...
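# e.g. _rpm_ver_atleast((4, 8, 0)) -> True on rpm >= 4.8, False on older
# rpm, and None when the bindings expose no __version_info__ at all.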
import re
import types
import errno
import time
import glob
import fnmatch
import logging
import logging.config
import operator
import tempfile
import shutil
import yum.i18n
# This is required to make gaftonmode work...
_wrap_yum_i18n__ = yum.i18n._
def _(*args, **kwargs):
return _wrap_yum_i18n__(*args, **kwargs)
_wrap_yum_i18n_P_ = yum.i18n.P_
def P_(*args, **kwargs):
return _wrap_yum_i18n_P_(*args, **kwargs)
import config
from config import ParsingError, ConfigParser
import Errors
import rpmsack
import rpmUtils.updates
from rpmUtils.arch import archDifference, canCoinstall, ArchStorage, isMultiLibArch
from rpmUtils.miscutils import compareEVR
import rpmUtils.transaction
import comps
import pkgtag_db
from repos import RepoStorage
import misc
from parser import ConfigPreProcessor, varReplace
import transactioninfo
import urlgrabber
from urlgrabber.grabber import URLGrabber, URLGrabError
from urlgrabber.progress import format_number
from packageSack import packagesNewestByName, packagesNewestByNameArch, ListPackageSack
import depsolve
import plugins
import logginglevels
import yumRepo
import callbacks
import yum.history
import yum.fssnapshots
import yum.igroups
import update_md
import warnings
warnings.simplefilter("ignore", Errors.YumFutureDeprecationWarning)
from packages import parsePackages, comparePoEVR
from packages import YumAvailablePackage, YumLocalPackage, YumInstalledPackage
from packages import YumUrlPackage, YumNotFoundPackage
from constants import *
from yum.rpmtrans import RPMTransaction,SimpleCliCallBack
from yum.i18n import to_unicode, to_str, exception2msg
from yum.drpm import DeltaInfo, DeltaPackage
import string
import StringIO
from weakref import proxy as weakref
from urlgrabber.grabber import default_grabber
try:
import cashe
except ImportError:
cashe = None
__version__ = '3.4.3'
__version_info__ = tuple([ int(num) for num in __version__.split('.')])
# Setup a default_grabber UA here that says we are yum, done using the global
# so that other API users can easily add to it if they want.
# Don't do it at init time, or we'll get multiple additions if you create
# multiple YumBase() objects.
default_grabber.opts.user_agent += " yum/" + __version__
class _YumPreBaseConf:
"""This is the configuration interface for the :class:`YumBase`
configuration. To change configuration settings such as whether
plugins are on or off, or the value of debuglevel, change the
values here. Later, when :func:`YumBase.conf` is first called, all
of the options will be automatically configured.
"""
def __init__(self):
self.fn = '/etc/yum/yum.conf'
self.root = '/'
self.init_plugins = True
self.plugin_types = (plugins.TYPE_CORE,)
self.optparser = None
self.debuglevel = None
self.errorlevel = None
self.disabled_plugins = None
self.enabled_plugins = None
self.syslog_ident = None
self.syslog_facility = None
self.syslog_device = None
self.arch = None
self.releasever = None
self.uuid = None
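# A minimal usage sketch (hypothetical values): preconf fields must be set
# before the first access to YumBase().conf, which applies and freezes them:
#   yb = YumBase()
#   yb.preconf.debuglevel = 2
#   yb.preconf.init_plugins = False
#   conf = yb.conf   # triggers _getConfig() with the values above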
class _YumPreRepoConf:
"""This is the configuration interface for the repos configuration
configuration. To change configuration settings such what
callbacks are used, change the values here. Later, when
:func:`YumBase.repos` is first called, all of the options will be
automatically configured.
"""
def __init__(self):
self.progressbar = None
self.multi_progressbar = None
self.callback = None
self.failure_callback = None
self.interrupt_callback = None
self.confirm_func = None
self.gpg_import_func = None
self.gpgca_import_func = None
self.cachedir = None
self.cache = None
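# Likewise a sketch (my_progress_meter is a hypothetical callback):
# prerepoconf must be filled in before the first access to YumBase().repos:
#   yb.prerepoconf.progressbar = my_progress_meter
#   repos = yb.repos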
class _YumCostExclude:
""" This excludes packages that are in repos. of lower cost than the passed
repo. """
def __init__(self, repo, repos):
self.repo = weakref(repo)
self._repos = weakref(repos)
def __contains__(self, pkgtup):
# (n, a, e, v, r) = pkgtup
for repo in self._repos.listEnabled():
if repo.cost >= self.repo.cost:
break
# searchNevra is a bit slower, although more generic for repos.
# that don't use sqlitesack as the backend ... although they are
# probably screwed anyway.
#
# if repo.sack.searchNevra(n, e, v, r, a):
if pkgtup in repo.sack._pkgtup2pkgs:
return True
return False
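# Usage sketch: exclude = _YumCostExclude(repo, repos); "pkgtup in exclude"
# is then True when an enabled repo cheaper than 'repo' carries that
# package tuple.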
class YumBase(depsolve.Depsolve):
"""This is a primary structure and base class. It houses the
objects and methods needed to perform most things in yum. It is
almost an abstract class in that you will need to add your own
class above it for most real use.
"""
def __init__(self):
depsolve.Depsolve.__init__(self)
self._conf = None
self._tsInfo = None
self._rpmdb = None
self._up = None
self._comps = None
self._history = None
self._igroups = None
self._pkgSack = None
self._lockfile = None
self._tags = None
self._upinfo = None
self._fssnap = None
self._ts_save_file = None
self.skipped_packages = [] # packages skipped by the skip-broken code
self._not_found_a = {}
self._not_found_i = {}
self.logger = logging.getLogger("yum.YumBase")
self.verbose_logger = logging.getLogger("yum.verbose.YumBase")
self._override_sigchecks = False
self._repos = RepoStorage(self)
self.repo_setopts = {} # since we have to use repo_setopts in base and
# not in cli - set it up as empty so no one
# trips over it later
# Start with plugins disabled
self.disablePlugins()
self.localPackages = [] # for local package handling
self.mediagrabber = None
self.arch = ArchStorage()
self.preconf = _YumPreBaseConf()
self.prerepoconf = _YumPreRepoConf()
self.run_with_package_names = set()
self._cleanup = []
self.exit_code = 0
self.updateinfo_filters = {}
def __del__(self):
self.close()
self.closeRpmDB()
self.doUnlock()
# call cleanup callbacks
for cb in self._cleanup: cb()
def close(self):
"""Close the history and repo objects."""
# We don't want to create the object, so we test if it's been created
if self._history is not None:
self.history.close()
if self._igroups is not None:
self.igroups.close()
if self._repos:
self._repos.close()
def _transactionDataFactory(self):
"""Factory method returning TransactionData object"""
return transactioninfo.TransactionData()
def doGenericSetup(self, cache=0):
"""Do a default setup for all the normal or necessary yum
components. This function is really just a shorthand for
testing purposes.
:param cache: whether to run in cache only mode, which will
run only from the system cache
"""
self.preconf.init_plugins = False
self.conf.cache = cache
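    # A hedged example of the shorthand above, assuming a usable yum
    # configuration on the host; returnNewestByName() is just one example
    # sack query:
    #
    #   yb = YumBase()
    #   yb.doGenericSetup(cache=1)    # no plugins, operate from the cache
    #   pkgs = yb.pkgSack.returnNewestByName()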
def doConfigSetup(self, fn='/etc/yum/yum.conf', root='/', init_plugins=True,
plugin_types=(plugins.TYPE_CORE,), optparser=None, debuglevel=None,
errorlevel=None):
"""Deprecated. Perform configuration setup.
:param fn: the name of the configuration file to use
:param root: the root directory to use
:param init_plugins: whether to initialize plugins before
running yum
:param plugin_types: a tuple containing the types to plugins
to load
:param optparser: the option parser to use for configuration
:param debuglevel: the minimum debug logging level to output
messages from
:param errorlevel: the minimum error logging level to output
messages from
"""
warnings.warn(_('doConfigSetup() will go away in a future version of Yum.\n'),
Errors.YumFutureDeprecationWarning, stacklevel=2)
if hasattr(self, 'preconf'):
self.preconf.fn = fn
self.preconf.root = root
self.preconf.init_plugins = init_plugins
self.preconf.plugin_types = plugin_types
self.preconf.optparser = optparser
self.preconf.debuglevel = debuglevel
self.preconf.errorlevel = errorlevel
return self.conf
def _getConfig(self, **kwargs):
'''
        Parse and load Yum's configuration files, call hooks, and
        initialise plugins and logging. Uses self.preconf for
        pre-configuration settings. '''
# ' xemacs syntax hack
if kwargs:
warnings.warn('Use .preconf instead of passing args to _getConfig')
if self._conf:
return self._conf
conf_st = time.time()
if kwargs:
for arg in ('fn', 'root', 'init_plugins', 'plugin_types',
'optparser', 'debuglevel', 'errorlevel',
'disabled_plugins', 'enabled_plugins'):
if arg in kwargs:
setattr(self.preconf, arg, kwargs[arg])
fn = self.preconf.fn
root = self.preconf.root
init_plugins = self.preconf.init_plugins
plugin_types = self.preconf.plugin_types
optparser = self.preconf.optparser
debuglevel = self.preconf.debuglevel
errorlevel = self.preconf.errorlevel
disabled_plugins = self.preconf.disabled_plugins
enabled_plugins = self.preconf.enabled_plugins
syslog_ident = self.preconf.syslog_ident
syslog_facility = self.preconf.syslog_facility
syslog_device = self.preconf.syslog_device
releasever = self.preconf.releasever
arch = self.preconf.arch
uuid = self.preconf.uuid
if arch: # if preconf is setting an arch we need to pass that up
self.arch.setup_arch(arch)
else:
arch = self.arch.canonarch
# TODO: Remove this block when we no longer support configs outside
# of /etc/yum/
if fn == '/etc/yum/yum.conf' and not os.path.exists(fn):
# Try the old default
fn = '/etc/yum.conf'
startupconf = config.readStartupConfig(fn, root, releasever)
startupconf.arch = arch
startupconf.basearch = self.arch.basearch
if uuid:
startupconf.uuid = uuid
if startupconf.gaftonmode:
global _wrap_yum_i18n__
_wrap_yum_i18n__ = yum.i18n.dummy_wrapper
global _wrap_yum_i18n_P_
_wrap_yum_i18n_P_ = yum.i18n.dummyP_wrapper
if debuglevel != None:
startupconf.debuglevel = debuglevel
if errorlevel != None:
startupconf.errorlevel = errorlevel
if syslog_ident != None:
startupconf.syslog_ident = syslog_ident
if syslog_facility != None:
startupconf.syslog_facility = syslog_facility
if syslog_device != None:
startupconf.syslog_device = syslog_device
if releasever == '/':
if startupconf.installroot == '/':
releasever = None
else:
releasever = yum.config._getsysver("/",startupconf.distroverpkg)
if releasever != None:
startupconf.releasever = releasever
self.doLoggingSetup(startupconf.debuglevel, startupconf.errorlevel,
startupconf.syslog_ident,
startupconf.syslog_facility,
startupconf.syslog_device)
if init_plugins and startupconf.plugins:
self.doPluginSetup(optparser, plugin_types, startupconf.pluginpath,
startupconf.pluginconfpath,disabled_plugins,enabled_plugins)
self._conf = config.readMainConfig(startupconf)
# update urlgrabber defaults
mc = self._conf.max_connections
if mc > 0:
default_grabber.opts.max_connections = mc
default_grabber.opts.timedhosts = self._conf.cachedir + '/timedhosts'
# We don't want people accessing/altering preconf after it becomes
# worthless. So we delete it, and thus. it'll raise AttributeError
del self.preconf
# Packages used to run yum...
for pkgname in self.conf.history_record_packages:
self.run_with_package_names.add(pkgname)
self._cashe = None
if cashe is not None:
self._cashe = cashe.CAShe(self.conf.cashe_root_dir)
# run the postconfig plugin hook
self.plugins.run('postconfig')
# Note that Pungi has historically replaced _getConfig(), and it sets
# up self.conf.yumvar but not self.yumvar ... and AFAIK nothing needs
# to use YumBase.yumvar, so it's probably easier to just semi-deprecate
# this (core now only uses YumBase.conf.yumvar).
self.yumvar = self.conf.yumvar
# who are we:
self.conf.uid = os.geteuid()
# repos are ver/arch specific so add $basearch/$releasever
self.conf._repos_persistdir = os.path.normpath('%s/repos/%s/%s/'
% (self.conf.persistdir, self.yumvar.get('basearch', '$basearch'),
self.yumvar.get('releasever', '$releasever')))
self.doFileLogSetup(self.conf.uid, self.conf.logfile)
self.verbose_logger.debug('Config time: %0.3f' % (time.time() - conf_st))
self.plugins.run('init')
return self._conf
def doLoggingSetup(self, debuglevel, errorlevel,
syslog_ident=None, syslog_facility=None,
syslog_device='/dev/log'):
"""Perform logging related setup.
:param debuglevel: the minimum debug logging level to output
messages from
:param errorlevel: the minimum error logging level to output
messages from
:param syslog_ident: the ident of the syslog to use
:param syslog_facility: the name of the syslog facility to use
:param syslog_device: the syslog device to use
"""
logginglevels.doLoggingSetup(debuglevel, errorlevel,
syslog_ident, syslog_facility,
syslog_device)
def doFileLogSetup(self, uid, logfile):
"""Set up the logging file.
:param uid: the user id of the current user
:param logfile: the name of the file to use for logging
"""
logginglevels.setFileLog(uid, logfile, self._cleanup)
def getReposFromConfigFile(self, repofn, repo_age=None, validate=None):
"""Read in repositories from a config .repo file.
:param repofn: a string specifying the path of the .repo file
to read
:param repo_age: the last time that the .repo file was
           modified, in seconds since the epoch
        :param validate: optional callable taking the repo object and
           returning False to skip adding that repo
"""
if repo_age is None:
repo_age = os.stat(repofn)[8]
confpp_obj = ConfigPreProcessor(repofn, vars=self.conf.yumvar)
parser = ConfigParser()
try:
parser.readfp(confpp_obj)
except ParsingError, e:
raise Errors.ConfigError(exception2msg(e))
# Check sections in the .repo file that was just slurped up
for section in parser.sections():
if section in ['main', 'installed']:
continue
# Check the repo.id against the valid chars
bad = None
for byte in section:
if byte in string.ascii_letters:
continue
if byte in string.digits:
continue
if byte in "-_.:":
continue
bad = byte
break
if bad:
self.logger.warning("Bad id for repo: %s, byte = %s %d" %
(section, bad, section.find(bad)))
continue
try:
thisrepo = self.readRepoConfig(parser, section)
except (Errors.RepoError, Errors.ConfigError), e:
self.logger.warning(e)
continue
else:
thisrepo.repo_config_age = repo_age
thisrepo.repofile = repofn
thisrepo.base_persistdir = self.conf._repos_persistdir
# do the wildcard ones first
# The keys are in indeterminate order at this point, *sigh*.
for i in sorted(self.repo_setopts):
# Skip normal names, as we want to do wildcard matches first
# and then override with specific id stuff.
if not misc.re_glob(i):
continue
if fnmatch.fnmatch(thisrepo.id, i):
for opt in self.repo_setopts[i].items:
self._checkOption(opt, thisrepo)
setattr(thisrepo, opt, getattr(self.repo_setopts[i], opt))
if thisrepo.id in self.repo_setopts:
for opt in self.repo_setopts[thisrepo.id].items:
self._checkOption(opt, thisrepo)
setattr(thisrepo, opt, getattr(self.repo_setopts[thisrepo.id], opt))
if validate and not validate(thisrepo):
continue
if thisrepo.ssl_check_cert_permissions:
for fn in (thisrepo.sslcacert,
thisrepo.sslclientcert, thisrepo.sslclientkey):
if not fn:
continue
                    # If we can't read the SSL certs, we need to skip the
                    # repo, since we don't have all the data.
if not os.access(fn, os.R_OK):
msg="Repo %s forced skip_if_unavailable=True due to: %s"
if thisrepo.enabled:
# Don't spam messages for disabled repos.
self.logger.warning(msg % (thisrepo.id, fn))
thisrepo.skip_if_unavailable = True
# Got our list of repo objects, add them to the repos
# collection
try:
self._repos.add(thisrepo)
except Errors.RepoError, e:
self.logger.warning(e)
def _checkOption(self, opt, thisrepo):
"""Quit if the option is invalid"""
if not hasattr(thisrepo, opt):
msg = "Invalid option: %s"
raise Errors.ConfigError(msg % opt)
def getReposFromConfig(self):
"""Read in repositories from the main yum conf file, and from
.repo files. The location of the main yum conf file is given
by self.conf.config_file_path, and the location of the
directory of .repo files is given by self.conf.reposdir.
"""
# Read .repo files from directories specified by the reposdir option
# (typically /etc/yum/repos.d)
repo_config_age = self.conf.config_file_age
# Get the repos from the main yum.conf file
self.getReposFromConfigFile(self.conf.config_file_path, repo_config_age)
for reposdir in self.conf.reposdir:
# this check makes sure that our dirs exist properly.
# if they aren't in the installroot then don't prepend the installroot path
# if we don't do this then anaconda likes to not work.
if os.path.exists(self.conf.installroot+'/'+reposdir):
reposdir = self.conf.installroot + '/' + reposdir
reposdir = os.path.normpath(reposdir)
if os.path.isdir(reposdir):
for repofn in sorted(glob.glob('%s/*.repo' % reposdir)):
if not os.access(repofn, os.R_OK):
self.logger.warning(_("Skipping unreadable repository %s"), repr(repofn))
continue
thisrepo_age = os.stat(repofn)[8]
if thisrepo_age < repo_config_age:
thisrepo_age = repo_config_age
self.getReposFromConfigFile(repofn, repo_age=thisrepo_age)
def readRepoConfig(self, parser, section):
"""Parse an INI file section for a repository.
:param parser: :class:`ConfigParser` or similar object to read
INI file values from
:param section: INI file section to read
:return: :class:`yum.yumRepo.YumRepository` instance
"""
repo = yumRepo.YumRepository(section)
try:
repo.populate(parser, section, self.conf)
except ValueError, e:
msg = _('Repository %r: Error parsing config: %s' % (section,e))
raise Errors.ConfigError, msg
# Ensure that the repo name is set
if not repo.name:
repo.name = section
self.logger.error(_('Repository %r is missing name in configuration, '
'using id') % section)
repo.name = to_unicode(repo.name)
# Set attributes not from the config file
repo.old_base_cache_dir = getattr(self, '_old_cachedir', '')
repo.basecachedir = self.conf.cachedir
repo.yumvar.update(self.conf.yumvar)
repo._cashe = self._cashe
repo.cfg = parser
# Enable parallel downloading
repo._async = repo.async
# Allow caching local repos
if repo.keepcache and any(u.startswith('file:') for u in repo.baseurl):
repo.copy_local = True
return repo
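    # A hedged illustration of the INI input this method parses - a typical
    # .repo section (option names as in yum.conf(5); the values are made up):
    #
    #   [updates]
    #   name=Distro $releasever - $basearch - Updates
    #   baseurl=http://example.com/$releasever/$basearch/
    #   enabled=1
    #   gpgcheck=1
    #   cost=1000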
def disablePlugins(self):
"""Disable yum plugins."""
self.plugins = plugins.DummyYumPlugins()
def doPluginSetup(self, optparser=None, plugin_types=None, searchpath=None,
confpath=None,disabled_plugins=None,enabled_plugins=None):
"""Initialise and enable yum plugins.
Note: _getConfig() will also initialise plugins if instructed
to. Only call this method directly if not calling _getConfig()
or calling doConfigSetup(init_plugins=False).
:param optparser: the :class:`OptionParser` instance to use
for this run
:param plugin_types: a sequence specifying the types of plugins to load.
This should be a sequence containing one or more of the
yum.plugins.TYPE_... constants. If None (the default), all plugins
will be loaded
:param searchpath: a list of directories to look in for plugins. A
default will be used if no value is specified
:param confpath: a list of directories to look in for plugin
configuration files. A default will be used if no value is
specified
:param disabled_plugins: a list of plugins to be disabled
:param enabled_plugins: a list plugins to be enabled
"""
if isinstance(self.plugins, plugins.YumPlugins):
raise RuntimeError(_("plugins already initialised"))
self.plugins = plugins.YumPlugins(self, searchpath, optparser,
plugin_types, confpath, disabled_plugins, enabled_plugins)
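    # A hedged sketch of calling this directly, per the docstring's caveat
    # (self.plugins must not already be a real YumPlugins instance):
    #
    #   yb = YumBase()
    #   yb.preconf.init_plugins = False
    #   yb.conf                                        # config, no plugins
    #   yb.doPluginSetup(plugin_types=(plugins.TYPE_CORE,))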
def doRpmDBSetup(self):
"""Deprecated. Set up the rpm database."""
warnings.warn(_('doRpmDBSetup() will go away in a future version of Yum.\n'),
Errors.YumFutureDeprecationWarning, stacklevel=2)
return self._getRpmDB()
def _getRpmDB(self):
"""sets up a holder object for important information from the rpmdb"""
if self._rpmdb is None:
rpmdb_st = time.time()
self.verbose_logger.log(logginglevels.DEBUG_4,
_('Reading Local RPMDB'))
self._rpmdb = rpmsack.RPMDBPackageSack(root=self.conf.installroot,
releasever=self.conf.yumvar['releasever'],
persistdir=self.conf.persistdir)
self.verbose_logger.debug('rpmdb time: %0.3f' % (time.time() - rpmdb_st))
return self._rpmdb
def closeRpmDB(self):
"""Closes down the instances of rpmdb that could be open."""
if self._rpmdb is not None:
self._rpmdb.ts = None
self._rpmdb.dropCachedData()
self._rpmdb = None
self._ts = None
self._tsInfo = None
self._up = None
self.comps = None
self.igroups = None
def _deleteTs(self):
del self._ts
self._ts = None
def doRepoSetup(self, thisrepo=None):
"""Deprecated. Set up the yum repositories.
:param thisrepo: the repository to set up. If None, all
repositories will be set up
:return: the set up repos
"""
warnings.warn(_('doRepoSetup() will go away in a future version of Yum.\n'),
Errors.YumFutureDeprecationWarning, stacklevel=2)
return self._getRepos(thisrepo, True)
def _getRepos(self, thisrepo=None, doSetup = False):
""" For each enabled repository set up the basics of the repository. """
if hasattr(self, 'prerepoconf'):
self.conf # touch the config class first
if (self.conf.installroot != '/' and
not hasattr(self, '_old_cachedir')):
# Try loading cache from outside...
ir = len(self.conf.installroot)
self._old_cachedir = self.conf.cachedir[ir:]
self.getReposFromConfig()
# For rhnplugin, and in theory other stuff, calling
# .getReposFromConfig() recurses back into this function but only once.
# This means that we have two points on the stack leaving the above call
# but only one of them can do the repos setup. BZ 678043.
if hasattr(self, 'prerepoconf'):
# Recursion
prerepoconf = self.prerepoconf
del self.prerepoconf
self.repos.setProgressBar(prerepoconf.progressbar,
prerepoconf.multi_progressbar)
self.repos.callback = prerepoconf.callback
self.repos.setFailureCallback(prerepoconf.failure_callback)
self.repos.setInterruptCallback(prerepoconf.interrupt_callback)
self.repos.confirm_func = prerepoconf.confirm_func
self.repos.gpg_import_func = prerepoconf.gpg_import_func
self.repos.gpgca_import_func = prerepoconf.gpgca_import_func
if prerepoconf.cachedir is not None:
self.repos.setCacheDir(prerepoconf.cachedir)
if prerepoconf.cache is not None:
self.repos.setCache(prerepoconf.cache)
if doSetup:
repo_st = time.time()
self._repos.doSetup(thisrepo)
self.verbose_logger.debug('repo time: %0.3f' % (time.time() - repo_st))
return self._repos
def _delRepos(self):
del self._repos
self._repos = RepoStorage(self)
def doSackSetup(self, archlist=None, thisrepo=None):
"""Deprecated. Populate the package sacks with information
from our repositories.
:param archlist: a list of the names of archs to include. If
None, all arches are set up
:param thisrepo: the repository to use. If None, all enabled
repositories are used
"""
warnings.warn(_('doSackSetup() will go away in a future version of Yum.\n'),
Errors.YumFutureDeprecationWarning, stacklevel=2)
return self._getSacks(archlist=archlist, thisrepo=thisrepo)
def _getSacks(self, archlist=None, thisrepo=None):
"""populates the package sacks for information from our repositories,
takes optional archlist for archs to include"""
# FIXME: Fist of death ... normally we'd do either:
#
# 1. use self._pkgSack is not None, and only init. once.
# 2. auto. correctly re-init each time a repo is added/removed
#
# ...we should probably just smeg it and do #2, but it's hard and will
# probably break something (but it'll "fix" excludes).
# #1 can't be done atm. because we did self._pkgSack and external
# tools now rely on being able to create an empty sack and then have it
# auto. re-init when they add some stuff. So we add a bit more "clever"
# and don't setup the pkgSack to not be None when it's empty. This means
# we skip excludes/includes/etc. ... but there's no packages, so
# hopefully that's ok.
if self._pkgSack is not None and thisrepo is None:
return self._pkgSack
if thisrepo is None:
repos = 'enabled'
else:
repos = self.repos.findRepos(thisrepo)
self.verbose_logger.debug(_('Setting up Package Sacks'))
sack_st = time.time()
if not archlist:
archlist = self.arch.archlist
archdict = {}
for arch in archlist:
archdict[arch] = 1
self.repos.getPackageSack().setCompatArchs(archdict)
self.repos.populateSack(which=repos)
if not self.repos.getPackageSack():
return self.repos.getPackageSack() # ha ha, see above
self._pkgSack = self.repos.getPackageSack()
self.excludePackages()
self._pkgSack.excludeArchs(archlist)
#FIXME - this could be faster, too.
if repos == 'enabled':
repos = self.repos.listEnabled()
for repo in repos:
self.includePackages(repo)
self.excludePackages(repo)
self.plugins.run('exclude')
self._pkgSack.buildIndexes()
# now go through and kill pkgs based on pkg.repo.cost()
self.costExcludePackages()
self.verbose_logger.debug('pkgsack time: %0.3f' % (time.time() - sack_st))
return self._pkgSack
def _delSacks(self):
"""reset the package sacks back to zero - making sure to nuke the ones
in the repo objects, too - where it matters"""
# nuke the top layer
self._pkgSack = None
for repo in self.repos.repos.values():
if hasattr(repo, '_resetSack'):
repo._resetSack()
else:
                warnings.warn(_('repo object for repo %s lacks a _resetSack method\n') % repo.id +
                              _('therefore this repo cannot be reset.\n'),
                              Errors.YumFutureDeprecationWarning, stacklevel=2)
def doUpdateSetup(self):
"""Deprecated. Set up the update object in the base class and populate the
updates, obsoletes, and other lists.
"""
warnings.warn(_('doUpdateSetup() will go away in a future version of Yum.\n'),
Errors.YumFutureDeprecationWarning, stacklevel=2)
return self._getUpdates()
def _getUpdates(self):
"""setups up the update object in the base class and fills out the
updates, obsoletes and others lists"""
if self._up:
return self._up
self.verbose_logger.debug(_('Building updates object'))
up_st = time.time()
self._up = rpmUtils.updates.Updates(self.rpmdb.simplePkgList(), self.pkgSack.simplePkgList())
if self.conf.debuglevel >= 7:
self._up.debug = 1
if hasattr(self, '_up_obs_hack'):
self._up.rawobsoletes = self._up_obs_hack.rawobsoletes
del self._up_obs_hack
elif self.conf.obsoletes:
obs_init = time.time()
# Note: newest=True here is semi-required for repos. with multiple
# versions. The problem is that if pkgA-2 _accidentally_ obsoletes
# pkgB-1, and we keep all versions, we want to release a pkgA-3
# that doesn't do the obsoletes ... and thus. not obsolete pkgB-1.
self._up.rawobsoletes = self.pkgSack.returnObsoletes(newest=True)
self.verbose_logger.debug('up:Obs Init time: %0.3f' % (time.time() - obs_init))
self._up.myarch = self.arch.canonarch
self._up._is_multilib = self.arch.multilib
self._up._archlist = self.arch.archlist
self._up._multilib_compat_arches = self.arch.compatarches
self._up.exactarch = self.conf.exactarch
self._up.exactarchlist = self.conf.exactarchlist
up_pr_st = time.time()
self._up.doUpdates()
self.verbose_logger.debug('up:simple updates time: %0.3f' % (time.time() - up_pr_st))
if self.conf.obsoletes:
obs_st = time.time()
self._up.doObsoletes()
self.verbose_logger.debug('up:obs time: %0.3f' % (time.time() - obs_st))
cond_up_st = time.time()
self._up.condenseUpdates()
self.verbose_logger.debug('up:condense time: %0.3f' % (time.time() - cond_up_st))
self.verbose_logger.debug('updates time: %0.3f' % (time.time() - up_st))
return self._up
def doGroupSetup(self):
"""Deprecated. Create and populate the groups object."""
warnings.warn(_('doGroupSetup() will go away in a future version of Yum.\n'),
Errors.YumFutureDeprecationWarning, stacklevel=2)
self.comps = None
return self._getGroups()
def _setGroups(self, val):
if val is None:
# if we unset the comps object, we need to undo which repos have
# been added to the group file as well
if self._repos:
                # Used to do listGroupsEnabled(), which seems fine but requires
                # calling .listEnabled() ... which doesn't work on __del__ path
# if we haven't already called that (due to
# "prelistenabledrepos" plugins). So just blank it for
# all repos.
for repo in self._repos.sort():
repo.groups_added = False
self._comps = val
def _getGroups(self):
"""create the groups object that will store the comps metadata
finds the repos with groups, gets their comps data and merge it
into the group object"""
if self._comps:
return self._comps
group_st = time.time()
self.verbose_logger.log(logginglevels.DEBUG_4,
_('Getting group metadata'))
reposWithGroups = []
# Need to make sure the groups data is ready to read. Really we'd want
# to add groups to the mdpolicy list of the repo. but we don't atm.
self.pkgSack
for repo in self.repos.listGroupsEnabled():
if repo.groups_added: # already added the groups from this repo
reposWithGroups.append(repo)
continue
if not repo.ready():
raise Errors.RepoError("Repository '%s' not yet setup" % repo,
repo=repo)
try:
groupremote = repo.getGroupLocation()
except Errors.RepoMDError, e:
pass
else:
reposWithGroups.append(repo)
# now we know which repos actually have groups files.
overwrite = self.conf.overwrite_groups
self._comps = comps.Comps(overwrite_groups = overwrite)
if self.conf.group_command == 'objects':
# Add the ids for installed groups/envs as though sys is a repo.
# makes everything easier (comps.return_groups() etc. works)...
self._comps.compscount += 1
for gid in self.igroups.groups:
grp = comps.Group()
grp.name = grp.groupid = gid
grp._weak = True
self._comps.add_group(grp)
for evgid in self.igroups.environments:
env = comps.Environment()
env.name = env.environmentid = evgid
env._weak = True
self._comps.add_environment(env)
for repo in reposWithGroups:
if repo.groups_added: # already added the groups from this repo
continue
self.verbose_logger.log(logginglevels.DEBUG_4,
_('Adding group file from repository: %s'), repo)
groupfile = repo.getGroups()
if not groupfile:
msg = _('Failed to retrieve group file for repository: %s') % repo
self.logger.critical(msg)
continue
try:
self._comps.add(groupfile)
except (Errors.GroupsError,Errors.CompsException), e:
msg = _('Failed to add groups file for repository: %s - %s') % (repo, exception2msg(e))
self.logger.critical(msg)
else:
repo.groups_added = True
if self._comps.compscount == 0:
raise Errors.GroupsError, _('No Groups Available in any repository')
# Note that this means that grp.installed is not usable, when using
# groups as objects ... but that's GOOD.
if self.conf.group_command != 'objects':
self._comps.compile(self.rpmdb.simplePkgList())
self.verbose_logger.debug('group time: %0.3f' % (time.time() - group_st))
return self._comps
def _getTags(self):
""" create the tags object used to search/report from the pkgtags
metadata"""
tag_st = time.time()
self.verbose_logger.log(logginglevels.DEBUG_4,
_('Getting pkgtags metadata'))
if self._tags is None:
self._tags = yum.pkgtag_db.PackageTags()
for repo in self.repos.listEnabled():
if 'pkgtags' not in repo.repoXML.fileTypes():
continue
self.verbose_logger.log(logginglevels.DEBUG_4,
_('Adding tags from repository: %s'), repo)
# fetch the sqlite tagdb
try:
tag_md = repo.retrieveMD('pkgtags')
tag_sqlite = misc.repo_gen_decompress(tag_md,
'pkgtags.sqlite',
cached=repo.cache)
# feed it into _tags.add()
self._tags.add(repo.id, tag_sqlite)
except (Errors.RepoError, Errors.PkgTagsError), e:
msg = _('Failed to add Pkg Tags for repository: %s - %s') % (repo, exception2msg(e))
self.logger.critical(msg)
self.verbose_logger.debug('tags time: %0.3f' % (time.time() - tag_st))
return self._tags
def _getUpdateinfo(self):
""" create the Update Info object used to search/report the updateinfo
metadata"""
upi_st = time.time()
self.verbose_logger.log(logginglevels.DEBUG_4,
_('Getting updateinfo metadata'))
if self._upinfo is None:
logger = logging.getLogger("yum.update_md")
vlogger = logging.getLogger("yum.verbose.update_md")
self._upinfo = update_md.UpdateMetadata(logger=logger,
vlogger=vlogger)
for repo in self.repos.listEnabled():
if 'updateinfo' not in repo.repoXML.fileTypes():
continue
self.verbose_logger.log(logginglevels.DEBUG_4,
_('Adding Update Info from repository: %s'), repo)
try:
self._upinfo.add(repo)
except Errors.RepoMDError, e:
msg = _('Failed to add Update Info for repository: %s - %s') % (repo, exception2msg(e))
self.logger.critical(msg)
self.verbose_logger.debug('updateinfo time: %0.3f' %
(time.time() - upi_st))
return self._upinfo
def _getHistory(self):
"""auto create the history object that to access/append the transaction
history information. """
if self._history is None:
pdb_path = self.conf.persistdir + "/history"
self._history = yum.history.YumHistory(root=self.conf.installroot,
db_path=pdb_path,
releasever=self.conf.yumvar['releasever'])
return self._history
def _getFSsnap(self):
""" create the fssnap object used to query/create snapshots. """
if self._fssnap is None:
devices = self.conf.fssnap_devices
self._fssnap = yum.fssnapshots._FSSnap(root=self.conf.installroot,
devices=devices)
return self._fssnap
def _getIGroups(self):
"""auto create the installed groups object that to access/change the
installed groups information. """
if self._igroups is None:
pdb_path = self.conf.persistdir + "/groups"
self._igroups = yum.igroups.InstalledGroups(db_path=pdb_path)
return self._igroups
# properties so they auto-create themselves with defaults
repos = property(fget=lambda self: self._getRepos(),
fset=lambda self, value: setattr(self, "_repos", value),
fdel=lambda self: self._delRepos(),
doc="Repo Storage object - object of yum repositories")
pkgSack = property(fget=lambda self: self._getSacks(),
fset=lambda self, value: setattr(self, "_pkgSack", value),
fdel=lambda self: self._delSacks(),
doc="Package sack object - object of yum package objects")
conf = property(fget=lambda self: self._getConfig(),
fset=lambda self, value: setattr(self, "_conf", value),
fdel=lambda self: setattr(self, "_conf", None),
doc="Yum Config Object")
rpmdb = property(fget=lambda self: self._getRpmDB(),
fset=lambda self, value: setattr(self, "_rpmdb", value),
fdel=lambda self: setattr(self, "_rpmdb", None),
doc="RpmSack object")
tsInfo = property(fget=lambda self: self._getTsInfo(),
fset=lambda self,value: self._setTsInfo(value),
fdel=lambda self: self._delTsInfo(),
doc="Transaction Set information object")
ts = property(fget=lambda self: self._getActionTs(),
fdel=lambda self: self._deleteTs(),
doc="TransactionSet object")
up = property(fget=lambda self: self._getUpdates(),
fset=lambda self, value: setattr(self, "_up", value),
fdel=lambda self: setattr(self, "_up", None),
doc="Updates Object")
comps = property(fget=lambda self: self._getGroups(),
fset=lambda self, value: self._setGroups(value),
fdel=lambda self: setattr(self, "_comps", None),
doc="Yum Component/groups object")
history = property(fget=lambda self: self._getHistory(),
fset=lambda self, value: setattr(self, "_history",value),
fdel=lambda self: setattr(self, "_history", None),
doc="Yum History Object")
igroups = property(fget=lambda self: self._getIGroups(),
fset=lambda self, value: setattr(self, "_igroups",value),
fdel=lambda self: setattr(self, "_igroups", None),
doc="Yum Installed Groups Object")
pkgtags = property(fget=lambda self: self._getTags(),
fset=lambda self, value: setattr(self, "_tags",value),
fdel=lambda self: setattr(self, "_tags", None),
doc="Yum Package Tags Object")
upinfo = property(fget=lambda self: self._getUpdateinfo(),
fset=lambda self, value: setattr(self, "_upinfo", value),
fdel=lambda self: setattr(self, "_upinfo", None),
doc="Yum Update Info Object")
fssnap = property(fget=lambda self: self._getFSsnap(),
fset=lambda self, value: setattr(self, "_fssnap",value),
fdel=lambda self: setattr(self, "_fssnap", None),
doc="Yum FS snapshot Object")
def doSackFilelistPopulate(self):
"""Convenience function to populate the repositories with the
filelist metadata, and emit a log message only if new
information is actually populated.
"""
necessary = False
# I can't think of a nice way of doing this, we have to have the sack here
# first or the below does nothing so...
if self.pkgSack:
for repo in self.repos.listEnabled():
if repo in repo.sack.added:
if 'filelists' in repo.sack.added[repo]:
continue
else:
necessary = True
else:
necessary = True
if necessary:
msg = _('Importing additional filelist information')
self.verbose_logger.log(logginglevels.INFO_2, msg)
self.repos.populateSack(mdtype='filelists')
def yumUtilsMsg(self, func, prog):
"""Output a message that the given tool requires the yum-utils
package, if it not installed.
:param func: the function to output the message
:param prog: the name of the tool that requires yum-utils
"""
if self.rpmdb.contains(name="yum-utils"):
return
func(_("The program %s is found in the yum-utils package.") % self._try_bold(prog))
def buildTransaction(self, unfinished_transactions_check=True):
"""Go through the packages in the transaction set, find them
in the packageSack or rpmdb, and pack up the transaction set
accordingly.
:param unfinished_transactions_check: whether to check for
unfinished transactions before building the new transaction
"""
# FIXME: This is horrible, see below and yummain. Maybe create a real
# rescode object? :(
self._depsolving_failed = False
if (unfinished_transactions_check and
misc.find_unfinished_transactions(yumlibpath=self.conf.persistdir)):
msg = _('There are unfinished transactions remaining. You might ' \
'consider running yum-complete-transaction, or' \
' "yum-complete-transaction --cleanup-only" and' \
' "yum history redo last", first to finish them. If those' \
' don\'t work you\'ll have to try removing/installing' \
' packages by hand (maybe package-cleanup can help).')
self.logger.critical(msg)
self.yumUtilsMsg(self.logger.critical, "yum-complete-transaction")
time.sleep(3)
# XXX - we could add a conditional here to avoid running the plugins and
# limit_installonly_pkgs, etc - if we're being run from yum-complete-transaction
# and don't want it to happen. - skv
self.plugins.run('preresolve')
ds_st = time.time()
(rescode, restring) = self.resolveDeps()
self._limit_installonly_pkgs()
        # If enabled, clean up requirements when removing the things which brought them in.
if self.conf.clean_requirements_on_remove:
self.verbose_logger.log(logginglevels.INFO_2, _('--> Finding unneeded leftover dependencies'))
self._remove_old_deps()
# We _must_ get rid of all the used tses before we go on, so that C-c
# works for downloads / mirror failover etc.
kern_pkgtup = None
if rescode == 2 and self.conf.protected_packages:
kern_pkgtup =misc.get_running_kernel_pkgtup(self.rpmdb.readOnlyTS())
self.rpmdb.ts = None
# do the skip broken magic, if enabled and problems exist
(rescode, restring) = self._doSkipBroken(rescode, restring)
self.plugins.run('postresolve', rescode=rescode, restring=restring)
if self.tsInfo.changed:
(rescode, restring) = self.resolveDeps(rescode == 1)
# If transaction was changed by postresolve plugins then we should run skipbroken again
(rescode, restring) = self._doSkipBroken(rescode, restring, clear_skipped=False )
if self.tsInfo._pkgSack is not None: # Transactions have pkgSack?
self.tsInfo.pkgSack.dropCachedData()
# FIXME: This is horrible, see below and yummain. Maybe create a real
# rescode object? :(
self._depsolving_failed = rescode == 1
txmbrs = []
if rescode == 2 and self.conf.protected_multilib and self.arch.multilib:
txmbrs = self.tsInfo.getMembersWithState(None, TS_INSTALL_STATES)
vers = {}
for txmbr in txmbrs:
if self.allowedMultipleInstalls(txmbr.po):
continue # Just allow these, it's easier.
# In theory we could skip noarch packages here, but it's really
# fast and there are some edge cases where it'll help.
if txmbr.name not in vers:
vers[txmbr.name] = [txmbr.po]
continue
vers[txmbr.name].append(txmbr.po)
fine = []
xrestring = []
for pkgname in vers:
if len(vers[pkgname]) <= 1:
                    # We have to go grovelling through the rpmdb data to get
                    # any already-installed versions of this package.
for pkg in self.rpmdb.searchNames([pkgname]):
if self.tsInfo.getMembersWithState(pkg.pkgtup,
TS_REMOVE_STATES):
continue
vers[pkgname].append(pkg)
# If we have multiple packages, they should be of different arches
# and so if all the versions are equal, we should be fine.
first = vers[pkgname][0]
for other in vers[pkgname][1:]:
if first.verEQ(other):
continue
msg = _('Protected multilib versions: %s != %s')
if not xrestring:
                        # People are confused about protected multilib ... so give
# them a nicer message.
bigmsg = _("""\
Multilib version problems found. This often means that the root
cause is something else and multilib version checking is just
pointing out that there is a problem. Eg.:
1. You have an upgrade for %(name)s which is missing some
dependency that another package requires. Yum is trying to
solve this by installing an older version of %(name)s of the
different architecture. If you exclude the bad architecture
yum will tell you what the root cause is (which package
requires what). You can try redoing the upgrade with
--exclude %(name)s.otherarch ... this should give you an error
message showing the root cause of the problem.
2. You have multiple architectures of %(name)s installed, but
yum can only see an upgrade for one of those architectures.
If you don't want/need both architectures anymore then you
can remove the one with the missing update and everything
will work.
3. You have duplicate versions of %(name)s installed already.
You can use "yum check" to get yum show these errors.
...you can also use --setopt=protected_multilib=false to remove
this checking, however this is almost never the correct thing to
do as something else is very likely to go wrong (often causing
much more problems).
""") % {'name' : pkgname}
msg = bigmsg + msg
xrestring.append(msg % (first, other))
if xrestring:
rescode = 1
self._depsolving_failed = False
restring = xrestring
# This is a version of the old "protect-packages" plugin, it allows
# you to erase duplicates and do remove+install.
# But we don't allow you to turn it off!:)
protect_states = [TS_OBSOLETED, TS_ERASE]
txmbrs = []
if rescode == 2 and self.conf.protected_packages:
protected = set(self.conf.protected_packages)
txmbrs = self.tsInfo.getMembersWithState(None, protect_states)
bad_togo = {}
for txmbr in txmbrs:
if kern_pkgtup is not None and txmbr.pkgtup == kern_pkgtup:
pass
elif kern_pkgtup is not None and txmbr.name == kern_pkgtup[0]:
# We don't care if they've explicitly set protected on the
# kernel package. Because we don't allow you to uninstall the
# running one so it has _special_ semantics anyway.
continue
elif txmbr.name not in protected:
continue
if txmbr.name not in bad_togo:
bad_togo[txmbr.name] = []
bad_togo[txmbr.name].append(txmbr.pkgtup)
for ipkg in self.rpmdb.searchNames(bad_togo.keys()):
if (kern_pkgtup is not None and ipkg.name == kern_pkgtup[0] and
kern_pkgtup in bad_togo[kern_pkgtup[0]]):
continue # If "running kernel" matches, it's always bad.
if ipkg.name not in bad_togo:
continue
# If there is at least one version not being removed, allow it
if ipkg.pkgtup not in bad_togo[ipkg.name]:
del bad_togo[ipkg.name]
for pkgname in bad_togo.keys():
if (kern_pkgtup is not None and pkgname == kern_pkgtup[0] and
kern_pkgtup in bad_togo[kern_pkgtup[0]]):
continue # If "running kernel" matches, it's always bad.
for txmbr in self.tsInfo.matchNaevr(name=pkgname):
if txmbr.name not in bad_togo:
continue
                    if txmbr.pkgtup in bad_togo[txmbr.name]:
                        continue
                    # If we are installing one version we aren't removing, allow it
                    if txmbr.output_state in TS_INSTALL_STATES:
                        del bad_togo[txmbr.name]
if bad_togo:
rescode = 1
restring = []
for pkgname in sorted(bad_togo):
restring.append(_('Trying to remove "%s", which is protected') %
pkgname)
self._depsolving_failed = False
if rescode == 2:
self.save_ts(auto=True)
self.verbose_logger.debug('Depsolve time: %0.3f' % (time.time() - ds_st))
return rescode, restring
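    # A hedged sketch of the return protocol used above: rescode 2 means
    # the transaction resolved (and was auto-saved), rescode 1 means
    # depsolving or the protected-packages/multilib checks failed and
    # restring carries the error strings.
    #
    #   (rescode, restring) = yb.buildTransaction()
    #   if rescode == 1:
    #       for msg in restring: print msg
    #   elif rescode == 2:
    #       yb.processTransaction()   # download/check/run helper elsewhere
    #                                 # in this class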
def _doSkipBroken(self,rescode, restring, clear_skipped=True):
''' do skip broken if it is enabled '''
# if depsolve failed and skipbroken is enabled
# The remove the broken packages from the transactions and
# Try another depsolve
if self.conf.skip_broken and rescode==1:
if clear_skipped:
self.skipped_packages = [] # reset the public list of skipped packages.
sb_st = time.time()
rescode, restring = self._skipPackagesWithProblems(rescode, restring)
self._printTransaction()
self.verbose_logger.debug('Skip-Broken time: %0.3f' % (time.time() - sb_st))
return (rescode, restring)
def _skipPackagesWithProblems(self, rescode, restring):
''' Remove the packages with depsolve errors and depsolve again '''
def _remove(po, depTree, toRemove):
if not po:
return
self._getPackagesToRemove(po, depTree, toRemove)
# Only remove non installed packages from pkgSack
_remove_from_sack(po)
def _remove_from_sack(po):
# get all compatible arch packages from pkgSack
# we need to remove them too so i386 packages are not
# dragged in when a x86_64 is skipped.
pkgs = self._getPackagesToRemoveAllArch(po)
for pkg in pkgs:
if not po.repoid == 'installed' and pkg not in removed_from_sack:
self.verbose_logger.debug('SKIPBROKEN: removing %s from pkgSack & updates' % str(po))
self.pkgSack.delPackage(pkg)
self.up.delPackage(pkg.pkgtup)
removed_from_sack.add(pkg)
# Keep removing packages & Depsolve until all errors is gone
# or the transaction is empty
count = 0
skipped_po = set()
removed_from_sack = set()
orig_restring = restring # Keep the old error messages
looping = 0
while (len(self.po_with_problems) > 0 and rescode == 1):
count += 1
# Remove all the rpmdb cache data, this is somewhat heavy handed
# but easier than removing/altering specific bits of the cache ...
# and skip-broken shouldn't care too much about speed.
self.rpmdb.transactionReset()
self.installedFileRequires = None # Kind of hacky
self.verbose_logger.debug("SKIPBROKEN: ########### Round %i ################" , count)
if count == 30: # Failsafe, to avoid endless looping
self.verbose_logger.debug('SKIPBROKEN: Too many loops ')
break
self._printTransaction()
depTree = self._buildDepTree()
startTs = set(self.tsInfo)
toRemove = set()
for po,wpo,err in self.po_with_problems:
# check if the problem is caused by a package in the transaction
if not self.tsInfo.exists(po.pkgtup):
_remove(wpo, depTree, toRemove)
else:
_remove(po, depTree, toRemove)
for po in toRemove:
skipped = self._skipFromTransaction(po)
for skip in skipped:
skipped_po.add(skip)
# make sure we get the compat arch packages skip from pkgSack and up too.
if skip not in removed_from_sack and skip.repoid != 'installed':
_remove_from_sack(skip)
# Nothing was removed, so we still got a problem
            # The first time we get here we reset the resolved members of
            # tsInfo and take a new run over all members in the current transaction
if not toRemove:
looping += 1
if looping > 2:
break # Bail out
else:
self.verbose_logger.debug('SKIPBROKEN: resetting already resolved packages (no packages to skip)' )
self.tsInfo.resetResolved(hard=True)
rescode, restring = self.resolveDeps(True, skipping_broken=True)
endTs = set(self.tsInfo)
# Check if tsInfo has changes since we started to skip packages
# if there is no changes then we got a loop.
            # The first time we get here we reset the resolved members of
            # tsInfo and take a new run over all members in the current transaction
if startTs-endTs == set():
looping += 1
if looping > 2:
break # Bail out
else:
self.verbose_logger.debug('SKIPBROKEN: resetting already resolved packages (transaction not changed)' )
self.tsInfo.resetResolved(hard=True)
else:
                # Reset the looping counter, because it is only a loop if the same transaction is
                # unchanged two times in a row, not if it was unchanged at an earlier stage.
looping = 0
# if we are all clear, then we have to check that the whole current transaction
# can complete the depsolve without error, because the packages skipped
# can have broken something that passed the tests earlier.
# FIXME: We need do this in a better way.
if rescode != 1:
self.verbose_logger.debug('SKIPBROKEN: sanity check the current transaction' )
self.tsInfo.resetResolved(hard=True)
self._checkMissingObsoleted() # This is totally insane, but needed :(
self._checkUpdatedLeftovers() # Cleanup updated leftovers
rescode, restring = self.resolveDeps()
if rescode != 1:
self.verbose_logger.debug("SKIPBROKEN: took %i rounds ", count)
self.verbose_logger.info(_('\nPackages skipped because of dependency problems:'))
skipped_list = [p for p in skipped_po]
skipped_list.sort()
for po in skipped_list:
msg = _(" %s from %s") % (str(po),po.repo.id)
self.verbose_logger.info(msg)
self.skipped_packages.extend(skipped_list) # make the skipped packages public
else:
            # If we can't solve the problems then show the original error messages.
self.verbose_logger.info("Skip-broken could not solve problems")
return 1, orig_restring
return rescode, restring
def _add_not_found(self, pkgs, nevra_dict):
if pkgs:
return None
pkgtup = (nevra_dict['name'], nevra_dict['arch'],
nevra_dict['epoch'], nevra_dict['version'],
nevra_dict['release'])
if None in pkgtup:
return None
return pkgtup
def _add_not_found_a(self, pkgs, nevra_dict={}, pkgtup=None):
if pkgtup is None and nevra_dict:
pkgtup = self._add_not_found(pkgs, nevra_dict)
if pkgtup is None:
return
self._not_found_a[pkgtup] = YumNotFoundPackage(pkgtup)
def _add_not_found_i(self, pkgs, nevra_dict={}, pkgtup=None):
if pkgtup is None and nevra_dict:
pkgtup = self._add_not_found(pkgs, nevra_dict)
if pkgtup is None:
return
self._not_found_i[pkgtup] = YumNotFoundPackage(pkgtup)
def _checkMissingObsoleted(self):
"""
        If multiple packages are obsoleting the same package
        then the TS_OBSOLETED member can get removed from the transaction,
        so we must make sure they exist, and create them if they don't
"""
for txmbr in self.tsInfo.getMembersWithState(None, [TS_OBSOLETING,TS_OBSOLETED]):
for pkg in txmbr.obsoletes:
if not self.tsInfo.exists(pkg.pkgtup):
obs = self.tsInfo.addObsoleted(pkg,txmbr.po)
self.verbose_logger.debug('SKIPBROKEN: Added missing obsoleted %s (%s)' % (pkg,txmbr.po) )
for pkg in txmbr.obsoleted_by:
# check if the obsoleting txmbr is in the transaction
# else remove the obsoleted txmbr
                # it cleans out some really weird cases
if not self.tsInfo.exists(pkg.pkgtup):
self.verbose_logger.debug('SKIPBROKEN: Remove extra obsoleted %s (%s)' % (txmbr.po,pkg) )
self.tsInfo.remove(txmbr.po.pkgtup)
def _checkUpdatedLeftovers(self):
"""
        If multiple packages update the same package
        and this package gets removed because of a dep issue,
        then make sure that all the TS_UPDATED members get removed.
"""
for txmbr in self.tsInfo.getMembersWithState(None, [TS_UPDATED]):
for pkg in txmbr.updated_by:
# check if the updating txmbr is in the transaction
# else remove the updated txmbr
                # it cleans out some really weird cases with dupes installed on the system
if not self.tsInfo.exists(pkg.pkgtup):
self.verbose_logger.debug('SKIPBROKEN: Remove extra updated %s (%s)' % (txmbr.po,pkg) )
self.tsInfo.remove(txmbr.po.pkgtup)
def _getPackagesToRemoveAllArch(self,po):
''' get all compatible arch packages in pkgSack'''
pkgs = []
if self.arch.multilib:
n,a,e,v,r = po.pkgtup
# skip for all compat archs
for a in self.arch.archlist:
pkgtup = (n,a,e,v,r)
matched = self.pkgSack.searchNevra(n,e,v,r,a)
pkgs.extend(matched)
else:
pkgs.append(po)
return pkgs
def _skipFromTransaction(self,po):
skipped = []
n,a,e,v,r = po.pkgtup
# skip for all compat archs
for a in self.arch.archlist:
pkgtup = (n,a,e,v,r)
if self.tsInfo.exists(pkgtup):
for txmbr in self.tsInfo.getMembers(pkgtup):
pkg = txmbr.po
skip = self._removePoFromTransaction(pkg)
skipped.extend(skip)
return skipped
def _removePoFromTransaction(self,po):
skip = []
if self.tsInfo.exists(po.pkgtup):
self.verbose_logger.debug('SKIPBROKEN: removing %s from transaction' % str(po))
self.tsInfo.remove(po.pkgtup)
if not po.repoid == 'installed':
skip.append(po)
return skip
def _buildDepTree(self):
''' create a dictionary with po and deps '''
depTree = { }
for txmbr in self.tsInfo:
for dep in txmbr.depends_on:
depTree.setdefault(dep, []).append(txmbr.po)
# self._printDepTree(depTree)
return depTree
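    # A hedged sketch of the returned mapping: dependency package object ->
    # list of transaction members' package objects that depend on it,
    # roughly { <libfoo po> : [<bar po>, <baz po>] }. _getDepsToRemove()
    # walks this when skip-broken prunes a package.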
def _printDepTree(self, tree):
for pkg, l in tree.iteritems():
print pkg
for p in l:
print "\t", p
def _printTransaction(self):
#transaction set states
state = { TS_UPDATE : "update",
TS_INSTALL : "install",
TS_TRUEINSTALL: "trueinstall",
TS_ERASE : "erase",
TS_OBSOLETED : "obsoleted",
TS_OBSOLETING : "obsoleting",
TS_AVAILABLE : "available",
TS_UPDATED : "updated"}
self.verbose_logger.log(logginglevels.DEBUG_2,"SKIPBROKEN: Current Transaction : %i member(s) " % len(self.tsInfo))
for txmbr in sorted(self.tsInfo):
msg = "SKIPBROKEN: %-11s : %s " % (state[txmbr.output_state],txmbr.po)
self.verbose_logger.log(logginglevels.DEBUG_2, msg)
for po,rel in sorted(set(txmbr.relatedto)):
msg = "SKIPBROKEN: %s : %s" % (rel,po)
self.verbose_logger.log(logginglevels.DEBUG_2, msg)
self.verbose_logger.log(logginglevels.DEBUG_2,"SKIPBROKEN:%s" % (60 * "="))
def _getPackagesToRemove(self,po,deptree,toRemove):
'''
get the (related) pos to remove.
'''
toRemove.add(po)
for txmbr in self.tsInfo.getMembers(po.pkgtup):
for pkg in (txmbr.updates + txmbr.obsoletes):
toRemove.add(pkg)
self._getDepsToRemove(pkg, deptree, toRemove)
# Remove related packages
for (relative, relation) in txmbr.relatedto:
toRemove.add(relative)
self._getDepsToRemove(relative, deptree, toRemove)
self._getDepsToRemove(po, deptree, toRemove)
def _getDepsToRemove(self,po, deptree, toRemove):
        for dep in deptree.get(po, []): # Loop through all deps of po
more_deps = False
for txmbr in self.tsInfo.getMembers(dep.pkgtup):
txmbr.removeDep(po)
if txmbr.depends_on:
more_deps = True
break
for pkg in (txmbr.updates + txmbr.obsoletes):
toRemove.add(pkg)
if more_deps: # Others depend on this pkg, so leave it. bz#905899
continue
if dep in toRemove: # If this is true we inf. recurse, so check
continue # even though it shouldn't happen. bz#874065
toRemove.add(dep)
self._getDepsToRemove(dep, deptree, toRemove)
def _rpmdb_warn_checks(self, out=None, warn=True, chkcmd=None, header=None,
ignore_pkgs=[]):
if out is None:
out = self.logger.warning
if chkcmd is None:
chkcmd = ['dependencies', 'duplicates']
if header is None:
# FIXME: _N()
msg = _("** Found %d pre-existing rpmdb problem(s),"
" 'yum check' output follows:")
header = lambda problems: not problems or out(msg % problems)
if warn:
out(_('Warning: RPMDB altered outside of yum.'))
if type(chkcmd) in (type([]), type(set())):
chkcmd = set(chkcmd)
else:
chkcmd = set([chkcmd])
ignore_pkgtups = set((pkg.pkgtup for pkg in ignore_pkgs))
rc = 0
probs = []
if chkcmd.intersection(set(('all', 'dependencies'))):
prob2ui = {'requires' : _('missing requires'),
'conflicts' : _('installed conflict')}
for prob in self.rpmdb.check_dependencies():
if prob.pkg.pkgtup in ignore_pkgtups:
continue
if prob.problem == 'conflicts':
found = True # all the conflicting pkgs have to be ignored
for res in prob.conflicts:
if res.pkgtup not in ignore_pkgtups:
found = False
break
if found:
continue
probs.append(prob)
if chkcmd.intersection(set(('all', 'duplicates'))):
iopkgs = set(self.conf.installonlypkgs)
for prob in self.rpmdb.check_duplicates(iopkgs):
if prob.pkg.pkgtup in ignore_pkgtups:
continue
if prob.duplicate.pkgtup in ignore_pkgtups:
continue
probs.append(prob)
if chkcmd.intersection(set(('all', 'obsoleted'))):
for prob in self.rpmdb.check_obsoleted():
if prob.pkg.pkgtup in ignore_pkgtups:
continue
if prob.obsoleter.pkgtup in ignore_pkgtups:
continue
probs.append(prob)
if chkcmd.intersection(set(('all', 'provides'))):
for prob in self.rpmdb.check_provides():
if prob.pkg.pkgtup in ignore_pkgtups:
continue
probs.append(prob)
header(len(probs))
for prob in sorted(probs):
out(prob)
return probs
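    # A hedged usage sketch: callers such as runTransaction() below pass
    # the packages being removed so known-doomed rpmdb entries are not
    # reported as problems.
    #
    #   probs = yb._rpmdb_warn_checks(chkcmd=['all'],
    #                                 ignore_pkgs=[t.po for t in txmbrs])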
def runTransaction(self, cb):
"""Perform the transaction.
:param cb: an rpm callback object to use in the transaction
:return: a :class:`yum.misc.GenericHolder` containing
information about the results of the transaction
        :raises: :class:`yum.Errors.YumRPMTransError` if the
           transaction cannot be completed
"""
if (self.conf.fssnap_automatic_pre or self.conf.fssnap_automatic_post) and not self.fssnap.available:
msg = _("Snapshot support not available.")
if self.conf.fssnap_abort_on_errors in ('broken-setup', 'any'):
raise Errors.YumRPMTransError(msg="Aborting transaction.", errors=msg)
else:
self.verbose_logger.critical(msg)
if self.fssnap.available and ((self.conf.fssnap_automatic_pre or
self.conf.fssnap_automatic_post) and
self.conf.fssnap_automatic_keep):
# Automatically kill old snapshots...
snaps = self.fssnap.old_snapshots()
snaps = sorted(snaps, key=lambda x: (x['ctime'], x['origin_dev']),
reverse=True)
last = '<n/a>'
num = 0
todel = []
for snap in snaps:
num += 1
if last != snap['origin_dev']:
last = snap['origin_dev']
num = 1
continue
if num > self.conf.fssnap_automatic_keep:
todel.append(snap['dev'])
# Display something to the user?
snaps = self.fssnap.del_snapshots(devices=todel)
if len(snaps):
self.verbose_logger.info(_("Deleted %u snapshots.") % len(snaps))
if (self.fssnap.available and
(not self.ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST) and
self.conf.fssnap_automatic_pre)):
if not self.fssnap.has_space(self.conf.fssnap_percentage):
msg = _("Not enough space to create pre. FS snapshot.")
if self.conf.fssnap_abort_on_errors in ('snapshot-failure', 'any'):
raise Errors.YumRPMTransError(msg="Aborting transaction", errors=msg)
else:
self.verbose_logger.critical(msg)
else:
tags = {'*': ['reason=automatic']} # FIXME: pre. tags
snaps = self.fssnap.snapshot(self.conf.fssnap_percentage, tags=tags)
if not snaps:
msg = _("Failed to create snapshot")
if self.conf.fssnap_abort_on_errors in ('snapshot-failure', 'any'):
raise Errors.YumRPMTransError(msg="Aborting transaction", errors=msg)
else:
self.verbose_logger.critical(msg)
for (odev, ndev) in snaps:
self.verbose_logger.info(_("Created snapshot from %s, results is: %s") % (odev, ndev))
self.plugins.run('pretrans')
# We may want to put this other places, eventually, but for now it's
# good as long as we get it right for history.
for repo in self.repos.listEnabled():
if repo._xml2sqlite_local:
self.run_with_package_names.add('yum-metadata-parser')
break
if (not self.conf.history_record or
self.ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST)):
frpmdbv = self.tsInfo.futureRpmDBVersion()
else:
using_pkgs_pats = list(self.run_with_package_names)
using_pkgs = self.rpmdb.returnPackages(patterns=using_pkgs_pats)
rpmdbv = self.rpmdb.simpleVersion(main_only=True)[0]
lastdbv = self.history.last()
if lastdbv is not None:
lastdbv = lastdbv.end_rpmdbversion
rpmdb_problems = []
if lastdbv is None or rpmdbv != lastdbv:
txmbrs = self.tsInfo.getMembersWithState(None, TS_REMOVE_STATES)
ignore_pkgs = [txmbr.po for txmbr in txmbrs]
output_warn = lastdbv is not None
rpmdb_problems = self._rpmdb_warn_checks(warn=output_warn,
ignore_pkgs=ignore_pkgs)
cmdline = None
if hasattr(self, 'args') and self.args:
cmdline = ' '.join(self.args)
elif hasattr(self, 'cmds') and self.cmds:
cmdline = ' '.join(self.cmds)
frpmdbv = self.tsInfo.futureRpmDBVersion()
self.history.beg(rpmdbv, using_pkgs, list(self.tsInfo),
self.skipped_packages, rpmdb_problems, cmdline)
# write out our config and repo data to additional history info
self._store_config_in_history()
if hasattr(self, '_shell_history_write'): # Only in cli...
self._shell_history_write()
self.plugins.run('historybegin')
# Just before we update the transaction, update what we think the
# rpmdb will look like. This needs to be done before the run, so that if
# "something" happens and the rpmdb is different from what we think it
# will be we store what we thought, not what happened (so it'll be an
# invalid cache).
self.rpmdb.transactionResultVersion(frpmdbv)
# transaction has started - all bets are off on our saved ts file
if self._ts_save_file is not None:
# write the saved transaction data to the addon location in history
# so we can pull it back later if we need to
savetx_msg = open(self._ts_save_file, 'r').read()
self.history.write_addon_data('saved_tx', savetx_msg)
try:
os.unlink(self._ts_save_file)
except (IOError, OSError), e:
pass
self._ts_save_file = None
if self.conf.reset_nice:
onice = os.nice(0)
if onice:
try:
os.nice(-onice)
except:
onice = 0
errors = self.ts.run(cb.callback, '')
# ts.run() exit codes are, hmm, "creative": None means all ok, empty
# list means some errors happened in the transaction and non-empty
# list that there were errors preventing the ts from starting...
if self.conf.reset_nice:
try:
os.nice(onice)
except:
pass
# make resultobject - just a plain yumgenericholder object
resultobject = misc.GenericHolder()
resultobject.return_code = 0
if errors is None:
pass
elif len(errors) == 0:
errstring = _('Warning: scriptlet or other non-fatal errors occurred during transaction.')
self.verbose_logger.debug(errstring)
resultobject.return_code = 1
else:
if self.conf.history_record and not self.ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST):
herrors = [to_unicode(to_str(x)) for x in errors]
self.plugins.run('historyend')
self.history.end(rpmdbv, 2, errors=herrors)
self.logger.critical(_("Transaction couldn't start:"))
for e in errors:
self.logger.critical(e[0]) # should this be 'to_unicoded'?
raise Errors.YumRPMTransError(msg=_("Could not run transaction."),
errors=errors)
if not self.ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST):
self.cleanUsedHeadersPackages()
if not self.conf.keepcache and self._cashe:
self._cashe.cleanup()
for i in ('ts_all_fn', 'ts_done_fn'):
if hasattr(cb, i):
fn = getattr(cb, i)
try:
misc.unlink_f(fn)
except (IOError, OSError), e:
self.logger.critical(_('Failed to remove transaction file %s') % fn)
# drop out the rpm cache so we don't step on bad hdr indexes
if (self.ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST) or
resultobject.return_code):
self.rpmdb.dropCachedData()
else:
self.rpmdb.dropCachedDataPostTransaction(list(self.tsInfo))
self.plugins.run('posttrans')
# sync up what just happened versus what is in the rpmdb
if not self.ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST):
vTcb = None
if hasattr(cb, 'verify_txmbr'):
vTcb = cb.verify_txmbr
self.verifyTransaction(resultobject, vTcb)
if self.conf.group_command == 'objects':
self.igroups.save()
if (self.fssnap.available and
(not self.ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST) and
self.conf.fssnap_automatic_post)):
if not self.fssnap.has_space(self.conf.fssnap_percentage):
msg = _("Not enough space to create post trans FS snapshot.")
self.verbose_logger.critical(msg)
else:
tags = {'*': ['reason=automatic']} # FIXME: post tags
snaps = self.fssnap.snapshot(self.conf.fssnap_percentage, tags=tags)
if not snaps:
self.verbose_logger.critical(_("Failed to create snapshot"))
for (odev, ndev) in snaps:
self.verbose_logger.info(_("Created snapshot from %s, results is: %s") % (odev, ndev))
return resultobject
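    # A hedged decode of the result object, following the ts.run() comment
    # above:
    #
    #   res = yb.runTransaction(cb)   # cb: an rpm callback object
    #   if res.return_code == 0:
    #       pass                      # clean run
    #   else:
    #       pass                      # scriptlet/other non-fatal errors
    #   # hard failures raise Errors.YumRPMTransError instead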
def verifyTransaction(self, resultobject=None, txmbr_cb=None):
"""Check that the transaction did what was expected, and
propagate external yumdb information. Output error messages
if the transaction did not do what was expected.
:param resultobject: the :class:`yum.misc.GenericHolder`
object returned from the :func:`runTransaction` call that
ran the transaction
:param txmbr_cb: the callback for the rpm transaction members
"""
# check to see that the rpmdb and the tsInfo roughly matches
# push package object metadata outside of rpmdb into yumdb
# delete old yumdb metadata entries
# for each pkg in the tsInfo
# if it is an install - see that the pkg is installed
# if it is a remove - see that the pkg is no longer installed, provided
# that there is not also an install of this pkg in the tsInfo (reinstall)
# for any kind of install add from_repo to the yumdb, and the cmdline
# and the install reason
def _call_txmbr_cb(txmbr, count):
if txmbr_cb is not None:
count += 1
txmbr_cb(txmbr, count)
return count
oil = self.conf.override_install_langs
if not oil:
oil = rpm.expandMacro("%_install_langs")
if oil == 'all':
oil = ''
elif oil:
oil = ":".join(sorted(oil.split(':')))
vt_st = time.time()
self.plugins.run('preverifytrans')
count = 0
for txmbr in self.tsInfo:
if txmbr.output_state in TS_INSTALL_STATES:
if not self.rpmdb.contains(po=txmbr.po):
# maybe a file log here, too
# but raising an exception is not going to do any good
self.logger.critical(_('%s was supposed to be installed' \
' but is not!') % txmbr.po)
# Note: Get Panu to do te.Failed() so we don't have to
txmbr.output_state = TS_FAILED
count = _call_txmbr_cb(txmbr, count)
continue
count = _call_txmbr_cb(txmbr, count)
po = self.getInstalledPackageObject(txmbr.pkgtup)
rpo = txmbr.po
po.yumdb_info.from_repo = rpo.repoid
po.yumdb_info.reason = txmbr.reason
po.yumdb_info.releasever = self.conf.yumvar['releasever']
for var in self.conf.yumvar: # Store all yum variables.
# Skip some of the variables...
if var == 'releasever': continue
if var == 'basearch': continue # This "never" changes.
if var == 'arch': continue
if var == 'uuid': continue
setattr(po.yumdb_info, 'var_' + var, self.conf.yumvar[var])
if oil:
po.yumdb_info.ts_install_langs = oil
if 'nocontexts' in self.conf.tsflags:
po.yumdb_info.tsflag_nocontexts = 'true'
if 'nodocs' in self.conf.tsflags:
po.yumdb_info.tsflag_nodocs = 'true'
if 'noscripts' in self.conf.tsflags:
po.yumdb_info.tsflag_noscripts = 'true'
if 'notriggers' in self.conf.tsflags:
po.yumdb_info.tsflag_notriggers = 'true'
if hasattr(self, 'args') and self.args:
po.yumdb_info.command_line = ' '.join(self.args)
elif hasattr(self, 'cmds') and self.cmds:
po.yumdb_info.command_line = ' '.join(self.cmds)
csum = rpo.returnIdSum()
if csum is not None:
po.yumdb_info.checksum_type = str(csum[0])
po.yumdb_info.checksum_data = str(csum[1])
if isinstance(rpo, YumLocalPackage):
try:
st = os.stat(rpo.localPkg())
lp_ctime = str(int(st.st_ctime))
lp_mtime = str(int(st.st_mtime))
po.yumdb_info.from_repo_revision = lp_ctime
po.yumdb_info.from_repo_timestamp = lp_mtime
except: pass
if rpo.xattr_origin_url is not None:
po.yumdb_info.origin_url = rpo.xattr_origin_url
if hasattr(rpo.repo, 'repoXML'):
md = rpo.repo.repoXML
if md and md.revision is not None:
po.yumdb_info.from_repo_revision = str(md.revision)
if md:
po.yumdb_info.from_repo_timestamp = str(md.timestamp)
if hasattr(txmbr, 'group_member'):
# FIXME:
po.yumdb_info.group_member = txmbr.group_member
loginuid = misc.getloginuid()
if txmbr.updates or txmbr.downgrades or txmbr.reinstall:
if txmbr.updates:
opo = txmbr.updates[0]
elif txmbr.downgrades:
opo = txmbr.downgrades[0]
else:
opo = po
if 'installed_by' in opo.yumdb_info:
po.yumdb_info.installed_by = opo.yumdb_info.installed_by
if 'group_member' in opo.yumdb_info:
po.yumdb_info.group_member = opo.yumdb_info.group_member
if loginuid is not None:
po.yumdb_info.changed_by = str(loginuid)
elif loginuid is not None:
po.yumdb_info.installed_by = str(loginuid)
if self.conf.history_record:
self.history.sync_alldb(po)
# Remove old ones after installing new ones, so we can copy values.
for txmbr in self.tsInfo:
if txmbr.output_state in TS_INSTALL_STATES:
pass
elif txmbr.output_state in TS_REMOVE_STATES:
if self.rpmdb.contains(po=txmbr.po):
if not self.tsInfo.getMembersWithState(pkgtup=txmbr.pkgtup,
output_states=TS_INSTALL_STATES):
# maybe a file log here, too
# but raising an exception is not going to do any good
# Note: This actually triggers atm. because we can't
# always find the erased txmbr to set it when
# we should.
                        self.logger.critical(_('%s was supposed to be removed' \
                                               ' but is not!') % txmbr.po)
# Note: Get Panu to do te.Failed() so we don't have to
txmbr.output_state = TS_FAILED
count = _call_txmbr_cb(txmbr, count)
continue
count = _call_txmbr_cb(txmbr, count)
yumdb_item = self.rpmdb.yumdb.get_package(po=txmbr.po)
yumdb_item.clean()
else:
count = _call_txmbr_cb(txmbr, count)
self.verbose_logger.log(logginglevels.DEBUG_2, 'What is this? %s' % txmbr.po)
self.plugins.run('postverifytrans')
rpmdbv = self.rpmdb.simpleVersion(main_only=True)[0]
if self.conf.history_record and not self.ts.isTsFlagSet(rpm.RPMTRANS_FLAG_TEST):
ret = -1
if resultobject is not None:
ret = resultobject.return_code
self.plugins.run('historyend')
self.history.end(rpmdbv, ret)
self.rpmdb.dropCachedData()
self.verbose_logger.debug('VerifyTransaction time: %0.3f' % (time.time() - vt_st))
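    #  Note: an illustrative txmbr_cb for verifyTransaction(); the only
    # contract assumed is the txmbr_cb(txmbr, count) calling convention
    # used by _call_txmbr_cb() above:
    #
    #     def my_verify_cb(txmbr, count):
    #         # count is incremented once per verified transaction member
    #         sys.stderr.write('verified %d: %s\n' % (count, txmbr.po))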
def costExcludePackages(self):
"""Create an excluder for repositories with higher costs. For
example, if repo-A:cost=1 and repo-B:cost=2, this function
will set up an excluder on repo-B that looks for packages in
        repo-A.
"""
# if all the repo.costs are equal then don't bother running things
costs = {}
for r in self.repos.listEnabled():
costs.setdefault(r.cost, []).append(r)
if len(costs) <= 1:
return
done = False
exid = "yum.costexcludes"
orepos = []
for cost in sorted(costs):
if done: # Skip the first one, as they have lowest cost so are good.
for repo in costs[cost]:
yce = _YumCostExclude(repo, self.repos)
repo.sack.addPackageExcluder(repo.id, exid,
'exclude.pkgtup.in', yce)
orepos.extend(costs[cost])
done = True
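    #  Note: repo costs come from repository configuration; a hypothetical
    # example (values are illustrative, not defaults):
    #
    #     [repo-A]
    #     cost=1
    #     [repo-B]
    #     cost=2
    #
    # with identical pkgtups in both, the excluder set up above hides the
    # repo-B copies, so repo-A (lower cost) wins.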
def excludePackages(self, repo=None):
"""Remove packages from packageSacks based on global exclude
lists, command line excludes and per-repository excludes.
:param repo: a repo object to use. If not given, all
repositories are used
"""
if "all" in self.conf.disable_excludes:
return
# if not repo: then assume global excludes, only
# if repo: then do only that repos' packages and excludes
if not repo: # global only
if "main" in self.conf.disable_excludes:
return
excludelist = self.conf.exclude
repoid = None
exid_beg = 'yum.excludepkgs'
else:
if repo.id in self.conf.disable_excludes:
return
excludelist = repo.getExcludePkgList()
repoid = repo.id
exid_beg = 'yum.excludepkgs.' + repoid
count = 0
for match in excludelist:
count += 1
exid = "%s.%u" % (exid_beg, count)
self.pkgSack.addPackageExcluder(repoid, exid,'exclude.match', match)
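    #  Note: the excludelist entries above are shell-style wildcards taken
    # from yum.conf or a repo section; a hypothetical config line
    # (illustrative only):
    #
    #     exclude=kernel* compat-*
    #
    # would register two separate 'exclude.match' excluders, one per
    # pattern.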
def includePackages(self, repo):
"""Remove packages from packageSacks based on list of
packages to include.
:param repo: the repository to use
"""
for di in getattr(self.conf, 'disable_includes', []):
if di == 'all' or di == repo.id:
return
includelist = repo.getIncludePkgList()
if len(includelist) == 0:
return
# includepkgs actually means "exclude everything that doesn't match".
# So we mark everything, then wash those we want to keep and then
# exclude everything that is marked.
exid = "yum.includepkgs.1"
self.pkgSack.addPackageExcluder(repo.id, exid, 'mark.washed')
count = 0
for match in includelist:
count += 1
exid = "%s.%u" % ("yum.includepkgs.2", count)
self.pkgSack.addPackageExcluder(repo.id, exid, 'wash.match', match)
exid = "yum.includepkgs.3"
self.pkgSack.addPackageExcluder(repo.id, exid, 'exclude.marked')
def doLock(self, lockfile = YUM_PID_FILE):
"""Acquire the yum lock.
:param lockfile: the file to use for the lock
:raises: :class:`yum.Errors.LockError`
"""
if self.conf.uid != 0:
# If we are a user, assume we are using the root cache ... so don't
# bother locking.
if self.conf.cache:
return
root = self.conf.cachedir
# Don't want <cachedir>/var/run/yum.pid ... just: <cachedir>/yum.pid
lockfile = os.path.basename(lockfile)
else:
root = self.conf.installroot
lockfile = root + '/' + lockfile # lock in the chroot
lockfile = os.path.normpath(lockfile) # get rid of silly preceding extra /
mypid=str(os.getpid())
while True:
ret = self._lock(lockfile, mypid, 0644)
if ret:
break
oldpid = self._get_locker(lockfile)
if not oldpid:
# Invalid locker: unlink lockfile and retry
self._unlock(lockfile)
continue
if oldpid == os.getpid(): # if we own the lock, we're fine
ret = 1
break
# Another copy seems to be running.
msg = _('Existing lock %s: another copy is running as pid %s.') % (lockfile, oldpid)
raise Errors.LockError(0, msg, oldpid)
if ret == 2:
# Means lockdir isn't setup, out of bad options just run without
# locks.
return
assert ret == 1
# We've got the lock, store it so we can auto-unlock on __del__...
self._lockfile = lockfile
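    #  Note: a sketch of typical lock usage (illustrative only; `yb` is
    # assumed to be an initialized YumBase):
    #
    #     yb.doLock()
    #     try:
    #         pass # ... depsolve, download, run the transaction ...
    #     finally:
    #         yb.doUnlock()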
def doUnlock(self, lockfile=None):
"""Release the yum lock.
:param lockfile: the lock file to use. If not given, the file
that was given as a parameter to the :func:`doLock` call
that closed the lock is used
"""
# if we're not root then we don't lock - just return nicely
# Note that we can get here from __del__, so if we haven't created
# YumBase.conf we don't want to do so here as creating stuff inside
# __del__ is bad.
if hasattr(self, 'preconf'):
return
# Obviously, we can't lock random places as non-root, but we still want
# to get rid of our lock file. Given we now have _lockfile I'm pretty
# sure nothing should ever pass lockfile in here anyway.
if self.conf.uid != 0:
lockfile = None
if lockfile is not None:
root = self.conf.installroot
lockfile = root + '/' + lockfile # lock in the chroot
elif self._lockfile is None:
return # Don't delete other people's lock files on __del__
else:
lockfile = self._lockfile # Get the value we locked with
self._unlock(lockfile)
self._lockfile = None
@staticmethod
def _lock(filename, contents='', mode=0777):
lockdir = os.path.dirname(filename)
try:
if not os.path.exists(lockdir):
# We used to os.makedirs(lockdir, mode=0755) ... but that
# causes problems now due to /var/run being a magic systemd dir.
# So we now just give up and run, hopefully nobody runs N
            # instances before the magic dir. is activated.
return 2
fd = os.open(filename, os.O_EXCL|os.O_CREAT|os.O_WRONLY, mode)
os.write(fd, contents)
os.close(fd)
return 1
except OSError, msg:
if not msg.errno == errno.EEXIST:
# Whoa. What the heck happened?
errmsg = _('Could not create lock at %s: %s ') % (filename, exception2msg(msg))
raise Errors.LockError(msg.errno, errmsg, int(contents))
return 0
@staticmethod
def _unlock(filename):
misc.unlink_f(filename)
@staticmethod
def _get_locker(lockfile):
try: fd = open(lockfile, 'r')
except (IOError, OSError), e:
msg = _("Could not open lock %s: %s") % (lockfile, e)
raise Errors.LockError(errno.EPERM, msg)
try: oldpid = int(fd.readline())
except ValueError:
return None # Bogus pid
try:
stat = open("/proc/%d/stat" % oldpid).readline()
if stat.split()[2] == 'Z':
return None # The pid is a zombie
except IOError:
# process dead or /proc not mounted
try: os.kill(oldpid, 0)
except OSError, e:
if e[0] == errno.ESRCH:
return None # The pid doesn't exist
# Whoa. What the heck happened?
msg = _('Unable to check if PID %s is active') % oldpid
raise Errors.LockError(errno.EPERM, msg, oldpid)
return oldpid
def verifyPkg(self, fo, po, raiseError):
"""Check that the checksum of a remote package matches what we
expect it to be. If the checksum of the package file is
wrong, and the file is also larger than expected, it cannot be
redeemed, so delete it.
:param fo: the file object of the package
:param po: the package object to verify
:param raiseError: if *raiseError* is 1, and the package
           does not check out, a :class:`URLGrabError` will be raised.
Defaults to 0
:return: True if the package is verified successfully.
Otherwise, False will be returned, unless *raiseError* is
1, in which case a :class:`URLGrabError` will be raised
:raises: :class:`URLGrabError` if verification fails, and
*raiseError* is 1
"""
failed = False
if type(fo) is types.InstanceType:
fo = fo.filename
if fo != po.localPkg():
po.localpath = fo
if not po.verifyLocalPkg():
failed = True
else:
ylp = YumLocalPackage(self.rpmdb.readOnlyTS(), fo)
if ylp.pkgtup != po.pkgtup:
failed = True
if failed:
# if the file is wrong AND it is >= what we expected then it
# can't be redeemed. If we can, kill it and start over fresh
cursize = os.stat(fo)[6]
totsize = long(po.size)
if cursize >= totsize and not po.repo.cache:
# if the path to the file is NOT inside the pkgdir then don't
# unlink it b/c it is probably a file:// url and possibly not
# unlinkable
if fo.startswith(po.repo.pkgdir):
os.unlink(fo)
if raiseError:
msg = _('Package does not match intended download. Suggestion: run yum --enablerepo=%s clean metadata and try again') % po.repo.id
raise URLGrabError(-1, msg)
else:
return False
return True
def verifyChecksum(self, fo, checksumType, csum):
"""Verify that the checksum of the given file matches the
given checksum.
:param fo: the file object to verify the checksum of
:param checksumType: the type of checksum to use
        :param csum: the checksum to check against
:return: 0 if the checksums match
:raises: :class:`URLGrabError` if there is an error performing
the checksums, or the checksums do not match
"""
try:
filesum = misc.checksum(checksumType, fo)
except Errors.MiscError, e:
raise URLGrabError(-3, _('Could not perform checksum'))
if filesum != csum:
raise URLGrabError(-1, _('Package does not match checksum'))
return 0
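    #  Note: a sketch of calling verifyChecksum() directly (illustrative
    # only; the path and digest below are made up):
    #
    #     try:
    #         yb.verifyChecksum('/tmp/foo.rpm', 'sha256', 'ab12...')
    #     except URLGrabError:
    #         pass # checksum mismatch, or the file couldn't be checksummed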
def downloadPkgs(self, pkglist, callback=None, callback_total=None):
"""Download the packages specified by the given list of
package objects.
:param pkglist: a list of package objects specifying the
packages to download
:param callback: unused
:param callback_total: a callback to output messages about the
download operation
:return: a dictionary containing errors from the downloading process
:raises: :class:`URLGrabError`
"""
def mediasort(apo, bpo):
# FIXME: we should probably also use the mediaid; else we
# could conceivably ping-pong between different disc1's
a = apo.getDiscNum()
b = bpo.getDiscNum()
if a is None and b is None:
# deltas first to start rebuilding asap
return cmp(isinstance(bpo, DeltaPackage),
isinstance(apo, DeltaPackage)) or cmp(apo, bpo)
if a is None:
return -1
if b is None:
return 1
if a < b:
return -1
elif a > b:
return 1
return 0
errors = {}
def adderror(po, msg):
errors.setdefault(po, []).append(msg)
if po.localpath.endswith('.tmp'):
misc.unlink_f(po.localpath) # won't resume this..
# We close the history DB here because some plugins (presto) use
# threads. And sqlite really doesn't like threads. And while I don't
# think it should matter, we've had some reports of history DB
# corruption, and it was implied that it happened just after C-c
# at download time and this is a safe thing to do.
# Note that manual testing shows that history is not connected by
# this point, from the cli with no plugins. So this really does
# nothing *sigh*.
self.history.close()
self.plugins.run('predownload', pkglist=pkglist)
beenthere = set() # only once, please. BZ 468401
downloadonly = getattr(self.conf, 'downloadonly', False)
remote_pkgs = []
remote_size = 0
def verify_local(po):
local = po.localPkg()
if local in beenthere:
# This is definitely a depsolver bug. Make it fatal?
self.verbose_logger.warn(_("ignoring a dupe of %s") % po)
return True
beenthere.add(local)
if downloadonly and not os.path.exists(local):
# Check before we munge the name...
po.repo._preload_pkg_from_system_cache(po)
if os.path.exists(local):
if self.verifyPkg(local, po, False):
self.verbose_logger.debug(_("using local copy of %s") % po)
return True
if po.repo.cache:
adderror(po, _('package fails checksum but caching is '
'enabled for %s') % po.repo.id)
return False
if downloadonly:
po.localpath += '.%d.tmp' % os.getpid()
try: os.rename(local, po.localpath)
except OSError: pass
po.returnIdSum()
po.basepath # prefetch now; fails when repos are closed
return False
pkgs = []
for po in pkglist:
if hasattr(po, 'pkgtype') and po.pkgtype == 'local':
continue
if verify_local(po):
continue
if errors:
return errors
pkgs.append(po)
# download presto metadata and use drpms
presto = DeltaInfo(self, pkgs, adderror)
deltasize = rpmsize = 0
for po in pkgs:
if isinstance(po, DeltaPackage):
if verify_local(po):
# there's .drpm already, use it
presto.rebuild(po)
continue
deltasize += po.size
rpmsize += po.rpm.size
remote_pkgs.append(po)
remote_size += po.size
if deltasize:
self.verbose_logger.info(_('Delta RPMs reduced %s of updates to %s (%d%% saved)'),
format_number(rpmsize), format_number(deltasize), 100 - deltasize*100.0/rpmsize)
if downloadonly:
if hasattr(self, '_old_cachedir'):
# Try to link/copy them out, if we have somewhere to put them.
for po in pkglist:
if not po.localpath.startswith(self.conf.cachedir):
continue
end = po.localpath[len(self.conf.cachedir):]
try:
os.link(po.localpath, self._old_cachedir + end)
except:
try:
shutil.copy2(po.localpath, self._old_cachedir + end)
except:
pass
# close DBs, unlock
self.repos.close()
self.closeRpmDB()
self.doUnlock()
beg_download = time.time()
all_remote_pkgs = remote_pkgs
all_remote_size = remote_size
while True:
remote_pkgs.sort(mediasort)
# This is kind of a hack and does nothing in non-Fedora versions,
            # we'll fix it one way or another soon.
if (hasattr(urlgrabber.progress, 'text_meter_total_size') and
len(remote_pkgs) > 1):
urlgrabber.progress.text_meter_total_size(remote_size)
i = 0
local_size = [0]
done_repos = set()
async = hasattr(urlgrabber.grabber, 'parallel_wait')
for po in remote_pkgs:
i += 1
def checkfunc(obj, po=po):
self.verifyPkg(obj, po, 1)
if po.localpath.endswith('.tmp'):
rpmfile = po.localpath.rsplit('.', 2)[0]
os.rename(po.localpath, rpmfile)
po.localpath = rpmfile
local_size[0] += po.size
if hasattr(urlgrabber.progress, 'text_meter_total_size'):
urlgrabber.progress.text_meter_total_size(remote_size,
local_size[0])
if isinstance(po, DeltaPackage):
presto.rebuild(po)
return
else:
presto.dequeue_max()
if po.repoid not in done_repos:
done_repos.add(po.repoid)
# Check a single package per. repo. ... to give a hint to
# the user on big downloads.
result, errmsg = self.sigCheckPkg(po)
if result != 0:
self.verbose_logger.warn("%s", errmsg)
po.localpath = obj.filename
if po in errors:
del errors[po]
text = os.path.basename(po.relativepath)
kwargs = {}
if async and po.repo._async:
kwargs['failfunc'] = lambda obj, po=po: adderror(po, exception2msg(obj.exception))
kwargs['async'] = True
elif not (i == 1 and not local_size[0] and remote_size == po.size):
text = '(%s/%s): %s' % (i, len(remote_pkgs), text)
try:
po.repo.getPackage(po,
checkfunc=checkfunc,
text=text,
cache=po.repo.http_caching != 'none',
**kwargs
)
except Errors.RepoError, e:
adderror(po, exception2msg(e))
if async:
try:
urlgrabber.grabber.parallel_wait()
except KeyboardInterrupt:
for po in remote_pkgs:
if po.localpath.endswith('.tmp'):
misc.unlink_f(po.localpath)
elif isinstance(po, DeltaPackage) and po.rpm.localpath.endswith('.tmp'):
misc.unlink_f(po.rpm.localpath)
raise
presto.dequeue_all()
presto.wait()
if hasattr(urlgrabber.progress, 'text_meter_total_size'):
urlgrabber.progress.text_meter_total_size(0)
fatal = False
for po in errors:
if not isinstance(po, DeltaPackage):
fatal = True
break
if not errors or fatal:
break
# there were drpm related errors *only*
remote_pkgs = []
remote_size = 0
for po in errors:
po = po.rpm
remote_pkgs.append(po)
remote_size += po.size
# callback_total needs the total pkg count
all_remote_pkgs.extend(remote_pkgs)
all_remote_size += remote_size
errors.clear()
            self.verbose_logger.warn(_('Some delta RPMs failed to download or rebuild. Retrying...'))
if callback_total and not errors:
callback_total(all_remote_pkgs, all_remote_size, beg_download)
if not downloadonly:
# XXX: Run unlocked? Skip this for now..
self.plugins.run('postdownload', pkglist=pkglist, errors=errors)
# Close curl object after we've downloaded everything.
if hasattr(urlgrabber.grabber, 'reset_curl_obj'):
urlgrabber.grabber.reset_curl_obj()
if downloadonly and not errors: # caller handles errors
self.verbose_logger.info(_('exiting because "Download Only" specified'))
sys.exit(self.exit_code)
return errors
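    #  Note: a sketch of consuming the error dict from downloadPkgs()
    # (illustrative only; `po_list` is assumed to be a list of remote
    # package objects):
    #
    #     errors = yb.downloadPkgs(po_list)
    #     for po, msgs in errors.items():
    #         for msg in msgs:
    #             yb.logger.error('%s: %s' % (po, msg))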
    def verifyHeader(self, fo, po, raiseError):
        """Check that the header of the given file object matches the
        given package object.
:param fo: the file object to check
:param po: the package object to check
:param raiseError: if *raiseError* is True, a
           :class:`URLGrabError` will be raised if the header does not
           match the package object, or cannot be read from the file. If
*raiseError* is False, 0 will be returned in the above
cases
:return: 1 if the header matches the package object, and 0 if
they do not match, and *raiseError* is False
:raises: :class:`URLGrabError` if *raiseError* is True, and
the header does not match the package object or cannot be
read from the file
"""
if type(fo) is types.InstanceType:
fo = fo.filename
try:
hlist = rpm.readHeaderListFromFile(fo)
hdr = hlist[0]
except (rpm.error, IndexError):
if raiseError:
raise URLGrabError(-1, _('Header is not complete.'))
else:
return 0
yip = YumInstalledPackage(hdr) # we're using YumInstalledPackage b/c
# it takes headers <shrug>
if yip.pkgtup != po.pkgtup:
if raiseError:
raise URLGrabError(-1, 'Header does not match intended download')
else:
return 0
return 1
def downloadHeader(self, po):
"""Download a header from a package object.
:param po: the package object to download the header from
:raises: :class:`yum.Errors.RepoError` if there are errors
obtaining the header
"""
if hasattr(po, 'pkgtype') and po.pkgtype == 'local':
return
errors = {}
local = po.localHdr()
repo = self.repos.getRepo(po.repoid)
if os.path.exists(local):
try:
result = self.verifyHeader(local, po, raiseError=1)
except URLGrabError, e:
# might add a check for length of file - if it is <
# required doing a reget
misc.unlink_f(local)
else:
po.hdrpath = local
return
else:
if self.conf.cache:
raise Errors.RepoError(_('Header not in local cache and caching-only mode enabled. Cannot download %s') % po.hdrpath,
repo=repo)
if self.dsCallback: self.dsCallback.downloadHeader(po.name)
try:
if not os.path.exists(repo.hdrdir):
os.makedirs(repo.hdrdir)
checkfunc = (self.verifyHeader, (po, 1), {})
hdrpath = repo.getHeader(po, checkfunc=checkfunc,
cache=repo.http_caching != 'none',
)
except Errors.RepoError, e:
saved_repo_error = e
try:
misc.unlink_f(local)
except OSError, e:
raise Errors.RepoError, saved_repo_error
else:
raise Errors.RepoError, saved_repo_error
else:
po.hdrpath = hdrpath
return
def sigCheckPkg(self, po):
"""Verify the GPG signature of the given package object.
:param po: the package object to verify the signature of
:return: (result, error_string)
where result is::
0 = GPG signature verifies ok or verification is not required.
1 = GPG verification failed but installation of the right GPG key
might help.
2 = Fatal GPG verification error, give up.
"""
if self._override_sigchecks:
check = False
hasgpgkey = 0
elif hasattr(po, 'pkgtype') and po.pkgtype == 'local':
check = self.conf.localpkg_gpgcheck
hasgpgkey = 0
else:
repo = self.repos.getRepo(po.repoid)
check = repo.gpgcheck
hasgpgkey = not not repo.gpgkey
if check:
ts = self.rpmdb.readOnlyTS()
sigresult = rpmUtils.miscutils.checkSig(ts, po.localPkg())
localfn = os.path.basename(po.localPkg())
if sigresult == 0:
result = 0
msg = ''
elif sigresult == 1:
if hasgpgkey:
result = 1
else:
result = 2
msg = _('Public key for %s is not installed') % localfn
elif sigresult == 2:
result = 2
msg = _('Problem opening package %s') % localfn
            elif sigresult == 3:
                if hasgpgkey:
                    result = 1
                else:
                    result = 2
                msg = _('Public key for %s is not trusted') % localfn
elif sigresult == 4:
result = 2
msg = _('Package %s is not signed') % localfn
else:
                result = 0
msg = ''
return result, msg
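    #  Note: a sketch of acting on sigCheckPkg()'s result codes, per the
    # docstring above (illustrative only; `po` is an assumed package
    # object):
    #
    #     result, errmsg = yb.sigCheckPkg(po)
    #     if result == 0:
    #         pass # signature ok, or checking not required
    #     elif result == 1:
    #         pass # import the repo's GPG key, then retry
    #     else:
    #         raise Errors.YumBaseError(errmsg) # fatal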
def cleanUsedHeadersPackages(self):
"""Delete the header and package files used in the
transaction from the yum cache.
"""
filelist = []
for txmbr in self.tsInfo:
if txmbr.po.state not in TS_INSTALL_STATES:
continue
if txmbr.po.repoid == "installed":
continue
if txmbr.po.repoid not in self.repos.repos:
continue
if txmbr.po.repo.keepcache:
continue
# make sure it's not a local file
repo = self.repos.repos[txmbr.po.repoid]
local = False
for u in repo.baseurl:
if u.startswith("file:"):
local = True
break
if local:
filelist.extend([txmbr.po.localHdr()])
else:
txmbr.po.xattr_origin_url # Load this, before we rm the file.
filelist.extend([txmbr.po.localPkg(), txmbr.po.localHdr()])
# now remove them
for fn in filelist:
if not os.path.exists(fn):
continue
try:
misc.unlink_f(fn)
except OSError, e:
self.logger.warning(_('Cannot remove %s'), fn)
continue
else:
self.verbose_logger.log(logginglevels.DEBUG_4,
_('%s removed'), fn)
def cleanHeaders(self):
"""Delete the header files from the yum cache."""
exts = ['hdr']
return self._cleanFiles(exts, 'hdrdir', 'header')
def cleanPackages(self):
"""Delete the package files from the yum cache."""
exts = ['rpm']
return self._cleanFiles(exts, 'pkgdir', 'package')
def cleanSqlite(self):
"""Delete the sqlite files from the yum cache."""
exts = ['sqlite', 'sqlite.bz2', 'sqlite.gz', 'sqlite.xz',
'sqlite-journal']
return self._cleanFiles(exts, 'cachedir', 'sqlite')
def cleanMetadata(self):
"""Delete the metadata files from the yum cache."""
exts = ['xml.gz', 'xml', 'cachecookie', 'mirrorlist.txt', 'asc',
'xml.bz2', 'xml.xz']
# Metalink is also here, but is a *.xml file
return self._cleanFiles(exts, 'cachedir', 'metadata')
def cleanExpireCache(self):
"""Delete the local data saying when the metadata and mirror
lists were downloaded for each repository."""
exts = ['cachecookie', 'mirrorlist.txt']
return self._cleanFiles(exts, 'cachedir', 'metadata')
def cleanRpmDB(self):
"""Delete any cached data from the local rpmdb."""
cachedir = self.conf.persistdir + "/rpmdb-indexes/"
if not os.path.exists(cachedir):
filelist = []
else:
filelist = misc.getFileList(cachedir, '', [])
return self._cleanFilelist('rpmdb', filelist)
def _cleanFiles(self, exts, pathattr, filetype):
filelist = []
for ext in exts:
for repo in self.repos.listEnabled():
path = getattr(repo, pathattr)
if os.path.exists(path) and os.path.isdir(path):
filelist = misc.getFileList(path, ext, filelist)
return self._cleanFilelist(filetype, filelist)
def _cleanFilelist(self, filetype, filelist):
removed = 0
for item in filelist:
try:
misc.unlink_f(item)
except OSError, e:
self.logger.critical(_('Cannot remove %s file %s'), filetype, item)
continue
else:
self.verbose_logger.log(logginglevels.DEBUG_4,
_('%s file %s removed'), filetype, item)
removed+=1
msg = P_('%d %s file removed', '%d %s files removed', removed) % (removed, filetype)
return 0, [msg]
def doPackageLists(self, pkgnarrow='all', patterns=None, showdups=None,
ignore_case=False, repoid=None):
"""Return a :class:`yum.misc.GenericHolder` containing
lists of package objects. The contents of the lists are
specified in various ways by the arguments.
        :param pkgnarrow: a string specifying which type of package
           list to produce, such as updates, installed, available,
           etc.
:param patterns: a list of names or wildcards specifying
packages to list
:param showdups: whether to include duplicate packages in the
lists
:param ignore_case: whether to ignore case when searching by
package names
:param repoid: repoid that all pkgs will belong to
:return: a :class:`yum.misc.GenericHolder` instance with the
following lists defined::
available = list of packageObjects
installed = list of packageObjects
updates = tuples of packageObjects (updating, installed)
extras = list of packageObjects
obsoletes = tuples of packageObjects (obsoleting, installed)
recent = list of packageObjects
"""
if showdups is None:
showdups = self.conf.showdupesfromrepos
ygh = misc.GenericHolder(iter=pkgnarrow)
installed = []
available = []
reinstall_available = []
old_available = []
updates = []
obsoletes = []
obsoletesTuples = []
recent = []
extras = []
ic = ignore_case
# list all packages - those installed and available, don't 'think about it'
if pkgnarrow == 'all':
dinst = {}
ndinst = {} # Newest versions by name.arch
for po in self.rpmdb.returnPackages(patterns=patterns,
ignore_case=ic):
if not misc.filter_pkgs_repoid([po], repoid):
continue
dinst[po.pkgtup] = po
if showdups:
continue
key = (po.name, po.arch)
if key not in ndinst or po.verGT(ndinst[key]):
ndinst[key] = po
installed = dinst.values()
if showdups:
avail = self.pkgSack.returnPackages(repoid=repoid,
patterns=patterns,
ignore_case=ic)
elif repoid:
avail = self.pkgSack.sacks[repoid]
avail = avail.returnNewestByNameArch(patterns=patterns,
ignore_case=ic)
else:
try:
avail = self.pkgSack.returnNewestByNameArch(patterns=patterns,
ignore_case=ic)
except Errors.PackageSackError:
avail = []
for pkg in avail:
if showdups:
if pkg.pkgtup in dinst:
reinstall_available.append(pkg)
else:
available.append(pkg)
else:
key = (pkg.name, pkg.arch)
if pkg.pkgtup in dinst:
reinstall_available.append(pkg)
else:
# if (self.allowedMultipleInstalls(pkg) or
# key not in ndinst):
# Might be because pattern specified a version, so
# we need to do a search for name/arch to find any
# installed. Alas. calling allowedMultipleInstalls()
# is much slower than calling searchNevra(). *Sigh*
ipkgs = self.rpmdb.searchNevra(pkg.name,
arch=pkg.arch)
ipkgs = misc.filter_pkgs_repoid(ipkgs, repoid)
if ipkgs:
ndinst[key] = sorted(ipkgs)[-1]
if key not in ndinst or pkg.verGT(ndinst[key]):
available.append(pkg)
else:
old_available.append(pkg)
# produce the updates list of tuples
elif pkgnarrow == 'updates':
for (n,a,e,v,r) in self.up.getUpdatesList():
matches = self.pkgSack.searchNevra(name=n, arch=a, epoch=e,
ver=v, rel=r)
# This is kind of wrong, depending on how you look at it.
matches = misc.filter_pkgs_repoid(matches, repoid)
if len(matches) > 1:
updates.append(matches[0])
self.verbose_logger.log(logginglevels.DEBUG_1,
_('More than one identical match in sack for %s'),
matches[0])
elif len(matches) == 1:
updates.append(matches[0])
else:
self.verbose_logger.log(logginglevels.DEBUG_1,
_('Nothing matches %s.%s %s:%s-%s from update'), n,a,e,v,r)
if patterns:
exactmatch, matched, unmatched = \
parsePackages(updates, patterns, casematch=not ignore_case)
updates = exactmatch + matched
# installed only
elif pkgnarrow == 'installed':
installed = self.rpmdb.returnPackages(patterns=patterns,
ignore_case=ic)
installed = misc.filter_pkgs_repoid(installed, repoid)
# available in a repository
elif pkgnarrow == 'available':
if showdups:
avail = self.pkgSack.returnPackages(patterns=patterns,
ignore_case=ic,
repoid=repoid)
elif repoid:
avail = self.pkgSack.sacks[repoid]
avail = avail.returnNewestByNameArch(patterns=patterns,
ignore_case=ic)
else:
try:
avail = self.pkgSack.returnNewestByNameArch(patterns=patterns,
ignore_case=ic)
except Errors.PackageSackError:
avail = []
for pkg in avail:
if showdups:
if self.rpmdb.contains(po=pkg):
reinstall_available.append(pkg)
else:
available.append(pkg)
else:
ipkgs = self.rpmdb.searchNevra(pkg.name, arch=pkg.arch)
if ipkgs:
latest = sorted(ipkgs, reverse=True)[0]
if not ipkgs or pkg.verGT(latest):
available.append(pkg)
elif pkg.verEQ(latest):
reinstall_available.append(pkg)
else:
old_available.append(pkg)
# not in a repo but installed
elif pkgnarrow == 'extras':
# we must compare the installed set versus the repo set
# anything installed but not in a repo is an extra
avail = self.pkgSack.simplePkgList(patterns=patterns,
ignore_case=ic)
avail = set(avail)
for po in self.rpmdb.returnPackages(patterns=patterns,
ignore_case=ic):
if not misc.filter_pkgs_repoid([po], repoid):
continue
if po.pkgtup not in avail:
extras.append(po)
# not in a repo but installed
elif pkgnarrow == 'distro-extras':
for po in self.rpmdb.returnPackages(patterns=patterns,
ignore_case=ic):
if not misc.filter_pkgs_repoid([po], repoid):
continue
if self.pkgSack.searchNames([po.name]):
continue
extras.append(po)
# obsoleting packages (and what they obsolete)
elif pkgnarrow == 'obsoletes':
self.conf.obsoletes = 1
for (pkgtup, instTup) in self.up.getObsoletesTuples():
(n,a,e,v,r) = pkgtup
pkgs = self.pkgSack.searchNevra(name=n, arch=a, ver=v, rel=r, epoch=e)
pkgs = misc.filter_pkgs_repoid(pkgs, repoid)
instpo = self.getInstalledPackageObject(instTup)
for po in pkgs:
obsoletes.append(po)
obsoletesTuples.append((po, instpo))
if patterns:
exactmatch, matched, unmatched = \
parsePackages(obsoletes, patterns, casematch=not ignore_case)
obsoletes = exactmatch + matched
matched_obsoletes = set(obsoletes)
nobsoletesTuples = []
for po, instpo in obsoletesTuples:
if po not in matched_obsoletes:
continue
nobsoletesTuples.append((po, instpo))
obsoletesTuples = nobsoletesTuples
if not showdups:
obsoletes = packagesNewestByName(obsoletes)
filt = set(obsoletes)
nobsoletesTuples = []
for po, instpo in obsoletesTuples:
if po not in filt:
continue
nobsoletesTuples.append((po, instpo))
obsoletesTuples = nobsoletesTuples
# packages recently added to the repositories
elif pkgnarrow == 'recent':
now = time.time()
recentlimit = now-(self.conf.recent*86400)
if showdups:
avail = self.pkgSack.returnPackages(patterns=patterns,
ignore_case=ic,
repoid=repoid)
elif repoid:
avail = self.pkgSack.sacks[repoid]
avail = avail.returnNewestByNameArch(patterns=patterns,
ignore_case=ic)
else:
try:
avail = self.pkgSack.returnNewestByNameArch(patterns=patterns,
ignore_case=ic)
except Errors.PackageSackError:
avail = []
for po in avail:
if int(po.filetime) > recentlimit:
recent.append(po)
ygh.installed = installed
ygh.available = available
ygh.reinstall_available = reinstall_available
ygh.old_available = old_available
ygh.updates = updates
ygh.obsoletes = obsoletes
ygh.obsoletesTuples = obsoletesTuples
ygh.recent = recent
ygh.extras = extras
return ygh
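    #  Note: a sketch of using doPackageLists() (illustrative only):
    #
    #     ygh = yb.doPackageLists(pkgnarrow='updates', patterns=['kernel*'])
    #     for po in ygh.updates:
    #         print po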
def findDeps(self, pkgs):
"""Return the dependencies for a given package object list, as well
as possible solutions for those dependencies.
:param pkgs: a list of package objects
        :return: the dependencies as a dictionary of dictionaries:
           results[package object][requirement] = [list of satisfying pkgs]
"""
results = {}
for pkg in pkgs:
results[pkg] = {}
reqs = pkg.requires
reqs.sort()
pkgresults = results[pkg] # shorthand so we don't have to do the
# double bracket thing
for req in reqs:
(r,f,v) = req
if r.startswith('rpmlib('):
continue
satisfiers = []
for po in self.whatProvides(r, f, v):
satisfiers.append(po)
pkgresults[req] = satisfiers
return results
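    #  Note: a sketch of walking findDeps()'s nested result (illustrative
    # only; `pkgs` is an assumed list of package objects):
    #
    #     for pkg, reqs in yb.findDeps(pkgs).items():
    #         for req, providers in reqs.items():
    #             print '%s needs %s: %d providers' % (pkg, req,
    #                                                  len(providers))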
# pre 3.2.10 API used to always showdups, so that's the default atm.
def searchGenerator(self, fields, criteria, showdups=True, keys=False,
searchtags=True, searchrpmdb=True):
"""Yield the packages that match the given search criteria.
This generator method will lighten memory load for some
searches, and is the preferred search function to use.
:param fields: the fields to search
:param criteria: a list of strings specifying the criteria to
search for
:param showdups: whether to yield duplicate packages from
different repositories
:param keys: setting *keys* to True will use the search keys
that matched in the sorting, and return the search keys in
the results
:param searchtags: whether to search the package tags
        :param searchrpmdb: whether to search the rpmdb
"""
sql_fields = []
for f in fields:
sql_fields.append(RPM_TO_SQLITE.get(f, f))
# yield the results in order of most terms matched first
sorted_lists = {} # count_of_matches = [(pkgobj,
# [search strings which matched],
# [results that matched])]
tmpres = []
real_crit = []
real_crit_lower = [] # Take the s.lower()'s out of the loop
rcl2c = {}
# weigh terms in given order (earlier = more relevant)
critweight = 0
critweights = {}
for s in criteria:
real_crit.append(s)
real_crit_lower.append(s.lower())
rcl2c[s.lower()] = s
critweights.setdefault(s, critweight)
critweight -= 1
for sack in self.pkgSack.sacks.values():
tmpres.extend(sack.searchPrimaryFieldsMultipleStrings(sql_fields, real_crit))
def results2sorted_lists(tmpres, sorted_lists):
for (po, count) in tmpres:
# check the pkg for sanity
# pop it into the sorted lists
tmpkeys = set()
tmpvalues = []
if count not in sorted_lists: sorted_lists[count] = []
for s in real_crit_lower:
for field in fields:
value = to_unicode(getattr(po, field))
if value and value.lower().find(s) != -1:
tmpvalues.append(value)
tmpkeys.add(rcl2c[s])
if len(tmpvalues) > 0:
sorted_lists[count].append((po, tmpkeys, tmpvalues))
results2sorted_lists(tmpres, sorted_lists)
if searchrpmdb:
tmpres = self.rpmdb.searchPrimaryFieldsMultipleStrings(fields,
real_crit_lower,
lowered=True)
# close our rpmdb connection so we can ctrl-c, kthxbai
self.closeRpmDB()
results2sorted_lists(tmpres, sorted_lists)
del tmpres
results_by_pkg = {} # pkg=[list_of_tuples_of_values]
if searchtags:
tmpres = self.searchPackageTags(real_crit_lower)
for pkg in tmpres:
count = 0
matchkeys = []
tagresults = []
for (match, taglist) in tmpres[pkg]:
count += len(taglist)
matchkeys.append(rcl2c[match])
tagresults.extend(taglist)
if pkg not in results_by_pkg:
results_by_pkg[pkg] = []
results_by_pkg[pkg].append((matchkeys, tagresults))
del tmpres
if sorted_lists.values():
# do the ones we already have
for item in sorted_lists.values():
for pkg, k, v in item:
if pkg not in results_by_pkg:
results_by_pkg[pkg] = []
results_by_pkg[pkg].append((k,v))
# take our existing dict-by-pkg and make the dict-by-count for
# this bizarro sorted_lists format
# FIXME - stab sorted_lists in the chest at some later date
sorted_lists = {}
for pkg in results_by_pkg:
totkeys = []
totvals = []
for (k, v) in results_by_pkg[pkg]:
totkeys.extend(k)
totvals.extend(v)
totkeys = misc.unique(totkeys)
totvals = misc.unique(totvals)
count = len(totkeys)
if count not in sorted_lists:
sorted_lists[count] = []
sorted_lists[count].append((pkg, totkeys, totvals))
# To explain why the following code looks like someone took drugs
# before/during/after coding:
#
# We are sorting a list of: (po, tmpkeys, tmpvalues).
# Eg. (po, ['foo', 'bar'], ['matches foo',
# 'matches barx'])
#
# So we sort, and get a result like:
# po | repo | matching value
# 1. yum-1 | fed | -2
# 2. yum-2 | fed | -2
# 3. yum-2 | @fed | -2
# 4. yum-3 | ups | -1
# ...but without showdups we want to output _just_ #3, which requires
# we find the newest EVR po for the best "matching value". Without keys
# it's the same, except we just want the newest EVR.
# If we screw it up it's probably not even noticeable most of the time
# either, so it's pretty thankless. HTH. HAND.
# By default just sort using package sorting
sort_func = operator.itemgetter(0)
        dup = lambda x, y: True
if keys:
# Take into account the keys found, their original order,
# and number of fields hit as well
sort_func = lambda x: (-sum((critweights[y] for y in x[1])),
-len(x[2]), "\0".join(sorted(x[1])), x[0])
dup = lambda x,y: sort_func(x)[:-1] == sort_func(y)[:-1]
yielded = {}
for val in reversed(sorted(sorted_lists)):
last = None
for sl_vals in sorted(sorted_lists[val], key=sort_func):
if showdups:
(po, ks, vs) = sl_vals
else:
if (sl_vals[0].name, sl_vals[0].arch) in yielded:
continue
na = (sl_vals[0].name, sl_vals[0].arch)
if last is None or (last[0] == na and dup(last[1],sl_vals)):
last = (na, sl_vals)
continue
(po, ks, vs) = last[1]
                    if last[0] == na: # Ditto. yielded test above.
last = None
else:
last = (na, sl_vals)
if keys:
yield (po, ks, vs)
else:
yield (po, vs)
if not showdups:
yielded[(po.name, po.arch)] = 1
if last is not None:
(po, ks, vs) = last[1]
if keys:
yield (po, ks, vs)
else:
yield (po, vs)
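    #  Note: a sketch of consuming searchGenerator() (illustrative only);
    # with keys=False each hit is yielded as (po, matched_values):
    #
    #     for (po, vals) in yb.searchGenerator(['name', 'summary'],
    #                                          ['editor']):
    #         print '%s: %s' % (po, ', '.join(vals))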
    def searchPackageTags(self, criteria):
        """Search for and return a list of packages that have tags
matching the given criteria.
:param criteria: a list of strings specifying the criteria to
search for
:return: a list of package objects that have tags matching the
given criteria
"""
results = {} # name = [(criteria, taglist)]
for c in criteria:
c = c.lower()
res = self.pkgtags.search_tags(c)
for (name, taglist) in res.items():
pkgs = self.pkgSack.searchNevra(name=name)
if not pkgs:
continue
pkg = pkgs[0]
if pkg not in results:
results[pkg] = []
results[pkg].append((c, taglist))
return results
def searchPackages(self, fields, criteria, callback=None):
"""Deprecated. Search the specified fields for packages that
match the given criteria, and return a list of the results.
:param fields: the fields to search
:param criteria: a list of strings specifying the criteria to
search for
:param callback: a function to print out the results as they
are found. *callback* should have the form callback(po,
matched values list)
"""
warnings.warn(_('searchPackages() will go away in a future version of Yum.\
Use searchGenerator() instead. \n'),
Errors.YumFutureDeprecationWarning, stacklevel=2)
matches = {}
match_gen = self.searchGenerator(fields, criteria)
for (po, matched_strings) in match_gen:
if callback:
callback(po, matched_strings)
if po not in matches:
matches[po] = []
matches[po].extend(matched_strings)
return matches
    def searchPackageProvides(self, args, callback=None,
                              callback_has_matchfor=False):
        """Search for and return a list of package objects that provide
the given files or features.
:param args: a list of strings specifying the files and
features to search for the packages that provide
:param callback: a callback function to print out the results
as they are found
:param callback_has_matchfor: whether the callback function
will accept a list of strings to highlight in its output.
If this is true, *args* will be passed to *callback* so
that the files or features that were searched for can be
highlighted
"""
def _arg_data(arg):
if not misc.re_glob(arg):
isglob = False
canBeFile = arg.startswith('/')
else:
isglob = True
canBeFile = misc.re_filename(arg)
return isglob, canBeFile
matches = {}
for arg in args:
arg = to_unicode(arg)
isglob, canBeFile = _arg_data(arg)
if not isglob:
usedDepString = True
where = self.returnPackagesByDep(arg)
else:
usedDepString = False
where = self.pkgSack.searchProvides(arg)
self.verbose_logger.log(logginglevels.DEBUG_1,
P_('Searching %d package', 'Searching %d packages', len(where)), len(where))
for po in sorted(where):
self.verbose_logger.log(logginglevels.DEBUG_2,
_('searching package %s'), po)
tmpvalues = []
if usedDepString:
tmpvalues.append(arg)
if not isglob and canBeFile:
                    # then it is not a globbed file; we matched it precisely
tmpvalues.append(arg)
if isglob and canBeFile:
self.verbose_logger.log(logginglevels.DEBUG_2,
_('searching in file entries'))
for thisfile in po.dirlist + po.filelist + po.ghostlist:
if fnmatch.fnmatch(thisfile, arg):
tmpvalues.append(thisfile)
self.verbose_logger.log(logginglevels.DEBUG_2,
_('searching in provides entries'))
for (p_name, p_flag, (p_e, p_v, p_r)) in po.provides:
prov = misc.prco_tuple_to_string((p_name, p_flag, (p_e, p_v, p_r)))
if not usedDepString:
if fnmatch.fnmatch(p_name, arg) or fnmatch.fnmatch(prov, arg):
tmpvalues.append(prov)
if len(tmpvalues) > 0:
if callback: # No matchfor, on globs
if not isglob and callback_has_matchfor:
callback(po, tmpvalues, args)
else:
callback(po, tmpvalues)
matches[po] = tmpvalues
# installed rpms, too
taglist = ['filelist', 'dirnames', 'provides_names']
taglist_provonly = ['provides_names']
for arg in args:
isglob, canBeFile = _arg_data(arg)
if not isglob:
where = self.returnInstalledPackagesByDep(arg)
usedDepString = True
for po in where:
tmpvalues = [arg]
if len(tmpvalues) > 0:
if callback:
if callback_has_matchfor:
callback(po, tmpvalues, args)
else:
callback(po, tmpvalues)
matches[po] = tmpvalues
else:
usedDepString = False
where = self.rpmdb
if canBeFile:
arg_taglist = taglist
else:
arg_taglist = taglist_provonly
arg_regex = re.compile(fnmatch.translate(arg))
for po in sorted(where):
searchlist = []
tmpvalues = []
for tag in arg_taglist:
tagdata = getattr(po, tag)
if tagdata is None:
continue
if type(tagdata) is types.ListType:
searchlist.extend(tagdata)
else:
searchlist.append(tagdata)
for item in searchlist:
if arg_regex.match(item):
tmpvalues.append(item)
if len(tmpvalues) > 0:
if callback: # No matchfor, on globs
callback(po, tmpvalues)
matches[po] = tmpvalues
return matches
def _groupInstalledData(self, group):
""" Return a dict of
pkg_name =>
(installed, available,
                     blacklisted-installed, blacklisted-available). """
ret = {}
if not group or self.conf.group_command != 'objects':
return ret
pkg_names = {}
if group.groupid in self.igroups.groups:
pkg_names = self.igroups.groups[group.groupid].pkg_names
all_pkg_names = set(list(pkg_names))
if hasattr(group, 'packages'): # If a comps. group, add remote pkgs.
all_pkg_names.update(group.packages)
for pkg_name in all_pkg_names:
ipkgs = self.rpmdb.searchNames([pkg_name])
if pkg_name not in pkg_names and not ipkgs:
ret[pkg_name] = 'available'
continue
if not ipkgs:
ret[pkg_name] = 'blacklisted-available'
continue
for ipkg in ipkgs:
# Multiarch, if any are installed for the group we count "both"
if ipkg.yumdb_info.get('group_member', '') != group.groupid:
continue
ret[pkg_name] = 'installed'
break
else:
ret[pkg_name] = 'blacklisted-installed'
return ret
def _groupInstalledEnvData(self, evgroup):
""" Return a dict of
grp_name =>
(installed, available,
                     blacklisted-installed, blacklisted-available). """
ret = {}
if not evgroup or self.conf.group_command != 'objects':
return ret
grp_names = {}
if evgroup.environmentid in self.igroups.groups:
grp_names = self.igroups.environments[evgroup.environmentid]
grp_names = grp_names.grp_names
all_grp_names = set(list(grp_names))
if hasattr(evgroup, 'allgroups'): # If a comps. evgroup, add remote grps
all_grp_names.update(evgroup.allgroups)
for grp_name in all_grp_names:
igrp = self.igroups.groups.get(grp_name)
if grp_name not in grp_names and not igrp:
ret[grp_name] = 'available'
continue
if not igrp:
ret[grp_name] = 'blacklisted-available'
continue
if igrp.environment == evgroup.environmentid:
ret[grp_name] = 'installed'
else:
ret[grp_name] = 'blacklisted-installed'
return ret
def _groupReturnGroups(self, patterns=None, ignore_case=True):
igrps = None
ievgrps = None
if patterns is None:
grps = self.comps.groups
if self.conf.group_command == 'objects':
igrps = self.igroups.groups.values()
evgrps = self.comps.environments
if self.conf.group_command == 'objects':
ievgrps = self.igroups.environments.values()
return igrps, grps, ievgrps, evgrps
gpats = []
epats = []
for pat in patterns:
if pat.startswith('@^'):
epats.append(pat[2:])
elif pat.startswith('@'):
gpats.append(pat[1:])
else:
epats.append(pat)
gpats.append(pat)
epats = ",".join(epats)
gpats = ",".join(gpats)
cs = not ignore_case
grps = self.comps.return_groups(gpats, case_sensitive=cs)
# Because we want name matches too, and we don't store group names
# we need to add the groupid's we've found:
if self.conf.group_command == 'objects':
gpats = gpats + "," + ",".join([grp.groupid for grp in grps])
igrps = self.igroups.return_groups(gpats, case_sensitive=cs)
evgrps = self.comps.return_environments(epats, case_sensitive=cs)
if self.conf.group_command == 'objects':
            epats = epats + "," + ",".join([grp.environmentid for grp in evgrps])
ievgrps = self.igroups.return_environments(epats, case_sensitive=cs)
return igrps, grps, ievgrps, evgrps
def doGroupLists(self, uservisible=0, patterns=None, ignore_case=True,
return_evgrps=False):
"""Return two lists of groups: installed groups and available
groups.
:param uservisible: If True, only groups marked as uservisible
will be returned. Otherwise, all groups will be returned
        :param patterns: a list of strings. If given, only groups
with names that match the patterns will be included in the
lists. If not given, all groups will be included
:param ignore_case: whether to ignore case when determining
whether group names match the strings in *patterns*
:param return_evgrps: whether to return environment groups as well as
package groups
"""
installed = []
available = []
einstalled = []
eavailable = []
if self.comps.compscount == 0:
raise Errors.GroupsError, _('No group data available for configured repositories')
igrps, grps, ievgrps, evgrps = self._groupReturnGroups(patterns,
ignore_case)
if igrps is not None:
digrps = {}
for igrp in igrps:
digrps[igrp.gid] = igrp
igrps = digrps
if ievgrps is not None:
digrps = {}
for ievgrp in ievgrps:
digrps[ievgrp.evgid] = ievgrp
ievgrps = digrps
for grp in grps:
if igrps is None:
grp_installed = grp.installed
else:
grp_installed = grp.groupid in igrps
if grp_installed:
del igrps[grp.groupid]
if grp_installed:
if uservisible:
if grp.user_visible:
installed.append(grp)
else:
installed.append(grp)
else:
if uservisible:
if grp.user_visible:
available.append(grp)
else:
available.append(grp)
for evgrp in evgrps:
if ievgrps is None:
evgrp_installed = evgrp.installed
else:
evgrp_installed = evgrp.environmentid in ievgrps
if evgrp_installed:
del ievgrps[evgrp.environmentid]
if evgrp_installed:
einstalled.append(evgrp)
else:
eavailable.append(evgrp)
if igrps is None:
igrps = {}
if ievgrps is None:
ievgrps = {}
# Note that we used to get here with igrps/ievgrps that didn't exist
# in comps. but we mock them in comps now because it was hard to deal
# with that everywhere ... so just to confirm.
assert not igrps
assert not ievgrps
for igrp in igrps.values():
# These are installed groups that aren't in comps anymore. so we
# create fake comps groups for them.
grp = comps.Group()
grp.groupid = igrp.gid
grp.installed = True
grp.name = grp.groupid
for pkg_name in igrp.pkg_names:
grp.mandatory_packages[pkg_name] = 1
installed.append(grp)
for ievgrp in ievgrps.values():
# These are installed evgroups that aren't in comps anymore. so we
# create fake comps evgroups for them.
evgrp = comps.Environment()
            evgrp.environmentid = ievgrp.evgid
evgrp.installed = True
evgrp.name = evgrp.environmentid
evgrp._groups = list(ievgrp.groups)
einstalled.append(evgrp)
if return_evgrps:
return (sorted(installed), sorted(available),
sorted(einstalled), sorted(eavailable))
return sorted(installed), sorted(available)
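    #  Note: a sketch of using doGroupLists() (illustrative only):
    #
    #     installed, available = yb.doGroupLists(patterns=['web*'])
    #     for grp in installed:
    #         print grp.groupid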
def groupRemove(self, grpid):
"""Mark all the packages in the given group to be removed.
:param grpid: the name of the group containing the packages to
mark for removal
:return: a list of transaction members added to the
transaction set by this function
"""
txmbrs_used = []
thesegroups = self.comps.return_groups(grpid)
if not thesegroups:
raise Errors.GroupsError, _("No Group named %s exists") % to_unicode(grpid)
for thisgroup in thesegroups:
igroup_data = self._groupInstalledData(thisgroup)
thisgroup.toremove = True
pkgs = thisgroup.packages
gid = thisgroup.groupid
for pkg in pkgs:
if pkg in igroup_data and igroup_data[pkg] != 'installed':
continue
txmbrs = self.remove(name=pkg, silence_warnings=True)
txmbrs_used.extend(txmbrs)
for txmbr in txmbrs:
txmbr.groups.append(gid)
if igroup_data:
self.igroups.del_group(gid)
return txmbrs_used
def groupUnremove(self, grpid):
"""Unmark any packages in the given group from being removed.
:param grpid: the name of the group to unmark the packages of
"""
thesegroups = self.comps.return_groups(grpid)
if not thesegroups:
raise Errors.GroupsError, _("No Group named %s exists") % to_unicode(grpid)
for thisgroup in thesegroups:
thisgroup.toremove = False
pkgs = thisgroup.packages
for pkg in thisgroup.packages:
for txmbr in self.tsInfo:
if txmbr.po.name == pkg and txmbr.po.state in TS_INSTALL_STATES:
try:
txmbr.groups.remove(grpid)
except ValueError:
self.verbose_logger.log(logginglevels.DEBUG_1,
_("package %s was not marked in group %s"), txmbr.po,
grpid)
continue
# if there aren't any other groups mentioned then remove the pkg
if len(txmbr.groups) == 0:
self.tsInfo.remove(txmbr.po.pkgtup)
    def environmentRemove(self, evgrpid):
        """Mark all the groups in the given environment to be removed.
:param evgrpid: the name of the environment containing the groups to
mark for removal
:return: a list of transaction members added to the
transaction set by this function
"""
txmbrs_used = []
thesegroups = self.comps.return_environments(evgrpid)
if not thesegroups:
raise Errors.GroupsError, _("No Environment named %s exists") % to_unicode(evgrpid)
for thisgroup in thesegroups:
igroup_data = self._groupInstalledEnvData(thisgroup)
grps = thisgroup.allgroups
evgid = thisgroup.environmentid
for grp in grps:
if grp in igroup_data and igroup_data[grp] != 'installed':
continue
txmbrs = self.groupRemove(grp)
txmbrs_used.extend(txmbrs)
for txmbr in txmbrs:
txmbr.environments.append(evgid)
if igroup_data:
self.igroups.del_environment(evgid)
return txmbrs_used
def selectGroup(self, grpid, group_package_types=[],
enable_group_conditionals=None, upgrade=False, ievgrp=None):
"""Mark all the packages in the given group to be installed.
:param grpid: the name of the group containing the packages to
mark for installation
:param group_package_types: a list of the types of groups to
work with. This overrides self.conf.group_package_types
:param enable_group_conditionals: overrides
self.conf.enable_group_conditionals
:return: a list of transaction members added to the
transaction set by this function
"""
txmbrs_used = []
thesegroups = self.comps.return_groups(grpid)
if not thesegroups:
raise Errors.GroupsError, _("No Group named %s exists") % to_unicode(grpid)
package_types = self.conf.group_package_types
if group_package_types:
package_types = group_package_types
if self.conf.group_command == 'compat':
upgrade = False
for thisgroup in thesegroups:
if thisgroup.selected:
continue
thisgroup.selected = True
# Can move to upgrade, if installed and calling install
lupgrade = upgrade
pkgs = []
if 'mandatory' in package_types:
pkgs.extend(thisgroup.mandatory_packages)
if 'default' in package_types:
pkgs.extend(thisgroup.default_packages)
if 'optional' in package_types:
pkgs.extend(thisgroup.optional_packages)
igroup_data = self._groupInstalledData(thisgroup)
igrp = None
if igroup_data:
if thisgroup.groupid in self.igroups.groups:
igrp = self.igroups.groups[thisgroup.groupid]
lupgrade = True
else:
self.igroups.add_group(thisgroup.groupid,
thisgroup.packages, ievgrp)
for pkg in igroup_data:
if igroup_data[pkg] == 'installed':
pkgs.append(pkg)
old_txmbrs = len(txmbrs_used)
for pkg in pkgs:
if self.conf.group_command == 'objects':
assert pkg in igroup_data
if (pkg not in igroup_data or
igroup_data[pkg].startswith('blacklisted')):
# (lupgrade and igroup_data[pkg] == 'available')):
msg = _('Skipping package %s from group %s')
self.verbose_logger.log(logginglevels.DEBUG_2,
msg, pkg, thisgroup.groupid)
continue
self.verbose_logger.log(logginglevels.DEBUG_2,
_('Adding package %s from group %s'), pkg, thisgroup.groupid)
if igrp is not None:
igrp.pkg_names.add(pkg)
self.igroups.changed = True
txmbrs = []
try:
if (lupgrade and
(self.conf.group_command == 'simple' or
(igroup_data and igroup_data[pkg] == 'installed'))):
txmbrs = self.update(name = pkg,
pkg_warning_level='debug2')
elif igroup_data and igroup_data[pkg] == 'installed':
pass # Don't upgrade on install.
else:
txmbrs = self.install(name = pkg,
pkg_warning_level='debug2')
for txmbr in txmbrs:
txmbr.group_member = thisgroup.groupid
if lupgrade: # For list transaction.
txmbr._ugroup_member = thisgroup
else:
txmbr._igroup_member = thisgroup
except Errors.InstallError, e:
self.verbose_logger.debug(_('No package named %s available to be installed'),
pkg)
else:
txmbrs_used.extend(txmbrs)
for txmbr in txmbrs:
txmbr.groups.append(thisgroup.groupid)
group_conditionals = self.conf.enable_group_conditionals
if enable_group_conditionals is not None: # has to be this way so we can set it to False
group_conditionals = enable_group_conditionals
count_cond_test = 0
# FIXME: What do we do about group conditionals when group==objects
# or group upgrade for group_command=simple?
if not lupgrade and group_conditionals:
for condreq, cond in thisgroup.conditional_packages.iteritems():
if self.isPackageInstalled(cond):
try:
txmbrs = self.install(name = condreq)
except Errors.InstallError:
# we don't care if the package doesn't exist
continue
else:
if cond not in self.tsInfo.conditionals:
self.tsInfo.conditionals[cond]=[]
txmbrs_used.extend(txmbrs)
for txmbr in txmbrs:
txmbr.groups.append(thisgroup.groupid)
self.tsInfo.conditionals[cond].append(txmbr.po)
continue
# Otherwise we hook into tsInfo.add to make sure
# we'll catch it if it's added later in this transaction
pkgs = self.pkgSack.searchNevra(name=condreq)
if pkgs:
if self.arch.multilib:
if self.conf.multilib_policy == 'best':
use = []
best = self.arch.legit_multi_arches
best.append('noarch')
for pkg in pkgs:
if pkg.arch in best:
use.append(pkg)
pkgs = use
pkgs = packagesNewestByName(pkgs)
count_cond_test += len(pkgs)
if cond not in self.tsInfo.conditionals:
self.tsInfo.conditionals[cond] = []
self.tsInfo.conditionals[cond].extend(pkgs)
if not lupgrade and len(txmbrs_used) == old_txmbrs:
self.logger.critical(_('Warning: Group %s does not have any packages to install.'), thisgroup.groupid)
if count_cond_test:
self.logger.critical(_('Group %s does have %u conditional packages, which may get installed.'),
thisgroup.groupid, count_cond_test)
return txmbrs_used
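    #  Note: a sketch of using selectGroup() (illustrative only; the group
    # id and package types are made up):
    #
    #     txmbrs = yb.selectGroup('base',
    #                             group_package_types=['mandatory'])
    #     if not txmbrs:
    #         pass # nothing new to install from this group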
def deselectGroup(self, grpid, force=False):
"""Unmark the packages in the given group from being
installed.
:param grpid: the name of the group containing the packages to
unmark from installation
:param force: if True, force remove all the packages in the
given group from the transaction
"""
if not self.comps.has_group(grpid):
raise Errors.GroupsError, _("No Group named %s exists") % to_unicode(grpid)
thesegroups = self.comps.return_groups(grpid)
if not thesegroups:
raise Errors.GroupsError, _("No Group named %s exists") % to_unicode(grpid)
# FIXME: Do something with groups as objects, and env. groups.
for thisgroup in thesegroups:
thisgroup.selected = False
for pkgname in thisgroup.packages:
txmbrs = self.tsInfo.getMembersWithState(None,TS_INSTALL_STATES)
for txmbr in txmbrs:
if txmbr.po.name != pkgname:
continue
if not force:
try:
txmbr.groups.remove(grpid)
except ValueError:
self.verbose_logger.log(logginglevels.DEBUG_1,
_("package %s was not marked in group %s"), txmbr.po,
grpid)
continue
# If the pkg isn't part of any group, or the group is
# being forced out ... then remove the pkg
if force or len(txmbr.groups) == 0:
self.tsInfo.remove(txmbr.po.pkgtup)
for pkg in self.tsInfo.conditionals.get(txmbr.name, []):
self.tsInfo.remove(pkg.pkgtup)
def selectEnvironment(self, evgrpid, group_package_types=[],
enable_group_conditionals=None, upgrade=False):
"""Mark all the groups in the given environment group to be installed.
:param evgrpid: the name of the env. group containing the groups to
mark for installation
:param group_package_types: a list of the types of groups to
work with. This overrides self.conf.group_package_types
        :param enable_group_conditionals: overrides
           self.conf.enable_group_conditionals
        :param upgrade: if True, mark the environment's installed groups
           for upgrade rather than marking its groups for installation
        :return: a list of transaction members added to the
           transaction set by this function
"""
evgrps = self.comps.return_environments(evgrpid)
if not evgrps:
raise Errors.GroupsError, _("No Environment named %s exists") % to_unicode(evgrpid)
ret = []
for evgrp in evgrps:
ievgrp = None
if self.conf.group_command == 'compat':
grps = ",".join(sorted(evgrp.groups))
elif self.conf.group_command == 'simple':
if not upgrade:
grps = ",".join(sorted(evgrp.groups))
else: # Only upgrade the installed groups...
grps = []
for grpid in evgrp.groups:
grp = self.comps.return_group(grpid)
if grp is None:
continue
if not grp.installed:
continue
grps.append(grpid)
grps = ",".join(sorted(grps))
elif self.conf.group_command == 'objects':
igroup_data = self._groupInstalledEnvData(evgrp)
grps = set()
for grpid in evgrp.groups:
if (grpid not in igroup_data or
igroup_data[grpid].startswith('blacklisted')):
msg = _('Skipping group %s from environment %s')
self.verbose_logger.log(logginglevels.DEBUG_2,
msg, grpid, evgrp.environmentid)
continue
grps.add(grpid)
if evgrp.environmentid in self.igroups.environments:
ievgrp = self.igroups.environments[evgrp.environmentid]
                    # Add groups from the installed evgrp, e.g. for
                    # installed-only evgrps.
for grp_name in ievgrp.grp_names:
if grp_name not in self.igroups.groups:
continue
grp_evgrpid = self.igroups.groups[grp_name].environment
if grp_evgrpid != evgrp.environmentid:
continue
grps.add(grp_name)
else:
ievgrp = self.igroups.add_environment(evgrp.environmentid,
evgrp.allgroups)
grps = ",".join(sorted(grps))
try:
txs = self.selectGroup(grps,
group_package_types,
enable_group_conditionals, upgrade,
ievgrp=ievgrp)
except Errors.GroupsError:
continue
ret.extend(txs)
return ret
def deselectEnvironment(self, evgrpid, force=False):
"""Unmark the groups in the given environment group from being
installed.
:param evgrpid: the name of the environment group containing the
groups to unmark from installation
:param force: if True, force remove all the packages in the
given groups from the transaction
"""
evgrps = self.comps.return_environments(evgrpid)
        if not evgrps:
raise Errors.GroupsError, _("No Environment named %s exists") % to_unicode(evgrpid)
for evgrp in evgrps:
grps = ",".join(sorted(evgrp.groups))
self.deselectGroup(grps, force)
# FIXME: env. needs to be marked not-to-be-installed, etc.
def getPackageObject(self, pkgtup, allow_missing=False):
"""Return a package object that corresponds to the given
package tuple.
:param pkgtup: the package tuple specifying the package object
to return
:param allow_missing: If no package corresponding to the given
package tuple can be found, None is returned if
*allow_missing* is True, and a :class:`yum.Errors.DepError` is
raised if *allow_missing* is False.
:return: a package object corresponding to the given package tuple
:raises: a :class:`yum.Errors.DepError` if no package
corresponding to the given package tuple can be found, and
*allow_missing* is False
"""
# look it up in the self.localPackages first:
for po in self.localPackages:
if po.pkgtup == pkgtup:
return po
pkgs = self.pkgSack.searchPkgTuple(pkgtup)
if len(pkgs) == 0:
self._add_not_found_a(pkgs, pkgtup=pkgtup)
if allow_missing: # This can happen due to excludes after .up has
return None # happened.
raise Errors.DepError, _('Package tuple %s could not be found in packagesack') % str(pkgtup)
        # boy it'd be nice to do something smarter here when len(pkgs) > 1 FIXME
        result = pkgs[0] # with a single match this is the only one anyway
# this is where we could do something to figure out which repository
# is the best one to pull from
return result
def getInstalledPackageObject(self, pkgtup):
"""Return a :class:`yum.packages.YumInstalledPackage` object that
corresponds to the given package tuple. This function should
be used instead of :func:`searchPkgTuple` if you are assuming
that the package object exists.
:param pkgtup: the package tuple specifying the package object
to return
:return: a :class:`yum.packages.YumInstalledPackage` object corresponding
to the given package tuple
:raises: a :class:`yum.Errors.RpmDBError` if the specified package
object cannot be found
"""
pkgs = self.rpmdb.searchPkgTuple(pkgtup)
if len(pkgs) == 0:
self._add_not_found_i(pkgs, pkgtup=pkgtup)
raise Errors.RpmDBError, _('Package tuple %s could not be found in rpmdb') % str(pkgtup)
        # Ditto. FIXME from getPackageObject() for len() > 1 ... :)
po = pkgs[0] # take the first one
return po
def gpgKeyCheck(self):
"""Checks for the presence of GPG keys in the rpmdb.
:return: 0 if there are no GPG keys in the rpmdb, and 1 if
there are keys
"""
gpgkeyschecked = self.conf.cachedir + '/.gpgkeyschecked.yum'
if os.path.exists(gpgkeyschecked):
return 1
myts = rpmUtils.transaction.initReadOnlyTransaction(root=self.conf.installroot)
myts.pushVSFlags(~(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS))
idx = myts.dbMatch('name', 'gpg-pubkey')
keys = idx.count()
del idx
del myts
if keys == 0:
return 0
else:
mydir = os.path.dirname(gpgkeyschecked)
if not os.path.exists(mydir):
os.makedirs(mydir)
fo = open(gpgkeyschecked, 'w')
fo.close()
del fo
return 1
def returnPackagesByDep(self, depstring):
"""Return a list of package objects that provide the given
dependencies.
:param depstring: a string specifying the dependency to return
the packages that fulfil
:return: a list of packages that fulfil the given dependency
"""
if not depstring:
return []
# parse the string out
# either it is 'dep (some operator) e:v-r'
# or /file/dep
# or packagename
if type(depstring) == types.TupleType:
(depname, depflags, depver) = depstring
else:
depname = depstring
depflags = None
depver = None
if depstring[0] != '/':
# not a file dep - look at it for being versioned
dep_split = depstring.split()
if len(dep_split) == 3:
depname, flagsymbol, depver = dep_split
                if flagsymbol not in SYMBOLFLAGS:
raise Errors.YumBaseError, _('Invalid version flag from: %s') % str(depstring)
depflags = SYMBOLFLAGS[flagsymbol]
if depflags is None: # This does wildcards...
return self.pkgSack.searchProvides(depstring)
# This does flags+versions, but no wildcards...
return self.pkgSack.getProvides(depname, depflags, depver).keys()
def returnPackageByDep(self, depstring):
"""Return the best, or first, package object that provides the
given dependencies.
:param depstring: a string specifying the dependency to return
the package that fulfils
:return: the best, or first, package that fulfils the given
dependency
:raises: a :class:`yum.Errors.YumBaseError` if no packages that
fulfil the given dependency can be found
"""
# we get all sorts of randomness here
errstring = depstring
if type(depstring) not in types.StringTypes:
errstring = str(depstring)
try:
pkglist = self.returnPackagesByDep(depstring)
except Errors.YumBaseError:
raise Errors.YumBaseError, _('No Package found for %s') % errstring
ps = ListPackageSack(pkglist)
result = self._bestPackageFromList(ps.returnNewestByNameArch(),
req=errstring)
if result is None:
raise Errors.YumBaseError, _('No Package found for %s') % errstring
return result
def returnInstalledPackagesByDep(self, depstring):
"""Return a list of installed package objects that provide the
given dependencies.
:param depstring: a string specifying the dependency to return
the packages that fulfil
:return: a list of installed packages that fulfil the given
dependency
"""
if not depstring:
return []
# parse the string out
# either it is 'dep (some operator) e:v-r'
# or /file/dep
# or packagename
if type(depstring) == types.TupleType:
(depname, depflags, depver) = depstring
else:
depname = depstring
depflags = None
depver = None
if depstring[0] != '/':
# not a file dep - look at it for being versioned
dep_split = depstring.split()
if len(dep_split) == 3:
depname, flagsymbol, depver = dep_split
                if flagsymbol not in SYMBOLFLAGS:
raise Errors.YumBaseError, _('Invalid version flag from: %s') % str(depstring)
depflags = SYMBOLFLAGS[flagsymbol]
if depflags is None: # This does wildcards...
return self.rpmdb.searchProvides(depstring)
# This does flags+versions, but no wildcards...
return self.rpmdb.getProvides(depname, depflags, depver).keys()
def returnInstalledPackageByDep(self, depstring):
"""Return the best, or first, installed package object that provides the
given dependencies.
:param depstring: a string specifying the dependency to return
the package that fulfils
:return: the best, or first, installed package that fulfils the given
dependency
:raises: a :class:`yum.Errors.YumBaseError` if no packages that
fulfil the given dependency can be found
"""
# we get all sorts of randomness here
errstring = depstring
if type(depstring) not in types.StringTypes:
errstring = str(depstring)
try:
pkglist = self.returnInstalledPackagesByDep(depstring)
except Errors.YumBaseError:
raise Errors.YumBaseError, _('No Package found for %s') % errstring
ps = ListPackageSack(pkglist)
result = self._bestPackageFromList(ps.returnNewestByNameArch(),
req=errstring)
if result is None:
raise Errors.YumBaseError, _('No Package found for %s') % errstring
return result
def _bestPackageFromList(self, pkglist, req=None):
"""take list of package objects and return the best package object.
If the list is empty, return None.
Note: this is not aware of multilib so make sure you're only
passing it packages of a single arch group.
:param pkglist: the list of packages to return the best
packages from
:param req: the requirement from the user
        :return: the single best package from *pkglist*, or None if
           *pkglist* is empty
"""
if len(pkglist) == 0:
return None
if len(pkglist) == 1:
return pkglist[0]
bestlist = self._compare_providers(pkglist, reqpo=None, req=req)
return bestlist[0][0]
def bestPackagesFromList(self, pkglist, arch=None, single_name=False,
req=None):
"""Return the best packages from a list of packages. This
function is multilib aware, so that it will not compare
multilib to singlelib packages.
:param pkglist: the list of packages to return the best
packages from
:param arch: packages will be selected that are compatible
with the architecture specified by *arch*
:param single_name: whether to return a single package name
:param req: the requirement from the user
:return: a list of the best packages from *pkglist*
"""
returnlist = []
compatArchList = self.arch.get_arch_list(arch)
multiLib = []
singleLib = []
noarch = []
for po in pkglist:
if po.arch not in compatArchList:
continue
elif po.arch in ("noarch"):
noarch.append(po)
elif isMultiLibArch(arch=po.arch):
multiLib.append(po)
else:
singleLib.append(po)
# we now have three lists. find the best package(s) of each
multi = self._bestPackageFromList(multiLib, req=req)
single = self._bestPackageFromList(singleLib, req=req)
no = self._bestPackageFromList(noarch, req=req)
if single_name and multi and single and multi.name != single.name:
            # Single _must_ match multi, if we want a single package name
single = None
# now, to figure out which arches we actually want
# if there aren't noarch packages, it's easy. multi + single
if no is None:
if multi: returnlist.append(multi)
if single: returnlist.append(single)
# if there's a noarch and it's newer than the multilib, we want
# just the noarch. otherwise, we want multi + single
elif multi:
best = self._bestPackageFromList([multi,no], req=req)
if best.arch == "noarch":
returnlist.append(no)
else:
if multi: returnlist.append(multi)
if single: returnlist.append(single)
# similar for the non-multilib case
elif single:
best = self._bestPackageFromList([single,no], req=req)
if best.arch == "noarch":
returnlist.append(no)
else:
returnlist.append(single)
# if there's not a multi or single lib, then we want the noarch
else:
returnlist.append(no)
return returnlist
# FIXME: This doesn't really work, as it assumes one obsoleter for each pkg
# when we can have:
# 1 pkg obsoleted by multiple pkgs _and_
# 1 pkg obsoleting multiple pkgs
# ...and we need to detect loops, and get the arches "right" and do this
# for chains. Atm. I hate obsoletes, and I can't get it to work better,
# easily ... so screw it, don't create huge chains of obsoletes with some
# loops in there too ... or I'll have to hurt you.
def _pkg2obspkg(self, po):
""" Given a package return the package it's obsoleted by and so
we should install instead. Or None if there isn't one. """
if self._up is not None:
thispkgobsdict = self.up.checkForObsolete([po.pkgtup])
else:
# This is pretty hacky, but saves a huge amount of time for small
# ops.
if not self.conf.obsoletes:
return None
if not hasattr(self, '_up_obs_hack'):
obs_init = time.time()
up = rpmUtils.updates.Updates([], [])
up.rawobsoletes = self.pkgSack.returnObsoletes(newest=True)
self.verbose_logger.debug('Obs Init time: %0.3f' % (time.time()
- obs_init))
self._up_obs_hack = up
thispkgobsdict = self._up_obs_hack.checkForObsolete([po.pkgtup])
if po.pkgtup in thispkgobsdict:
obsoleting = thispkgobsdict[po.pkgtup]
oobsoleting = []
# We want to keep the arch. of the obsoleted pkg. if possible.
for opkgtup in obsoleting:
if not canCoinstall(po.arch, opkgtup[1]):
oobsoleting.append(opkgtup)
if oobsoleting:
obsoleting = oobsoleting
if len(obsoleting) > 1:
# Pick the first name, and run with it...
first = obsoleting[0]
obsoleting = [pkgtup for pkgtup in obsoleting
if first[0] == pkgtup[0]]
if len(obsoleting) > 1:
# Lock to the latest version...
def _sort_ver(x, y):
n1,a1,e1,v1,r1 = x
n2,a2,e2,v2,r2 = y
return compareEVR((e1,v1,r1), (e2,v2,r2))
obsoleting.sort(_sort_ver)
first = obsoleting[0]
obsoleting = [pkgtup for pkgtup in obsoleting
if not _sort_ver(first, pkgtup)]
if len(obsoleting) > 1:
# Now do arch distance (see depsolve:compare_providers)...
def _sort_arch_i(carch, a1, a2):
res1 = archDifference(carch, a1)
if not res1:
return 0
res2 = archDifference(carch, a2)
if not res2:
return 0
return res1 - res2
def _sort_arch(x, y):
n1,a1,e1,v1,r1 = x
n2,a2,e2,v2,r2 = y
ret = _sort_arch_i(po.arch, a1, a2)
if ret:
return ret
ret = _sort_arch_i(self.arch.bestarch, a1, a2)
return ret
obsoleting.sort(_sort_arch)
for pkgtup in obsoleting:
pkg = self.getPackageObject(pkgtup, allow_missing=True)
if pkg is not None:
return pkg
return None
return None
def _test_loop(self, node, next_func):
""" Generic comp. sci. test for looping, walk the list with two pointers
moving one twice as fast as the other. If they are ever == you have
a loop. If loop we return None, if no loop the last element. """
slow = node
done = False
while True:
next = next_func(node)
if next is None and not done: return None
if next is None: return node
node = next_func(next)
if node is None: return next
done = True
slow = next_func(slow)
if node == slow:
return None
def _try_bold(self, string_):
"""Attempt to make the string look bold in terminal."""
if hasattr(self, 'term'):
return '%s%s%s' % (self.term.MODE['bold'], string_, self.term.MODE['normal'])
return string_
def _at_groupinstall(self, pattern, upgrade=False):
" Do groupinstall via. leading @ on the cmd line, for install."
assert pattern[0] == '@'
group_string = pattern[1:]
tx_return = []
try: comps = self.comps
except yum.Errors.GroupsError, e:
# No Groups Available in any repository?
# This also means no installed groups, when using objects.
self.logger.warning(e)
return tx_return
found = False
if group_string and group_string[0] == '^':
group_string = group_string[1:]
# Actually dealing with "environment groups".
for env_grp in comps.return_environments(group_string):
found = True
txmbrs = self.selectEnvironment(env_grp.environmentid,
upgrade=upgrade)
tx_return.extend(txmbrs)
else:
for group in comps.return_groups(group_string):
found = True
txmbrs = self.selectGroup(group.groupid, upgrade=upgrade)
tx_return.extend(txmbrs)
if not found:
raise Errors.GroupInstallError, _('Group %s does not exist.') % self._try_bold(group_string)
return tx_return
def _at_groupupgrade(self, pattern):
" Do group upgrade via. leading @ on the cmd line, for update."
try:
return self._at_groupinstall(pattern, upgrade=True)
except Errors.GroupInstallError, e:
self.logger.warning(_('Warning: %s'), e)
return []
def _at_groupremove(self, pattern):
" Do groupremove via. leading @ on the cmd line, for remove."
assert pattern[0] == '@'
group_string = pattern[1:]
tx_return = []
if group_string and group_string[0] == '^':
group_string = group_string[1:]
# Actually dealing with "environment groups".
try:
txmbrs = self.environmentRemove(group_string)
except yum.Errors.GroupsError:
self.logger.critical(_('Warning: Environment Group %s does not exist.'), group_string)
else:
tx_return.extend(txmbrs)
return tx_return
try:
txmbrs = self.groupRemove(group_string)
except yum.Errors.GroupsError:
self.logger.critical(_('No group named %s exists'), group_string)
else:
tx_return.extend(txmbrs)
return tx_return
# Note that this returns available pkgs, and not txmbrs like the other
# _at_group* functions.
def _at_groupdowngrade(self, pattern):
" Do downgrade of a group via. leading @ on the cmd line."
assert pattern[0] == '@'
grpid = pattern[1:]
# FIXME: **** environment groups and groups as objects... ****
thesegroups = self.comps.return_groups(grpid)
if not thesegroups:
raise Errors.GroupsError, _("No Group named %s exists") % to_unicode(grpid)
pkgnames = set()
for thisgroup in thesegroups:
pkgnames.update(thisgroup.packages)
return self.pkgSack.searchNames(pkgnames)
def _minus_deselect(self, pattern):
""" Remove things from the transaction, like kickstart. """
assert pattern[0] == '-'
pat = pattern[1:].strip()
if pat and pat.startswith('@^'):
pat = pat[2:]
return self.deselectEnvironment(pat)
if pat and pat[0] == '@':
pat = pat[1:]
return self.deselectGroup(pat)
return self.tsInfo.deselect(pat)
def _find_obsoletees(self, po):
""" Return the pkgs. that are obsoleted by the po we pass in. """
if not self.conf.obsoletes:
return
if not isinstance(po, YumLocalPackage):
for (obstup, inst_tup) in self.up.getObsoletersTuples(name=po.name):
if po.pkgtup == obstup:
installed_pkg = self.getInstalledPackageObject(inst_tup)
yield installed_pkg
else:
for pkg in self._find_obsoletees_direct(po):
yield pkg
def _find_obsoletees_direct(self, po):
""" Return the pkgs. that are obsoleted by the po we pass in. This works
directly on the package data, for two reasons:
1. Consulting .up. has a slow setup for small/fast ops.
2. We need this work even if obsoletes are turned off, because rpm
will be doing it for us. """
for obs_n in po.obsoletes_names:
for pkg in self.rpmdb.searchNevra(name=obs_n):
if pkg.obsoletedBy([po]):
yield pkg
def _add_prob_flags(self, *flags):
""" Add all of the passed flags to the tsInfo.probFilterFlags array. """
for flag in flags:
if flag not in self.tsInfo.probFilterFlags:
self.tsInfo.probFilterFlags.append(flag)
def _install_is_upgrade(self, po, ipkgs):
""" See if po is an upgradeable version of an installed pkg.
Non-compat. arch differences mean no. """
if False and self._up is not None:
# This is the old code, not sure it's good to have two paths. And
# we don't want to create .up. (which requires init repos.) if we
# don't have to.
return po.pkgtup in self.up.updating_dict
if self.allowedMultipleInstalls(po):
return False
for ipkg in ipkgs:
if po.verLE(ipkg):
continue
if po.arch == ipkg.arch: # always fine.
return True
if 'noarch' in (po.arch, ipkg.arch):
return True
if not self.arch.multilib:
return True
if canCoinstall(po.arch, ipkg.arch):
continue
return True
return False
def _valid_install_arch(self, po, ipkgs=None):
        ''' See if we can install this arch of package, mainly for
            i386 vs. i586 or ppc vs. ppc64 etc. '''
if not ipkgs:
ipkgs = self.rpmdb.searchNames([po.name])
else:
ipkgs = ipkgs[:]
# Add these anyway, just to be sure.
for txmbr in self.tsInfo.matchNaevr(po.name):
if txmbr.output_state not in TS_INSTALL_STATES:
continue
ipkgs.append(txmbr.po)
for ipkg in ipkgs:
if po.arch == ipkg.arch:
continue
if not po.verEQ(ipkg):
continue
if canCoinstall(po.arch, ipkg.arch):
continue
self.verbose_logger.log(logginglevels.INFO_2,
_("Package: %s - can't co-install with %s"), po, ipkg)
return False
return True
def install(self, po=None, **kwargs):
"""Mark the specified item for installation. If a package
object is given, mark it for installation. Otherwise, mark
the best package specified by the key word arguments for
installation.
:param po: a package object to install
:param kwargs: if *po* is not specified, these keyword
arguments will be used to find the best package to install
:return: a list of the transaction members added to the
transaction set by this function
:raises: :class:`yum.Errors.InstallError` if there is a problem
installing the package
"""
# This is kind of hacky, we really need a better way to do errors than
# doing them directly from .install/etc. ... but this is easy. *sigh*.
# We are only using this in "groupinstall" atm. ... so we don't have
# a long list of "blah already installed." messages when people run
# "groupinstall mygroup" in yum-cron etc.
pkg_warn = kwargs.get('pkg_warning_level', 'flibble')
def _dbg2(*args, **kwargs):
self.verbose_logger.log(logginglevels.DEBUG_2, *args, **kwargs)
level2func = {'debug2' : _dbg2,
'warning' : self.verbose_logger.warning}
if pkg_warn not in level2func:
pkg_warn = 'warning'
pkg_warn = level2func[pkg_warn]
pkgs = []
was_pattern = False
if po:
if isinstance(po, YumAvailablePackage) or isinstance(po, YumLocalPackage):
pkgs.append(po)
else:
raise Errors.InstallError, _('Package Object was not a package object instance')
else:
if not kwargs:
raise Errors.InstallError, _('Nothing specified to install')
if 'pattern' in kwargs:
if kwargs['pattern'] and kwargs['pattern'][0] == '-':
return self._minus_deselect(kwargs['pattern'])
if kwargs['pattern'] and kwargs['pattern'][0] == '@':
return self._at_groupinstall(kwargs['pattern'])
repoid = None # All of them
if 'repoid' in kwargs:
repoid = kwargs['repoid']
was_pattern = True
pats = [kwargs['pattern']]
mypkgs = self.pkgSack.returnPackages(patterns=pats,
repoid=repoid,
ignore_case=False)
pkgs.extend(mypkgs)
# if we have anything left unmatched, let's take a look for it
# being a dep like glibc.so.2 or /foo/bar/baz
if not mypkgs:
arg = kwargs['pattern']
self.verbose_logger.debug(_('Checking for virtual provide or file-provide for %s'),
arg)
mypkgs = self.returnPackagesByDep(arg)
if repoid:
mypkgs = misc.filter_pkgs_repoid(mypkgs, repoid)
if not misc.re_glob(arg):
mypkgs = self.bestPackagesFromList(mypkgs,
single_name=True,
req=arg)
pkgs.extend(mypkgs)
else:
nevra_dict = self._nevra_kwarg_parse(kwargs)
pkgs = self.pkgSack.searchNevra(name=nevra_dict['name'],
epoch=nevra_dict['epoch'], arch=nevra_dict['arch'],
ver=nevra_dict['version'], rel=nevra_dict['release'])
self._add_not_found_a(pkgs, nevra_dict)
pkgs = misc.filter_pkgs_repoid(pkgs, kwargs.get('repoid'))
if pkgs:
            # if was_pattern or nevra_dict['arch'] is None, take the list
# of arches based on our multilib_compat config and
# toss out any pkgs of any arch NOT in that arch list
# only do these things if we're multilib
if self.arch.multilib:
if was_pattern or not nevra_dict['arch']: # and only if they
                                                          # didn't specify an arch
if self.conf.multilib_policy == 'best':
pkgs_by_name = {}
use = []
not_added = []
best = self.arch.legit_multi_arches
best.append('noarch')
for pkg in pkgs:
if pkg.arch in best:
pkgs_by_name[pkg.name] = 1
use.append(pkg)
else:
not_added.append(pkg)
for pkg in not_added:
if not pkg.name in pkgs_by_name:
use.append(pkg)
pkgs = use
pkgs = packagesNewestByName(pkgs)
pkgbyname = {}
for pkg in pkgs:
if pkg.name not in pkgbyname:
pkgbyname[pkg.name] = [ pkg ]
else:
pkgbyname[pkg.name].append(pkg)
lst = []
for pkgs in pkgbyname.values():
lst.extend(self.bestPackagesFromList(pkgs))
pkgs = lst
if not pkgs:
# Do we still want to return errors here?
# We don't in the cases below, so I didn't here...
if 'pattern' in kwargs:
pkgs = self.rpmdb.returnPackages(patterns=[kwargs['pattern']],
ignore_case=False)
if 'name' in kwargs:
pkgs = self.rpmdb.searchNevra(name=kwargs['name'])
if 'pkgtup' in kwargs:
pkgs = self.rpmdb.searchNevra(name=kwargs['pkgtup'][0])
# Warning here does "weird" things when doing:
# yum --disablerepo='*' install '*'
# etc. ... see RHBZ#480402
if False:
for pkg in pkgs:
self.verbose_logger.warning(_('Package %s installed and not available'), pkg)
if pkgs:
return []
raise Errors.InstallError, _('No package(s) available to install')
# FIXME - lots more checking here
# - install instead of erase
# - better error handling/reporting
tx_return = []
for po in pkgs:
if self.tsInfo.exists(pkgtup=po.pkgtup):
if self.tsInfo.getMembersWithState(po.pkgtup, TS_INSTALL_STATES):
self.verbose_logger.log(logginglevels.DEBUG_1,
_('Package: %s - already in transaction set'), po)
tx_return.extend(self.tsInfo.getMembers(pkgtup=po.pkgtup))
continue
# make sure this shouldn't be passed to update:
ipkgs = self.rpmdb.searchNames([po.name])
if ipkgs and self._install_is_upgrade(po, ipkgs):
txmbrs = self.update(po=po, repoid=kwargs.get('repoid'))
tx_return.extend(txmbrs)
continue
if not self._valid_install_arch(po, ipkgs):
continue
# Make sure we're not installing a package which is obsoleted by
# something else in the repo. Unless there is a obsoletion loop,
# at which point ignore everything.
# NOTE: This is broken wrt. repoid...
obsoleting_pkg = None
if self.conf.obsoletes and not isinstance(po, YumLocalPackage):
obsoleting_pkg = self._test_loop(po, self._pkg2obspkg)
if obsoleting_pkg is not None:
# this is not a definitive check but it'll make sure we don't
# pull in foo.i586 when foo.x86_64 already obsoletes the pkg and
# is already installed
already_obs = None
pkgs = self.rpmdb.searchNevra(name=obsoleting_pkg.name)
pkgs = po.obsoletedBy(pkgs, limit=1)
if pkgs:
already_obs = pkgs[0]
if already_obs:
pkg_warn(_('Package %s is obsoleted by %s which is already installed'),
po, already_obs)
else:
if 'provides_for' in kwargs:
if not obsoleting_pkg.provides_for(kwargs['provides_for']):
pkg_warn(_('Package %s is obsoleted by %s, but obsoleting package does not provide for requirements'),
po.name, obsoleting_pkg.name)
continue
pkg_warn(_('Package %s is obsoleted by %s, trying to install %s instead'),
po.name, obsoleting_pkg.name, obsoleting_pkg)
tx_return.extend(self.install(po=obsoleting_pkg))
continue
# make sure it's not already installed
if self.rpmdb.contains(po=po):
if not self.tsInfo.getMembersWithState(po.pkgtup, TS_REMOVE_STATES):
pkg_warn(_('Package %s already installed and latest version'), po)
continue
# make sure we don't have a name.arch of this already installed
# if so pass it to update b/c it should be able to figure it out
# if self.rpmdb.contains(name=po.name, arch=po.arch) and not self.allowedMultipleInstalls(po):
if not self.allowedMultipleInstalls(po):
found = True
for ipkg in self.rpmdb.searchNevra(name=po.name, arch=po.arch):
found = False
if self.tsInfo.getMembersWithState(ipkg.pkgtup, TS_REMOVE_STATES):
found = True
break
if not found:
pkg_warn(_('Package matching %s already installed. Checking for update.'), po)
txmbrs = self.update(po=po, repoid=kwargs.get('repoid'))
tx_return.extend(txmbrs)
continue
# at this point we are going to mark the pkg to be installed, make sure
# it's not an older package that is allowed in due to multiple installs
# or some other oddity. If it is - then modify the problem filter to cope
for ipkg in self.rpmdb.searchNevra(name=po.name, arch=po.arch):
if ipkg.verEQ(po):
self._add_prob_flags(rpm.RPMPROB_FILTER_REPLACEPKG,
rpm.RPMPROB_FILTER_REPLACENEWFILES,
rpm.RPMPROB_FILTER_REPLACEOLDFILES)
# Yum needs the remove to happen before we allow the
# install of the same version. But rpm doesn't like that
# as it then has an install which removes the old version
# and a remove, which also tries to remove the old version.
self.tsInfo.remove(ipkg.pkgtup)
break
for ipkg in self.rpmdb.searchNevra(name=po.name):
if ipkg.verGT(po) and not canCoinstall(ipkg.arch, po.arch):
self._add_prob_flags(rpm.RPMPROB_FILTER_OLDPACKAGE)
break
            # make sure it doesn't obsolete anything. If it does, mark that in the tsInfo, too
obs_pkgs = list(self._find_obsoletees_direct(po))
if obs_pkgs:
for obsoletee in obs_pkgs:
txmbr = self.tsInfo.addObsoleting(po, obsoletee)
self.tsInfo.addObsoleted(obsoletee, po)
tx_return.append(txmbr)
else:
txmbr = self.tsInfo.addInstall(po)
tx_return.append(txmbr)
return tx_return
def _check_new_update_provides(self, opkg, npkg):
""" Check for any difference in the provides of the old and new update
that is needed by the transaction. If so we "update" those pkgs
too, to the latest version. """
oprovs = set(opkg.returnPrco('provides'))
nprovs = set(npkg.returnPrco('provides'))
tx_return = []
for prov in oprovs.difference(nprovs):
reqs = self.tsInfo.getRequires(*prov)
for pkg in reqs:
for req in reqs[pkg]:
if not npkg.inPrcoRange('provides', req):
naTup = (pkg.name, pkg.arch)
for pkg in self.pkgSack.returnNewestByNameArch(naTup):
tx_return.extend(self.update(po=pkg))
break
return tx_return
def _newer_update_in_trans(self, pkgtup, available_pkg, tx_return):
""" We return True if there is a newer package already in the
transaction. If there is an older one, we remove it (and update any
deps. that aren't satisfied by the newer pkg) and return False so
we'll update to this newer pkg. """
found = False
for txmbr in self.tsInfo.getMembersWithState(pkgtup, [TS_UPDATED]):
count = 0
for po in txmbr.updated_by:
if available_pkg.verLE(po):
count += 1
else:
for ntxmbr in self.tsInfo.getMembers(po.pkgtup):
self.tsInfo.remove(ntxmbr.po.pkgtup)
txs = self._check_new_update_provides(ntxmbr.po,
available_pkg)
tx_return.extend(txs)
if count:
found = True
else:
self.tsInfo.remove(txmbr.po.pkgtup)
return found
def _add_up_txmbr(self, requiringPo, upkg, ipkg):
txmbr = self.tsInfo.addUpdate(upkg, ipkg)
if requiringPo:
txmbr.setAsDep(requiringPo)
if ('reason' in ipkg.yumdb_info and ipkg.yumdb_info.reason == 'dep'):
txmbr.reason = 'dep'
return txmbr
def update(self, po=None, requiringPo=None, update_to=False, **kwargs):
"""Mark the specified items to be updated. If a package
object is given, mark it. Else, if a package is specified by
the keyword arguments, mark it. Finally, if nothing is given,
mark all installed packages to be updated.
:param po: the package object to be marked for updating
:param requiringPo: the package object that requires the
upgrade
:param update_to: if *update_to* is True, the update will only
be run if it will update the given package to the given
version. For example, if the package foo-1-2 is installed,::
updatePkgs(["foo-1-2"], update_to=False)
will work identically to::
updatePkgs(["foo"])
but::
updatePkgs(["foo-1-2"], update_to=True)
will do nothing
:param kwargs: if *po* is not given, the names or wildcards in
*kwargs* will be used to find the packages to update
:return: a list of transaction members added to the
transaction set by this function
"""
# check for args - if no po nor kwargs, do them all
# if po, do it, ignore all else
# if no po do kwargs
# uninstalled pkgs called for update get returned with errors in a list, maybe?
pkg_warn = kwargs.get('pkg_warning_level', 'flibble')
def _dbg2(*args, **kwargs):
self.verbose_logger.log(logginglevels.DEBUG_2, *args, **kwargs)
level2func = {'debug2' : _dbg2,
'warning' : self.verbose_logger.warning}
if pkg_warn not in level2func:
pkg_warn = 'warning'
pkg_warn = level2func[pkg_warn]
tx_return = []
if not po and not kwargs: # update everything (the easy case)
self.verbose_logger.log(logginglevels.DEBUG_2, _('Updating Everything'))
updates = self.up.getUpdatesTuples()
if self.conf.obsoletes:
obsoletes = self.up.getObsoletesTuples(newest=1)
else:
obsoletes = []
for (obsoleting, installed) in obsoletes:
obsoleting_pkg = self.getPackageObject(obsoleting,
allow_missing=True)
if obsoleting_pkg is None:
continue
topkg = self._test_loop(obsoleting_pkg, self._pkg2obspkg)
if topkg is not None:
obsoleting_pkg = topkg
installed_pkg = self.getInstalledPackageObject(installed)
txmbr = self.tsInfo.addObsoleting(obsoleting_pkg, installed_pkg)
self.tsInfo.addObsoleted(installed_pkg, obsoleting_pkg)
if requiringPo:
txmbr.setAsDep(requiringPo)
tx_return.append(txmbr)
for (new, old) in updates:
if self.tsInfo.isObsoleted(pkgtup=old):
self.verbose_logger.log(logginglevels.DEBUG_2, _('Not Updating Package that is already obsoleted: %s.%s %s:%s-%s') %
old)
else:
new = self.getPackageObject(new, allow_missing=True)
if new is None:
continue
tx_return.extend(self.update(po=new))
# Upgrade the installed groups, as part of generic "yum upgrade"
if (self.conf.group_command == 'objects' and
self.conf.upgrade_group_objects_upgrade):
for ievgrp in self.igroups.environments:
tx_return.extend(self._at_groupupgrade('@^' + ievgrp))
for igrp in self.igroups.groups:
tx_return.extend(self._at_groupupgrade('@' + igrp))
return tx_return
# complications
# the user has given us something - either a package object to be
# added to the transaction as an update or they've given us a pattern
# of some kind
instpkgs = []
availpkgs = []
if po: # just a po
if po.repoid == 'installed':
instpkgs.append(po)
else:
availpkgs.append(po)
elif 'pattern' in kwargs:
if kwargs['pattern'] and kwargs['pattern'][0] == '-':
return self._minus_deselect(kwargs['pattern'])
if kwargs['pattern'] and kwargs['pattern'][0] == '@':
return self._at_groupupgrade(kwargs['pattern'])
arg = kwargs['pattern']
if not update_to:
instpkgs = self.rpmdb.returnPackages(patterns=[arg])
instpkgs = misc.filter_pkgs_repoid(instpkgs,
kwargs.get('repoid'))
else:
availpkgs = self.pkgSack.returnPackages(patterns=[arg],
repoid=kwargs.get('repoid'))
if not instpkgs and not availpkgs:
depmatches = []
try:
if update_to:
depmatches = self.returnPackagesByDep(arg)
else:
depmatches = self.returnInstalledPackagesByDep(arg)
except yum.Errors.YumBaseError, e:
self.logger.critical(_('%s') % e)
depmatches = misc.filter_pkgs_repoid(depmatches,
kwargs.get('repoid'))
if update_to:
availpkgs.extend(depmatches)
else:
instpkgs.extend(depmatches)
# Always look for available packages, it doesn't seem to do any
# harm (apart from some time). And it fixes weird edge cases where
# "update a" (which requires a new b) is different from "update b"
try:
if update_to:
m = []
elif kwargs.get('repoid'):
pats = [kwargs['pattern']]
m = self.pkgSack.sacks[kwargs['repoid']]
m = m.returnNewestByNameArch(patterns=pats)
else:
pats = [kwargs['pattern']]
m = self.pkgSack.returnNewestByNameArch(patterns=pats)
except Errors.PackageSackError:
m = []
availpkgs.extend(m)
if not availpkgs and not instpkgs:
self.logger.critical(_('No Match for argument: %s') % to_unicode(arg))
else: # we have kwargs, sort them out.
nevra_dict = self._nevra_kwarg_parse(kwargs)
instpkgs = self.rpmdb.searchNevra(name=nevra_dict['name'],
epoch=nevra_dict['epoch'], arch=nevra_dict['arch'],
ver=nevra_dict['version'], rel=nevra_dict['release'])
if not instpkgs:
availpkgs = self.pkgSack.searchNevra(name=nevra_dict['name'],
epoch=nevra_dict['epoch'], arch=nevra_dict['arch'],
ver=nevra_dict['version'], rel=nevra_dict['release'])
self._add_not_found_a(availpkgs, nevra_dict)
if len(availpkgs) > 1:
availpkgs = self._compare_providers(availpkgs, requiringPo)
availpkgs = map(lambda x: x[0], availpkgs)
elif not availpkgs:
pkg_warn(_("No package matched to upgrade: %s"), self._ui_nevra_dict(nevra_dict))
# for any thing specified
# get the list of available pkgs matching it (or take the po)
# get the list of installed pkgs matching it (or take the po)
# go through each list and look for:
# things obsoleting it if it is an installed pkg
# things it updates if it is an available pkg
# things updating it if it is an installed pkg
# in that order
# all along checking to make sure we:
# don't update something that's already been obsoleted
# don't update something that's already been updated
# if there are more than one package that matches an update from
# a pattern/kwarg then:
# if it is a valid update and we'
# TODO: we should search the updates and obsoletes list and
# mark the package being updated or obsoleted away appropriately
# and the package relationship in the tsInfo
# check for obsoletes first
if self.conf.obsoletes:
for installed_pkg in instpkgs:
obs_tups = self.up.obsoleted_dict.get(installed_pkg.pkgtup, [])
# This is done so we don't have to returnObsoletes(newest=True)
# It's a minor UI problem for RHEL, but might as well dtrt.
obs_pkgs = []
for pkgtup in obs_tups:
obsoleting_pkg = self.getPackageObject(pkgtup,
allow_missing=True)
if obsoleting_pkg is None:
continue
obs_pkgs.append(obsoleting_pkg)
                # NOTE: Broken wrt. repoid
for obsoleting_pkg in packagesNewestByName(obs_pkgs):
tx_return.extend(self.install(po=obsoleting_pkg))
for available_pkg in availpkgs:
for obsoleted_pkg in self._find_obsoletees(available_pkg):
obsoleted = obsoleted_pkg.pkgtup
txmbr = self.tsInfo.addObsoleting(available_pkg, obsoleted_pkg)
if requiringPo:
txmbr.setAsDep(requiringPo)
tx_return.append(txmbr)
if self.tsInfo.isObsoleted(obsoleted):
self.verbose_logger.log(logginglevels.DEBUG_2, _('Package is already obsoleted: %s.%s %s:%s-%s') % obsoleted)
else:
txmbr = self.tsInfo.addObsoleted(obsoleted_pkg, available_pkg)
tx_return.append(txmbr)
for installed_pkg in instpkgs:
for updating in self.up.updatesdict.get(installed_pkg.pkgtup, []):
po = self.getPackageObject(updating, allow_missing=True)
if po is None:
continue
if self.tsInfo.isObsoleted(installed_pkg.pkgtup):
self.verbose_logger.log(logginglevels.DEBUG_2, _('Not Updating Package that is already obsoleted: %s.%s %s:%s-%s') %
installed_pkg.pkgtup)
# at this point we are going to mark the pkg to be installed, make sure
# it doesn't obsolete anything. If it does, mark that in the tsInfo, too
elif po.pkgtup in self.up.getObsoletesList(name=po.name):
for obsoletee in self._find_obsoletees(po):
txmbr = self.tsInfo.addUpdate(po, installed_pkg)
if requiringPo:
txmbr.setAsDep(requiringPo)
self.tsInfo.addObsoleting(po, obsoletee)
self.tsInfo.addObsoleted(obsoletee, po)
tx_return.append(txmbr)
else:
if self.tsInfo.getMembersWithState(installed_pkg.pkgtup,
TS_REMOVE_STATES):
self.tsInfo.remove(installed_pkg.pkgtup)
txmbr = self._add_up_txmbr(requiringPo, po, installed_pkg)
tx_return.append(txmbr)
for available_pkg in availpkgs:
if not self._valid_install_arch(available_pkg):
continue
# "Just do it" if it's a local pkg.
if isinstance(available_pkg, YumLocalPackage):
n = available_pkg.name
for updated_pkg in self.rpmdb.returnNewestByName(n):
updated = updated_pkg.pkgtup
if self.tsInfo.getMembersWithState(updated,
TS_REMOVE_STATES):
self.tsInfo.remove(updated)
txmbr = self._add_up_txmbr(requiringPo,
available_pkg, updated_pkg)
tx_return.append(txmbr)
continue
# Make sure we're not installing a package which is obsoleted by
            # something else in the repo. Unless there is an obsoletion loop,
# at which point ignore everything.
obsoleting_pkg = self._test_loop(available_pkg, self._pkg2obspkg)
if obsoleting_pkg is not None:
if (kwargs.get('repoid') and
obsoleting_pkg.repoid != kwargs.get('repoid')):
continue # Meh.
self.verbose_logger.log(logginglevels.DEBUG_2, _('Not Updating Package that is obsoleted: %s'), available_pkg)
tx_return.extend(self.update(po=obsoleting_pkg))
continue
for updated in self.up.updating_dict.get(available_pkg.pkgtup, []):
if self.tsInfo.isObsoleted(updated):
self.verbose_logger.log(logginglevels.DEBUG_2, _('Not Updating Package that is already obsoleted: %s.%s %s:%s-%s') %
updated)
elif self._newer_update_in_trans(updated, available_pkg,
tx_return):
self.verbose_logger.log(logginglevels.DEBUG_2, _('Not Updating Package that is already updated: %s.%s %s:%s-%s') %
updated)
else:
updated_pkg = self.getInstalledPackageObject(updated)
if self.tsInfo.getMembersWithState(updated,
TS_REMOVE_STATES):
self.tsInfo.remove(updated)
txmbr = self._add_up_txmbr(requiringPo,
available_pkg, updated_pkg)
tx_return.append(txmbr)
# check to see if the pkg we want to install is not _quite_ the newest
# one but still technically an update over what is installed.
pot_updated = self.rpmdb.searchNevra(name=available_pkg.name, arch=available_pkg.arch)
if pot_updated and self.allowedMultipleInstalls(available_pkg):
# only compare against the newest of what's installed for kernel
pot_updated = sorted(pot_updated)[-1:]
#FIXME - potentially do the comparables thing from what used to
# be in cli.installPkgs() to see what we should be comparing
# it to of what is installed. in the meantime name.arch is
# most likely correct
# this is sorta a fix - but it shouldn't be only for localPackages
# else:
# if available_pkg in self.localPackages:
# # if we got here the potentially updated is not a matching arch
# # and we're goofed up in a localPackage that someone wants to apply for some odd reason
# # so we go for name-only update match and check
# pot_updated = self.rpmdb.searchNevra(name=available_pkg.name)
for ipkg in pot_updated:
if self.tsInfo.isObsoleted(ipkg.pkgtup):
self.verbose_logger.log(logginglevels.DEBUG_2, _('Not Updating Package that is already obsoleted: %s.%s %s:%s-%s') %
ipkg.pkgtup)
elif self._newer_update_in_trans(ipkg.pkgtup, available_pkg,
tx_return):
self.verbose_logger.log(logginglevels.DEBUG_2, _('Not Updating Package that is already updated: %s.%s %s:%s-%s') %
ipkg.pkgtup)
elif ipkg.verLT(available_pkg):
txmbr = self._add_up_txmbr(requiringPo, available_pkg, ipkg)
tx_return.append(txmbr)
for txmbr in tx_return:
for i_pkg in self.rpmdb.searchNevra(name=txmbr.name):
if i_pkg not in txmbr.updates:
if self._does_this_update(txmbr.po, i_pkg):
self.tsInfo.addUpdated(i_pkg, txmbr.po)
return tx_return
def remove(self, po=None, **kwargs):
"""Mark the specified packages for removal. If a package
object is given, mark it for removal. Otherwise, mark the
package specified by the keyword arguments.
:param po: the package object to mark for installation
:param kwargs: If *po* is not given, the keyword arguments
will be used to specify a package to mark for installation
:return: a list of the transaction members that were added to
the transaction set by this method
:raises: :class:`yum.Errors.RemoveError` if nothing is specified
to mark for removal
"""
if not po and not kwargs:
raise Errors.RemoveError, 'Nothing specified to remove'
tx_return = []
pkgs = []
if po:
pkgs = [po]
else:
if 'pattern' in kwargs:
if kwargs['pattern'] and kwargs['pattern'][0] == '-':
return self._minus_deselect(kwargs['pattern'])
if kwargs['pattern'] and kwargs['pattern'][0] == '@':
return self._at_groupremove(kwargs['pattern'])
(e,m,u) = self.rpmdb.matchPackageNames([kwargs['pattern']])
if 'repoid' in kwargs:
e = misc.filter_pkgs_repoid(e, kwargs['repoid'])
m = misc.filter_pkgs_repoid(m, kwargs['repoid'])
pkgs.extend(e)
pkgs.extend(m)
if u:
depmatches = []
arg = u[0]
try:
depmatches = self.returnInstalledPackagesByDep(arg)
except yum.Errors.YumBaseError, e:
self.logger.critical(_('%s') % e)
if 'repoid' in kwargs:
depmatches = misc.filter_pkgs_repoid(depmatches,
kwargs['repoid'])
if not depmatches:
arg = to_unicode(arg)
self.logger.critical(_('No Match for argument: %s') % to_unicode(arg))
else:
pkgs.extend(depmatches)
else:
nevra_dict = self._nevra_kwarg_parse(kwargs)
pkgs = self.rpmdb.searchNevra(name=nevra_dict['name'],
epoch=nevra_dict['epoch'], arch=nevra_dict['arch'],
ver=nevra_dict['version'], rel=nevra_dict['release'])
self._add_not_found_i(pkgs, nevra_dict)
if len(pkgs) == 0:
if not kwargs.get('silence_warnings', False):
self.logger.warning(_("No package matched to remove: %s"), self._ui_nevra_dict(nevra_dict))
ts = self.rpmdb.readOnlyTS()
kern_pkgtup = misc.get_running_kernel_pkgtup(ts)
for po in pkgs:
if self.conf.protected_packages and po.pkgtup == kern_pkgtup:
self.logger.warning(_("Skipping the running kernel: %s") % po)
continue
if self.tsInfo.getMembers(po.pkgtup):
# This allows multiple reinstalls and update/downgrade "cancel"
for txmbr in self.tsInfo.matchNaevr(po.name):
self.logger.info(_("Removing %s from the transaction") %
txmbr)
self.tsInfo.remove(txmbr.pkgtup)
# Now start the remove/reinstall
txmbr = self.tsInfo.addErase(po)
tx_return.append(txmbr)
return tx_return
def installLocal(self, pkg, po=None, updateonly=False):
"""Mark a package on the local filesystem (i.e. not from a
repository) for installation.
:param pkg: a string specifying the path to an rpm file in the
local filesystem to be marked for installation
:param po: a :class:`yum.packages.YumLocalPackage`
:param updateonly: if True, the given package will only be
marked for installation if it is an upgrade for a package
that is already installed. If False, this restriction is
not enforced
:return: a list of the transaction members added to the
transaction set by this method
"""
# read in the package into a YumLocalPackage Object
# append it to self.localPackages
# check if it can be installed or updated based on nevra versus rpmdb
# don't import the repos until we absolutely need them for depsolving
tx_return = []
installpkgs = []
updatepkgs = []
donothingpkgs = []
if not po:
try:
po = YumUrlPackage(self, ts=self.rpmdb.readOnlyTS(), url=pkg,
ua=default_grabber.opts.user_agent)
except Errors.MiscError:
self.logger.critical(_('Cannot open: %s. Skipping.'), pkg)
return tx_return
self.verbose_logger.log(logginglevels.INFO_2,
_('Examining %s: %s'), po.localpath, po)
# apparently someone wanted to try to install a drpm as an rpm :(
if po.hdr['payloadformat'] == 'drpm':
self.logger.critical(_('Cannot localinstall deltarpm: %s. Skipping.'), pkg)
return tx_return
# if by any chance we're a noncompat arch rpm - bail and throw out an error
# FIXME -our archlist should be stored somewhere so we don't have to
# do this: but it's not a config file sort of thing
# FIXME: Should add noarch, yum localinstall works ...
# just rm this method?
if po.arch not in self.arch.archlist:
self.logger.critical(_('Cannot add package %s to transaction. Not a compatible architecture: %s'), pkg, po.arch)
return tx_return
if self.conf.obsoletes:
obsoleters = po.obsoletedBy(self.rpmdb.searchObsoletes(po.name))
if obsoleters:
self.logger.critical(_('Cannot install package %s. It is obsoleted by installed package %s'), po, obsoleters[0])
return tx_return
# everything installed that matches the name
installedByKey = self.rpmdb.searchNevra(name=po.name)
# go through each package
if len(installedByKey) == 0: # nothing installed by that name
if updateonly:
self.logger.warning(_('Package %s not installed, cannot update it. Run yum install to install it instead.'), po.name)
return tx_return
else:
installpkgs.append(po)
for installed_pkg in installedByKey:
if po.verGT(installed_pkg): # we're newer - this is an update, pass to them
if installed_pkg.name in self.conf.exactarchlist:
if po.arch == installed_pkg.arch:
updatepkgs.append((po, installed_pkg))
else:
donothingpkgs.append(po)
else:
updatepkgs.append((po, installed_pkg))
elif po.verEQ(installed_pkg):
if (po.arch != installed_pkg.arch and
(isMultiLibArch(po.arch) or
isMultiLibArch(installed_pkg.arch))):
if updateonly:
self.logger.warning(_('Package %s.%s not installed, cannot update it. Run yum install to install it instead.'), po.name, po.arch)
else:
installpkgs.append(po)
else:
donothingpkgs.append(po)
elif self.allowedMultipleInstalls(po):
if updateonly:
self.logger.warning(_('Package %s.%s not installed, cannot update it. Run yum install to install it instead.'), po.name, po.arch)
else:
installpkgs.append(po)
else:
donothingpkgs.append(po)
# handle excludes for a localinstall
check_pkgs = installpkgs + [x[0] for x in updatepkgs]
if self._is_local_exclude(po, check_pkgs):
self.verbose_logger.debug(_('Excluding %s'), po)
return tx_return
for po in installpkgs:
self.verbose_logger.log(logginglevels.INFO_2,
_('Marking %s to be installed'), po.localpath)
self.localPackages.append(po)
tx_return.extend(self.install(po=po))
for (po, oldpo) in updatepkgs:
self.verbose_logger.log(logginglevels.INFO_2,
_('Marking %s as an update to %s'), po.localpath, oldpo)
self.localPackages.append(po)
txmbrs = self.update(po=po)
tx_return.extend(txmbrs)
for po in donothingpkgs:
self.verbose_logger.log(logginglevels.INFO_2,
_('%s: does not update installed package.'), po.localpath)
# this checks to make sure that any of the to-be-installed pkgs
# does not obsolete something else that's installed
# this doesn't handle the localpkgs obsoleting EACH OTHER or
# anything else in the transaction set, though. That could/should
# be fixed later but a fair bit of that is a pebkac and should be
# said as "don't do that". potential 'fixme'
for txmbr in tx_return:
# We don't want to do this twice, so only bother if the txmbr
# doesn't already obsolete anything.
if txmbr.po.obsoletes and not txmbr.obsoletes:
for obs_pkg in self._find_obsoletees(txmbr.po):
self.tsInfo.addObsoleted(obs_pkg, txmbr.po)
txmbr.obsoletes.append(obs_pkg)
self.tsInfo.addObsoleting(txmbr.po,obs_pkg)
return tx_return
def reinstallLocal(self, pkg, po=None):
"""Mark a package on the local filesystem (i.e. not from a
repository) for reinstallation.
:param pkg: a string specifying the path to an rpm file in the
local filesystem to be marked for reinstallation
:param po: a :class:`yum.packages.YumLocalPackage`
:return: a list of the transaction members added to the
transaction set by this method
"""
if not po:
try:
po = YumUrlPackage(self, ts=self.rpmdb.readOnlyTS(), url=pkg,
ua=default_grabber.opts.user_agent)
except Errors.MiscError:
self.logger.critical(_('Cannot open file: %s. Skipping.'), pkg)
return []
self.verbose_logger.log(logginglevels.INFO_2,
_('Examining %s: %s'), po.localpath, po)
if po.arch not in self.arch.archlist:
self.logger.critical(_('Cannot add package %s to transaction. Not a compatible architecture: %s'), pkg, po.arch)
return []
# handle excludes for a local reinstall
if self._is_local_exclude(po, [po]):
self.verbose_logger.debug(_('Excluding %s'), po)
return []
return self.reinstall(po=po)
def reinstall(self, po=None, **kwargs):
"""Mark the given package for reinstallation. This is
accomplished by setting problem filters to allow a reinstall
take place, then calling :func:`install`.
:param po: the package object to mark for reinstallation
:param kwargs: if po is not given, the keyword will be used to
specify a package for reinstallation
:return: a list of the transaction members added to the
transaction set by this method
:raises: :class:`yum.Errors.ReinstallRemoveError` or
:class:`yum.Errors.ReinstallInstallError` depending the nature
of the error that is encountered
"""
self._add_prob_flags(rpm.RPMPROB_FILTER_REPLACEPKG,
rpm.RPMPROB_FILTER_REPLACENEWFILES,
rpm.RPMPROB_FILTER_REPLACEOLDFILES)
# NOTE: For repoid=foo we could do two things:
# 1. Only do the "remove" op. with packages installed from "foo".
# 2. Only do the "install" op. with packages available from "foo".
# ...so repoid=foo means #1 and repoid_install=foo means #2. Can also
# combine them.
tx_mbrs = []
if po: # The po, is the "available" po ... we want the installed po
tx_mbrs.extend(self.remove(pkgtup=po.pkgtup))
else:
tx_mbrs.extend(self.remove(**kwargs))
if not tx_mbrs:
raise Errors.ReinstallRemoveError, _("Problem in reinstall: no package matched to remove")
templen = len(tx_mbrs)
# this is a reinstall, so if we can't reinstall exactly what we uninstalled
# then we really shouldn't go on
new_members = []
failed = []
failed_pkgs = []
for item in tx_mbrs[:]:
# Make sure obsoletes processing is off, so we can reinstall()
# pkgs that are obsolete.
old_conf_obs = self.conf.obsoletes
self.conf.obsoletes = False
if isinstance(po, YumLocalPackage) and 'repoid' not in kwargs:
members = self.install(po=po)
else:
members = self.install(pkgtup=item.pkgtup,
repoid=kwargs.get('repoid_install'))
self.conf.obsoletes = old_conf_obs
if len(members) == 0:
self.tsInfo.remove(item.pkgtup)
tx_mbrs.remove(item)
failed.append(str(item.po))
failed_pkgs.append(item.po)
continue
new_members.extend(members)
if failed and not tx_mbrs:
raise Errors.ReinstallInstallError(_("Problem in reinstall: no package %s matched to install") % ", ".join(failed), failed_pkgs=failed_pkgs)
tx_mbrs.extend(new_members)
return tx_mbrs
def downgradeLocal(self, pkg, po=None):
"""Mark a package on the local filesystem (i.e. not from a
repository) to be downgraded.
:param pkg: a string specifying the path to an rpm file in the
local filesystem to be marked to be downgraded
:param po: a :class:`yum.packages.YumLocalPackage`
:return: a list of the transaction members added to the
transaction set by this method
"""
if not po:
try:
po = YumUrlPackage(self, ts=self.rpmdb.readOnlyTS(), url=pkg,
ua=default_grabber.opts.user_agent)
except Errors.MiscError:
self.logger.critical(_('Cannot open file: %s. Skipping.'), pkg)
return []
self.verbose_logger.log(logginglevels.INFO_2,
_('Examining %s: %s'), po.localpath, po)
if po.arch not in self.arch.archlist:
self.logger.critical(_('Cannot add package %s to transaction. Not a compatible architecture: %s'), pkg, po.arch)
return []
# handle excludes for a local downgrade
if self._is_local_exclude(po, [po]):
self.verbose_logger.debug(_('Excluding %s'), po)
return []
return self.downgrade(po=po)
def _is_local_exclude(self, po, pkglist):
"""returns True if the local pkg should be excluded"""
if "all" in self.conf.disable_excludes or \
"main" in self.conf.disable_excludes:
return False
toexc = []
if len(self.conf.exclude) > 0:
exactmatch, matched, unmatched = \
parsePackages(pkglist, self.conf.exclude, casematch=1)
toexc = exactmatch + matched
if po in toexc:
return True
return False
def downgrade(self, po=None, **kwargs):
"""Mark a package to be downgraded. This is equivalent to
first removing the currently installed package, and then
installing the older version.
:param po: the package object to be marked to be downgraded
:param kwargs: if a package object is not given, the keyword
arguments will be used to specify a package to be marked to
be downgraded
:return: a list of the transaction members added to the
transaction set by this method
:raises: :class:`yum.Errors.DowngradeError` if no packages are
specified or available for downgrade
"""
if not po and not kwargs:
raise Errors.DowngradeError, 'Nothing specified to downgrade'
doing_group_pkgs = False
if po:
apkgs = [po]
elif 'pattern' in kwargs:
if kwargs['pattern'] and kwargs['pattern'][0] == '-':
return self._minus_deselect(kwargs['pattern'])
if kwargs['pattern'] and kwargs['pattern'][0] == '@':
apkgs = self._at_groupdowngrade(kwargs['pattern'])
doing_group_pkgs = True # Don't warn. about some things
else:
apkgs = self.pkgSack.returnPackages(patterns=[kwargs['pattern']],
ignore_case=False)
if not apkgs:
arg = kwargs['pattern']
self.verbose_logger.debug(_('Checking for virtual provide or file-provide for %s'),
arg)
try:
apkgs = self.returnPackagesByDep(arg)
except yum.Errors.YumBaseError, e:
self.logger.critical(_('No Match for argument: %s') % to_unicode(arg))
else:
nevra_dict = self._nevra_kwarg_parse(kwargs)
apkgs = self.pkgSack.searchNevra(name=nevra_dict['name'],
epoch=nevra_dict['epoch'],
arch=nevra_dict['arch'],
ver=nevra_dict['version'],
rel=nevra_dict['release'])
self._add_not_found_a(apkgs, nevra_dict)
if not apkgs:
# Do we still want to return errors here?
# We don't in the cases below, so I didn't here...
pkgs = []
if 'pattern' in kwargs:
pkgs = self.rpmdb.returnPackages(patterns=[kwargs['pattern']],
ignore_case=False)
if 'name' in kwargs:
pkgs = self.rpmdb.searchNevra(name=kwargs['name'])
if pkgs:
return []
raise Errors.DowngradeError, _('No package(s) available to downgrade')
warned_nas = set()
# Skip kernel etc.
tapkgs = []
for pkg in apkgs:
if self.allowedMultipleInstalls(pkg):
if (pkg.name, pkg.arch) not in warned_nas:
msg = _("Package %s is allowed multiple installs, skipping") % pkg
self.verbose_logger.log(logginglevels.INFO_2, msg)
warned_nas.add((pkg.name, pkg.arch))
continue
tapkgs.append(pkg)
apkgs = tapkgs
# Find installed versions of "to downgrade pkgs"
apkg_names = set()
for pkg in apkgs:
apkg_names.add(pkg.name)
ipkgs = self.rpmdb.searchNames(list(apkg_names))
latest_installed_na = {}
latest_installed_n = {}
for pkg in sorted(ipkgs):
if (pkg.name not in latest_installed_n or
pkg.verGT(latest_installed_n[pkg.name][0])):
latest_installed_n[pkg.name] = [pkg]
elif pkg.verEQ(latest_installed_n[pkg.name][0]):
latest_installed_n[pkg.name].append(pkg)
latest_installed_na[(pkg.name, pkg.arch)] = pkg
# Find "latest downgrade", ie. latest available pkg before
# installed version. Indexed fromn the latest installed pkgtup.
downgrade_apkgs = {}
for pkg in sorted(apkgs):
# We are cleverer here, I think...
# if not self._valid_install_arch(pkg, ipkgs):
# continue
na = (pkg.name, pkg.arch)
# Here we allow downgrades from .i386 => .noarch, or .i586 => .i386
# but not .i386 => .x86_64 (similar to update).
lipkg = None
if na in latest_installed_na:
lipkg = latest_installed_na[na]
elif pkg.name in latest_installed_n:
for tlipkg in latest_installed_n[pkg.name]:
if not canCoinstall(pkg.arch, tlipkg.arch):
lipkg = tlipkg
# Use this so we don't get confused when we have
# different versions with different arches.
na = (pkg.name, lipkg.arch)
break
if lipkg is None:
if (na not in warned_nas and not doing_group_pkgs and
pkg.name not in latest_installed_n):
msg = _('No Match for available package: %s') % pkg
self.logger.critical(msg)
warned_nas.add(na)
continue
if pkg.verEQ(lipkg):
continue
if pkg.verGE(lipkg):
if na not in warned_nas:
msg = _('Only Upgrade available on package: %s') % pkg
self.logger.critical(msg)
warned_nas.add(na)
continue
warned_nas.add(na)
if (lipkg.pkgtup in downgrade_apkgs and
pkg.verLE(downgrade_apkgs[lipkg.pkgtup])):
continue # Skip older than "latest downgrade"
downgrade_apkgs[lipkg.pkgtup] = pkg
tx_return = []
for ipkg in ipkgs:
if ipkg.pkgtup not in downgrade_apkgs:
continue
txmbrs = self.tsInfo.addDowngrade(downgrade_apkgs[ipkg.pkgtup],ipkg)
if not txmbrs: # Fail?
continue
self._add_prob_flags(rpm.RPMPROB_FILTER_OLDPACKAGE)
tx_return.extend(txmbrs)
return tx_return
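# Usage sketch (added for illustration, not part of the original module):
# assuming a configured YumBase instance `yb` and an older version of the
# package available in an enabled repo, a caller might do:
#
#     import yum
#     yb = yum.YumBase()
#     txmbrs = yb.downgrade(pattern='bash')   # or downgrade(po=pkg_object)
#     if txmbrs:
#         yb.buildTransaction()
#         yb.processTransaction()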
@staticmethod
def _ui_nevra_dict(nevra_dict):
n = nevra_dict['name']
e = nevra_dict['epoch']
v = nevra_dict['version']
r = nevra_dict['release']
a = nevra_dict['arch']
if e and v and r:
evr = '%s:%s-%s' % (e, v, r)
elif v and r:
evr = '%s-%s' % (v, r)
elif e and v:
evr = '%s:%s' % (e, v)
elif v: # e and r etc. is just too weird to print
evr = v
else:
evr = ''
if n and evr:
return '%s-%s' % (n, evr)
if evr:
return '*-%s' % evr
if n:
return n
return '<unknown>'
def _nevra_kwarg_parse(self, kwargs):
returndict = {}
if 'pkgtup' in kwargs:
(n, a, e, v, r) = kwargs['pkgtup']
returndict['name'] = n
returndict['epoch'] = e
returndict['arch'] = a
returndict['version'] = v
returndict['release'] = r
return returndict
returndict['name'] = kwargs.get('name')
returndict['epoch'] = kwargs.get('epoch')
returndict['arch'] = kwargs.get('arch')
# get them as ver, version and rel, release - if someone
# specifies one of each then that's kinda silly.
returndict['version'] = kwargs.get('version')
if returndict['version'] is None:
returndict['version'] = kwargs.get('ver')
returndict['release'] = kwargs.get('release')
if returndict['release'] is None:
returndict['release'] = kwargs.get('rel')
return returndict
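# Illustration (hypothetical values): _nevra_kwarg_parse() normalizes either
# a full pkgtup or loose keyword arguments into one dict, roughly:
#
#     self._nevra_kwarg_parse({'pkgtup': ('bash', 'x86_64', '0', '4.2', '1')})
#     # => {'name': 'bash', 'epoch': '0', 'arch': 'x86_64',
#     #     'version': '4.2', 'release': '1'}
#     self._nevra_kwarg_parse({'name': 'bash', 'ver': '4.2'})
#     # => {'name': 'bash', 'epoch': None, 'arch': None,
#     #     'version': '4.2', 'release': None}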
def history_redo(self, transaction,
force_reinstall=False, force_changed_removal=False):
"""Repeat the transaction represented by the given
:class:`yum.history.YumHistoryTransaction` object.
:param transaction: a
:class:`yum.history.YumHistoryTransaction` object
representing the transaction to be repeated
:param force_reinstall: bool - do we want to reinstall anything that was
installed/updated/downgraded/etc.
:param force_changed_removal: bool - do we want to force remove anything
that was downgraded or upgraded.
:return: whether the transaction was repeated successfully
"""
# NOTE: This is somewhat basic atm. ... see comment in undo.
# Also note that redo doesn't force install Dep-Install packages,
# which is probably what is wanted the majority of the time.
old_conf_obs = self.conf.obsoletes
self.conf.obsoletes = False
done = False
for pkg in transaction.trans_data:
if pkg.state == 'Reinstall':
if self.reinstall(pkgtup=pkg.pkgtup):
done = True
for pkg in transaction.trans_data:
if pkg.state == 'Downgrade':
if force_reinstall and self.rpmdb.searchPkgTuple(pkg.pkgtup):
if self.reinstall(pkgtup=pkg.pkgtup):
done = True
continue
try:
if self.downgrade(pkgtup=pkg.pkgtup):
done = True
except yum.Errors.DowngradeError:
self.logger.critical(_('Failed to downgrade: %s'), pkg)
for pkg in transaction.trans_data:
if force_changed_removal and pkg.state == 'Downgraded':
if self.tsInfo.getMembers(pkg.pkgtup):
continue
if self.remove(pkgtup=pkg.pkgtup, silence_warnings=True):
done = True
for pkg in transaction.trans_data:
if pkg.state == 'Update':
if force_reinstall and self.rpmdb.searchPkgTuple(pkg.pkgtup):
if self.reinstall(pkgtup=pkg.pkgtup):
done = True
continue
if self.update(pkgtup=pkg.pkgtup):
done = True
else:
self.logger.critical(_('Failed to upgrade: %s'), pkg)
for pkg in transaction.trans_data:
if force_changed_removal and pkg.state == 'Updated':
if self.tsInfo.getMembers(pkg.pkgtup):
continue
if self.remove(pkgtup=pkg.pkgtup, silence_warnings=True):
done = True
for pkg in transaction.trans_data:
if pkg.state in ('Install', 'True-Install', 'Obsoleting'):
if force_reinstall and self.rpmdb.searchPkgTuple(pkg.pkgtup):
if self.reinstall(pkgtup=pkg.pkgtup):
done = True
continue
if self.install(pkgtup=pkg.pkgtup):
done = True
for pkg in transaction.trans_data:
if pkg.state == 'Erase':
if self.remove(pkgtup=pkg.pkgtup):
done = True
self.conf.obsoletes = old_conf_obs
return done
def history_undo(self, transaction):
"""Undo the transaction represented by the given
:class:`yum.history.YumHistoryTransaction` object.
:param transaction: a
:class:`yum.history.YumHistoryTransaction` object
representing the transaction to be undone
:return: whether the transaction was undone successfully
"""
# NOTE: This is somewhat basic atm. ... for instance we don't check
# that we are going from the old to the new version. However it's still
# better than the RHN rollback code, and people pay for that :).
# We turn obsoletes off because we want the specific versions of stuff
# from history ... even if they've been obsoleted since then.
old_conf_obs = self.conf.obsoletes
self.conf.obsoletes = False
done = False
for pkg in transaction.trans_data:
if pkg.state == 'Reinstall':
if self.reinstall(pkgtup=pkg.pkgtup):
done = True
for pkg in transaction.trans_data:
if pkg.state == 'Updated':
try:
if self.downgrade(pkgtup=pkg.pkgtup):
done = True
except yum.Errors.DowngradeError:
self.logger.critical(_('Failed to downgrade: %s'), pkg)
for pkg in transaction.trans_data:
if pkg.state == 'Downgraded':
if self.update(pkgtup=pkg.pkgtup):
done = True
else:
self.logger.critical(_('Failed to upgrade: %s'), pkg)
for pkg in transaction.trans_data:
if pkg.state == 'Obsoleting':
# Note that obsoleting can mean anything, so if this is part of
# something else, it should be done by now (so do nothing).
if self.tsInfo.getMembers(pkg.pkgtup):
continue
# If not it should be an install/obsolete ... so remove it.
if self.remove(pkgtup=pkg.pkgtup):
done = True
for pkg in transaction.trans_data:
if pkg.state in ('Dep-Install', 'Install', 'True-Install'):
if self.remove(pkgtup=pkg.pkgtup):
done = True
for pkg in transaction.trans_data:
if pkg.state == 'Obsoleted':
if self.install(pkgtup=pkg.pkgtup):
done = True
for pkg in transaction.trans_data:
if pkg.state == 'Erase':
if self.install(pkgtup=pkg.pkgtup):
done = True
self.conf.obsoletes = old_conf_obs
return done
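# Usage sketch (assumed, not from the original source): both history_redo()
# and history_undo() take a YumHistoryTransaction, e.g. one fetched from the
# history store by transaction id (42 is a made-up id):
#
#     old_tx = yb.history.old(tids=[42])[0]
#     if yb.history_undo(old_tx):
#         yb.buildTransaction()
#         yb.processTransaction()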
def _retrievePublicKey(self, keyurl, repo=None, getSig=True):
"""
Retrieve a key file
@param keyurl: url to the key to retrieve
Returns a list of dicts with all the keyinfo
"""
key_installed = False
msg = _('Retrieving key from %s') % keyurl
self.verbose_logger.log(logginglevels.INFO_2, msg)
# Go get the GPG key from the given URL
try:
url = misc.to_utf8(keyurl)
if repo is None:
opts = {'limit':9999}
text = 'global/gpgkey'
else:
# If we have a repo, use the proxy etc. configuration for it.
# In theory we have a global proxy config. too, but meh...
# external callers should just update.
opts = repo._default_grabopts()
text = repo.id + '/gpgkey'
rawkey = urlgrabber.urlread(url, **opts)
except urlgrabber.grabber.URLGrabError, e:
raise Errors.YumBaseError(_('GPG key retrieval failed: ') +
exception2msg(e))
# check for a .asc file accompanying it - that's our gpg sig on the key
# suck it down and do the check
sigfile = None
valid_sig = False
if getSig and repo and repo.gpgcakey:
self.getCAKeyForRepo(repo, callback=repo.confirm_func)
try:
url = misc.to_utf8(keyurl + '.asc')
opts = repo._default_grabopts()
text = repo.id + '/gpgkeysig'
sigfile = urlgrabber.urlopen(url, **opts)
except urlgrabber.grabber.URLGrabError, e:
sigfile = None
if sigfile:
if not misc.valid_detached_sig(sigfile,
StringIO.StringIO(rawkey), repo.gpgcadir):
#if we decide we want to check, even though the sig failed
# here is where we would do that
raise Errors.YumBaseError(_('GPG key signature on key %s does not match CA Key for repo: %s') % (url, repo.id))
else:
msg = _('GPG key signature verified against CA Key(s)')
self.verbose_logger.log(logginglevels.INFO_2, msg)
valid_sig = True
# Parse the key
try:
keys_info = misc.getgpgkeyinfo(rawkey, multiple=True)
except ValueError, e:
raise Errors.YumBaseError(_('Invalid GPG Key from %s: %s') %
(url, exception2msg(e)))
keys = []
for keyinfo in keys_info:
thiskey = {}
for info in ('keyid', 'timestamp', 'userid',
'fingerprint', 'raw_key'):
if info not in keyinfo:
raise Errors.YumBaseError, \
_('GPG key parsing failed: key does not have value %s') % info
thiskey[info] = keyinfo[info]
thiskey['hexkeyid'] = misc.keyIdToRPMVer(keyinfo['keyid']).upper()
thiskey['valid_sig'] = valid_sig
thiskey['has_sig'] = bool(sigfile)
keys.append(thiskey)
return keys
def _getKeyImportMessage(self, info, keyurl, keytype='GPG'):
msg = None
if keyurl.startswith("file:"):
fname = keyurl[len("file:"):]
pkgs = self.rpmdb.searchFiles(fname)
if pkgs:
pkgs = sorted(pkgs)[-1]
msg = (_('Importing %s key 0x%s:\n'
' Userid : "%s"\n'
' Fingerprint: %s\n'
' Package : %s (%s)\n'
' From : %s') %
(keytype, info['hexkeyid'], to_unicode(info['userid']),
misc.gpgkey_fingerprint_ascii(info),
pkgs, pkgs.ui_from_repo,
keyurl.replace("file://","")))
if msg is None:
msg = (_('Importing %s key 0x%s:\n'
' Userid : "%s"\n'
' Fingerprint: %s\n'
' From : %s') %
(keytype, info['hexkeyid'], to_unicode(info['userid']),
misc.gpgkey_fingerprint_ascii(info),
keyurl.replace("file://","")))
self.logger.critical("%s", msg)
def getKeyForPackage(self, po, askcb = None, fullaskcb = None):
"""Retrieve a key for a package. If needed, use the given
callback to prompt whether the key should be imported.
:param po: the package object to retrieve the key of
:param askcb: Callback function to use to ask permission to
import a key. The arguments *askck* should take are the
package object, the userid of the key, and the keyid
:param fullaskcb: Callback function to use to ask permission to
import a key. This differs from *askcb* in that it gets
passed a dictionary so that we can expand the values passed.
:raises: :class:`yum.Errors.YumBaseError` if there are errors
retrieving the keys
"""
repo = self.repos.getRepo(po.repoid)
keyurls = repo.gpgkey
key_installed = False
def _prov_key_data(msg):
msg += _('\n\n\n'
' Failing package is: %s\n'
' GPG Keys are configured as: %s\n'
) % (po, ", ".join(repo.gpgkey))
return msg
user_cb_fail = False
for keyurl in keyurls:
keys = self._retrievePublicKey(keyurl, repo)
for info in keys:
ts = self.rpmdb.readOnlyTS()
# Check if key is already installed
if misc.keyInstalled(ts, info['keyid'], info['timestamp']) >= 0:
self.logger.info(_('GPG key at %s (0x%s) is already installed') % (
keyurl, info['hexkeyid']))
continue
if repo.gpgcakey and info['has_sig'] and info['valid_sig']:
key_installed = True
else:
# Try installing/updating GPG key
self._getKeyImportMessage(info, keyurl)
rc = False
if self.conf.assumeno:
rc = False
elif self.conf.assumeyes:
rc = True
# grab the .sig/.asc for the keyurl, if it exists
# if it does check the signature on the key
# if it is signed by one of our ca-keys for this repo or the global one
# then rc = True
# else ask as normal.
elif fullaskcb:
rc = fullaskcb({"po": po, "userid": info['userid'],
"hexkeyid": info['hexkeyid'],
"keyurl": keyurl,
"fingerprint": info['fingerprint'],
"timestamp": info['timestamp']})
elif askcb:
rc = askcb(po, info['userid'], info['hexkeyid'])
if not rc:
user_cb_fail = True
continue
# Import the key
ts = self.rpmdb.readOnlyTS()
result = ts.pgpImportPubkey(misc.procgpgkey(info['raw_key']))
if result != 0:
msg = _('Key import failed (code %d)') % result
raise Errors.YumBaseError, _prov_key_data(msg)
self.logger.info(_('Key imported successfully'))
key_installed = True
if not key_installed and user_cb_fail:
raise Errors.YumBaseError, _("Didn't install any keys")
if not key_installed:
msg = _('The GPG keys listed for the "%s" repository are ' \
'already installed but they are not correct for this ' \
'package.\n' \
'Check that the correct key URLs are configured for ' \
'this repository.') % repo.name
raise Errors.YumBaseError, _prov_key_data(msg)
# Check if the newly installed keys helped
result, errmsg = self.sigCheckPkg(po)
if result != 0:
msg = _("Import of key(s) didn't help, wrong key(s)?")
self.logger.info(msg)
errmsg = to_unicode(errmsg)
raise Errors.YumBaseError, _prov_key_data(errmsg)
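# Callback sketch (hypothetical): askcb receives the package object, the
# key's userid and its hex keyid, and returns whether to import the key:
#
#     def my_askcb(po, userid, hexkeyid):
#         print 'Import key 0x%s (%s) for %s?' % (hexkeyid, userid, po)
#         return raw_input('[y/N] ').lower() == 'y'
#
#     yb.getKeyForPackage(po, askcb=my_askcb)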
def _getAnyKeyForRepo(self, repo, destdir, keyurl_list, is_cakey=False, callback=None):
"""
Retrieve a key for a repository. If needed, prompt via the callback
for whether the key should be imported.
@param repo: Repository object to retrieve the key of.
@param destdir: destination of the gpg pub ring
@param keyurl_list: list of urls for gpg keys
@param is_cakey: bool - are we pulling in a ca key or not
@param callback: Callback function to use for asking for permission to
import a key. This is verification, but also "choice".
Takes a dictionary of key info.
"""
key_installed = False
def _prov_key_data(msg):
cakeytxt = _("No")
if is_cakey:
cakeytxt = _("Yes")
msg += _('\n\n\n'
' CA Key: %s\n'
' Failing repo is: %s\n'
' GPG Keys are configured as: %s\n'
) % (cakeytxt, repo, ", ".join(keyurl_list))
return msg
user_cb_fail = False
for keyurl in keyurl_list:
keys = self._retrievePublicKey(keyurl, repo, getSig=not is_cakey)
for info in keys:
# Check if key is already installed
if hex(int(info['keyid']))[2:-1].upper() in misc.return_keyids_from_pubring(destdir):
self.logger.info(_('GPG key at %s (0x%s) is already imported') % (
keyurl, info['hexkeyid']))
key_installed = True
continue
# Try installing/updating GPG key
if is_cakey:
# know where the 'imported_cakeys' file is
ikf = repo.base_persistdir + '/imported_cakeys'
keytype = 'CA'
cakeys = []
try:
cakeys_d = open(ikf, 'r').read()
cakeys = cakeys_d.split('\n')
except (IOError, OSError):
pass
if str(info['hexkeyid']) in cakeys:
key_installed = True
else:
keytype = 'GPG'
if repo.gpgcakey and info['has_sig'] and info['valid_sig']:
key_installed = True
if not key_installed:
self._getKeyImportMessage(info, keyurl, keytype)
rc = False
if self.conf.assumeno:
rc = False
elif self.conf.assumeyes:
rc = True
elif callback:
rc = callback({"repo": repo, "userid": info['userid'],
"hexkeyid": info['hexkeyid'], "keyurl": keyurl,
"fingerprint": info['fingerprint'],
"timestamp": info['timestamp']})
if not rc:
user_cb_fail = True
continue
# Import the key
result = misc.import_key_to_pubring(info['raw_key'], info['hexkeyid'], gpgdir=destdir)
if not result:
msg = _('Key %s import failed') % info['hexkeyid']
raise Errors.YumBaseError, _prov_key_data(msg)
self.logger.info(_('Key imported successfully'))
key_installed = True
# write out the key id to imported_cakeys in the repos basedir
if is_cakey and key_installed:
if info['hexkeyid'] not in cakeys:
ikfo = open(ikf, 'a')
try:
ikfo.write(info['hexkeyid']+'\n')
ikfo.flush()
ikfo.close()
except (IOError, OSError):
# maybe a warning - but in general this is not-critical, just annoying to the user
pass
if not key_installed and user_cb_fail:
msg = _("Didn't install any keys for repo %s") % repo
raise Errors.YumBaseError, _prov_key_data(msg)
if not key_installed:
msg = \
_('The GPG keys listed for the "%s" repository are ' \
'already installed but they are not correct.\n' \
'Check that the correct key URLs are configured for ' \
'this repository.') % (repo.name)
raise Errors.YumBaseError, _prov_key_data(msg)
def getKeyForRepo(self, repo, callback=None):
"""Retrieve a key for a repository. If needed, use the given
callback to prompt whether the key should be imported.
:param repo: repository object to retrieve the key of
:param callback: callback function to use for asking for
verification of key information
"""
self._getAnyKeyForRepo(repo, repo.gpgdir, repo.gpgkey, is_cakey=False, callback=callback)
def getCAKeyForRepo(self, repo, callback=None):
"""Retrieve a key for a repository. If needed, use the given
callback to prompt whether the key should be imported.
:param repo: repository object to retrieve the key of
:param callback: callback function to use for asking for
verification of key information
"""
self._getAnyKeyForRepo(repo, repo.gpgcadir, repo.gpgcakey, is_cakey=True, callback=callback)
def _limit_installonly_pkgs(self):
""" Limit packages based on conf.installonly_limit, if any of the
packages being installed have a provide in conf.installonlypkgs.
New in 3.2.24: Obey yumdb_info.installonly data. """
def _sort_and_filter_installonly(pkgs):
""" Allow the admin to specify some overrides for installonly pkgs.
using the yumdb. """
ret_beg = []
ret_mid = []
ret_end = []
for pkg in sorted(pkgs):
if 'installonly' not in pkg.yumdb_info:
ret_mid.append(pkg)
continue
if pkg.yumdb_info.installonly == 'keep':
continue
if True: # Don't do magic sorting, yet
ret_mid.append(pkg)
continue
if pkg.yumdb_info.installonly == 'remove-first':
ret_beg.append(pkg)
elif pkg.yumdb_info.installonly == 'remove-last':
ret_end.append(pkg)
else:
ret_mid.append(pkg)
return ret_beg + ret_mid + ret_end
if self.conf.installonly_limit < 1 :
return
# We shouldn't alter the transaction if this is set...
if self.tsInfo._check_future_rpmdbv:
return
toremove = []
# We "probably" want to use either self.ts or self.rpmdb.ts if either
# is available. However each ts takes a ref. on signals generally, and
# SIGINT specifically, so we _must_ have got rid of all of the used tses
# before we try downloading. This is called from buildTransaction()
# so self.rpmdb.ts should be valid.
ts = self.rpmdb.readOnlyTS()
(cur_kernel_v, cur_kernel_r) = misc.get_running_kernel_version_release(ts)
found = {}
for m in self.tsInfo.getMembers():
if m.ts_state not in ('i', 'u'):
continue
if m.reinstall:
continue
if not self.allowedMultipleInstalls(m.po):
continue
if m.name not in found:
found[m.name] = 1
else:
found[m.name] += 1
for name in found:
installed = self.rpmdb.searchNevra(name=name)
installed = _sort_and_filter_installonly(installed)
total = len(installed) + found[name]
if total <= self.conf.installonly_limit:
continue # Not adding enough to trigger.
# Number left to try and remove...
numleft = total - self.conf.installonly_limit
for po in installed:
if (po.version, po.release) == (cur_kernel_v, cur_kernel_r):
# don't remove running
continue
if numleft == 0:
break
toremove.append((po,m))
numleft -= 1
for po,rel in toremove:
txmbr = self.tsInfo.addErase(po)
# Add a dep relation to the new version of the package, causing this one to be erased
# this way skipbroken should clean out the old one if the new one is skipped
txmbr.depends_on.append(rel)
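# Worked example (values assumed): with installonly_limit=3 and kernels
# 3.10.0-123, 3.10.0-229 and 3.10.0-327 installed, installing a fourth
# kernel makes total = 4 > 3, so one old kernel is marked for erase; the
# running kernel is always skipped by the version/release check above.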
def processTransaction(self, callback=None,rpmTestDisplay=None, rpmDisplay=None):
"""Process the current transaction. This involves the
following steps:
- Download the packages
- Check the GPG signatures of the packages
- Run the test RPM transaction
- Run the RPM Transaction
The *callback*.event method is called at the start, and
between each step.
:param callback: a callback object, which must have an event
method
:param rpmTestDisplay: name of the display class to use in the
RPM test transaction
:param rpmDisplay: name of the display class to use in the rpm
transaction
"""
if not callback:
callback = callbacks.ProcessTransNoOutputCallback()
# Download Packages
callback.event(callbacks.PT_DOWNLOAD)
pkgs = self._downloadPackages(callback)
# Check Package Signatures
if pkgs != None:
callback.event(callbacks.PT_GPGCHECK)
self._checkSignatures(pkgs,callback)
# Run Test Transaction
callback.event(callbacks.PT_TEST_TRANS)
self._doTestTransaction(callback,display=rpmTestDisplay)
# Run Transaction
callback.event(callbacks.PT_TRANSACTION)
self._doTransaction(callback,display=rpmDisplay)
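# Usage sketch (assumed): a caller typically marks packages, depsolves and
# then hands off to processTransaction(), optionally passing a callback
# object whose .event() method is invoked between the steps listed above:
#
#     yb.install(pattern='tmux')   # package name is a placeholder
#     yb.buildTransaction()
#     yb.processTransaction()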
def _downloadPackages(self,callback):
''' Download the needed packages in the Transaction '''
# This can be overloaded by a subclass.
dlpkgs = map(lambda x: x.po, filter(lambda txmbr:
txmbr.ts_state in ("i", "u"),
self.tsInfo.getMembers()))
# Check if there is something to do
if len(dlpkgs) == 0:
return None
# make callback with packages to download
callback.event(callbacks.PT_DOWNLOAD_PKGS,dlpkgs)
try:
probs = self.downloadPkgs(dlpkgs)
except IndexError:
raise Errors.YumBaseError, [_("Unable to find a suitable mirror.")]
if len(probs) > 0:
errstr = [_("Errors were encountered while downloading packages.")]
for key in probs:
errors = misc.unique(probs[key])
for error in errors:
errstr.append("%s: %s" % (key, error))
raise Errors.YumDownloadError, errstr
return dlpkgs
def _checkSignatures(self,pkgs,callback):
''' Check the signatures of the downloaded packages '''
# This can be overloaded by a subclass.
for po in pkgs:
result, errmsg = self.sigCheckPkg(po)
if result == 0:
# Verified ok, or verify not req'd
continue
elif result == 1:
self.getKeyForPackage(po, self._askForGPGKeyImport)
else:
raise Errors.YumGPGCheckError, errmsg
return 0
def _askForGPGKeyImport(self, po, userid, hexkeyid):
'''
Ask for GPGKeyImport
This needs to be overloaded in a subclass to make GPG key import work
'''
return False
def _doTestTransaction(self,callback,display=None):
''' Do the RPM test transaction '''
self.initActionTs()
# save our dsCallback out
dscb = self.dsCallback
self.dsCallback = None # dumb, dumb dumb dumb!
self.populateTs( keepold=0 ) # sigh
# This can be overloaded by a subclass.
self.verbose_logger.log(logginglevels.INFO_2,
_('Running Transaction Check'))
msgs = self._run_rpm_check()
if msgs:
rpmlib_only = True
for msg in msgs:
if msg.startswith('rpmlib('):
continue
rpmlib_only = False
if rpmlib_only:
retmsgs = [_("ERROR You need to update rpm to handle:")]
retmsgs.extend(msgs)
raise Errors.YumRPMCheckError, retmsgs
retmsgs = [_('ERROR with transaction check vs depsolve:')]
retmsgs.extend(msgs)
# Don't encourage users to file a bug here, as this is probably a
# pre-existing issue in the dependencies of installed packages
raise Errors.YumRPMCheckError,retmsgs
tsConf = {}
for feature in ['diskspacecheck']: # more to come, I'm sure
tsConf[feature] = getattr( self.conf, feature )
#
testcb = RPMTransaction(self, test=True)
# overwrite the default display class
if display:
testcb.display = display
tserrors = self.ts.test( testcb, conf=tsConf )
del testcb
if len( tserrors ) > 0:
errstring = _('Test Transaction Errors: ')
for descr in tserrors:
errstring += ' %s\n' % descr
raise Errors.YumTestTransactionError, errstring
del self.ts
# put back our depcheck callback
self.dsCallback = dscb
def _doTransaction(self,callback,display=None):
''' do the RPM Transaction '''
# This can be overloaded by a subclass.
self.initActionTs() # make a new, blank ts to populate
self.populateTs( keepold=0 ) # populate the ts
self.ts.check() # required for ordering
self.ts.order() # order
cb = RPMTransaction(self,display=SimpleCliCallBack)
# overwrite the default display class
if display:
cb.display = display
self.runTransaction( cb=cb )
def _run_rpm_check(self):
results = []
self.ts.check()
for prob in self.ts.problems():
# Newer rpm (4.8.0+) has problem objects, older have just strings.
# Should probably move to using the new objects, when we can. For
# now just be compatible.
results.append(to_str(prob))
return results
def add_enable_repo(self, repoid, baseurls=[], mirrorlist=None, **kwargs):
"""Add and enable a repository.
:param repoid: a string specifying the name of the repository
:param baseurls: a list of strings specifying the urls for
the repository. At least one base url, or one mirror, must
be given
:param mirrorlist: a list of strings specifying a list of
mirrors for the repository. At least one base url, or one
mirror must be given
:param kwargs: key word arguments to set any normal repository
attribute
:return: the new repository that has been added and enabled
"""
# out of place fixme - maybe we should make this the default repo addition
# routine and use it from getReposFromConfigFile(), etc.
newrepo = yumRepo.YumRepository(repoid)
newrepo.name = repoid
newrepo.basecachedir = self.conf.cachedir
var_convert = kwargs.get('variable_convert', True)
if baseurls:
replaced = []
if var_convert:
for baseurl in baseurls:
if baseurl:
replaced.append(varReplace(baseurl, self.conf.yumvar))
else:
replaced = baseurls
newrepo.baseurl = replaced
if mirrorlist:
if var_convert:
mirrorlist = varReplace(mirrorlist, self.conf.yumvar)
newrepo.mirrorlist = mirrorlist
# setup the repo
newrepo.setup(cache=self.conf.cache)
# some reasonable defaults, (imo)
newrepo.enablegroups = True
newrepo.metadata_expire = 0
newrepo.gpgcheck = self.conf.gpgcheck
newrepo.repo_gpgcheck = self.conf.repo_gpgcheck
newrepo.basecachedir = self.conf.cachedir
newrepo.base_persistdir = self.conf._repos_persistdir
for key in kwargs.keys():
if not hasattr(newrepo, key): continue # skip the ones which aren't vars
setattr(newrepo, key, kwargs[key])
# add the new repo
self.repos.add(newrepo)
# enable the new repo
self.repos.enableRepo(newrepo.id)
return newrepo
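# Usage sketch (the URL is a placeholder): add and enable a repo for this
# process only; yum variables in the baseurl are expanded unless
# variable_convert=False is passed in kwargs:
#
#     yb.add_enable_repo('myrepo',
#                        baseurls=['http://example.com/repo/$basearch/'],
#                        gpgcheck=False)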
def setCacheDir(self, force=False, tmpdir=None, reuse=True,
suffix='/$basearch/$releasever'):
"""Set a new cache directory.
:param force: whether to force the cache directory to be
changed
:param tmpdir: a temporary directory
:param reuse: whether the temporary directory can be reused
:param suffix: suffix to attach to the directory name
:return: whether the new cache directory is successfully set
"""
if not force and os.geteuid() == 0:
return True # We are root, not forced, so happy with the global dir.
if tmpdir is None:
tmpdir = os.getenv('TMPDIR')
if tmpdir is None: # Note that TMPDIR isn't exported by default :(
tmpdir = '/var/tmp'
try:
cachedir = misc.getCacheDir(tmpdir, reuse)
except (IOError, OSError), e:
self.logger.critical(_('Could not set cachedir: %s') % exception2msg(e))
cachedir = None
if cachedir is None:
return False # Tried, but failed, to get a "user" cachedir
cachedir += varReplace(suffix, self.conf.yumvar)
if hasattr(self, 'prerepoconf'):
self.prerepoconf.cachedir = cachedir
else:
self.repos.setCacheDir(cachedir)
if not hasattr(self, '_old_cachedir'):
self._old_cachedir = self.conf.cachedir
self.conf.cachedir = cachedir
return True # We got a new cache dir
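# Usage sketch (assumed): non-root callers typically switch to a per-user
# cache directory before repository setup:
#
#     if os.geteuid() != 0:
#         yb.setCacheDir()   # falls back to $TMPDIR or /var/tmp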
def _does_this_update(self, pkg1, pkg2):
"""returns True if pkg1 can update pkg2, False if not.
This only checks if it can be an update; it does not check whether
it is obsoleting or anything else."""
if pkg1.name != pkg2.name:
return False
if pkg1.verLE(pkg2):
return False
if pkg1.arch not in self.arch.archlist:
return False
if rpmUtils.arch.canCoinstall(pkg1.arch, pkg2.arch):
return False
if self.allowedMultipleInstalls(pkg1):
return False
return True
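# Example (hypothetical packages): bash-4.3.x86_64 can update
# bash-4.2.x86_64 (same name, higher version, non-coinstallable arches),
# but it does not "update" bash-4.2.i686 on a multilib host, because the
# two arches can be installed side by side (canCoinstall is True).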
def _store_config_in_history(self):
self.history.write_addon_data('config-main', self.conf.dump())
myrepos = ''
for repo in self.repos.listEnabled():
myrepos += repo.dump()
myrepos += '\n'
self.history.write_addon_data('config-repos', myrepos)
def verify_plugins_cb(self, verify_package):
"""Callback to call a plugin hook for pkg.verify().
:param verify_package: a conduit for the callback
:return: *verify_package*
"""
self.plugins.run('verify_package', verify_package=verify_package)
return verify_package
def save_ts(self, filename=None, auto=False):
"""Save out a transaction to a .yumtx file to be loaded later.
:param filename: the name of the file to save the transaction
in. If *filename* is not given, a name will be generated
:param auto: whether to output errors to the logger, rather
than raising exceptions
:raises: :class:`yum.Errors.YumBaseError` if there are errors
saving the transaction
"""
if self.tsInfo._unresolvedMembers:
if auto:
self.logger.critical(_("Dependencies not solved. Will not save unresolved transaction."))
return
raise Errors.YumBaseError(_("Dependencies not solved. Will not save unresolved transaction."))
if not filename:
prefix = 'yum_save_tx.%s' % time.strftime('%Y-%m-%d.%H-%M.')
fd,filename = tempfile.mkstemp(suffix='.yumtx', prefix=prefix)
f = os.fdopen(fd, 'w')
else:
f = open(filename, 'w')
self._ts_save_file = filename
msg = "%s\n" % self.rpmdb.simpleVersion(main_only=True)[0]
msg += "%s\n" % self.ts.getTsFlags()
if self.tsInfo._pkgSack is None: # Transactions have pkgSack?
msg += "1\n"
else:
msg += "%s\n" % (len(self.repos.listEnabled()) + 1)
for r in self.repos.listEnabled():
msg += "%s:%s:%s\n" % (r.id, len(r.sack), r.repoXML.revision)
# Save what we think the future rpmdbv will be.
msg += "%s:%s\n" % ('installed', self.tsInfo.futureRpmDBVersion())
msg += "%s\n" % len(self.tsInfo.getMembers())
for txmbr in self.tsInfo.getMembers():
msg += txmbr._dump()
try:
f.write(msg)
f.close()
except (IOError, OSError), e:
self._ts_save_file = None
if auto:
self.logger.critical(_("Could not save transaction file %s: %s") % (filename, exception2msg(e)))
else:
raise Errors.YumBaseError(_("Could not save transaction file %s: %s") % (filename, exception2msg(e)))
def _load_ts_data(self, filename):
""" Load the file into a simple data format. """
try:
data = open(filename, 'r').readlines()
except (IOError, OSError), e:
return (exception2msg(e), None)
if not data:
return (_("File is empty."), None)
if data[0] == 'saved_tx:\n':
# Old versions of yum would put "saved_tx:" at the beginning and
# two blank lines at the end when you used:
# "yum -q history addon-info saved_tx".
if data[-1] == 'history addon-info\n':
# Might as well also DTRT if they hand removed the plugins line
data = data[1:-3]
else:
data = data[1:-2]
return (None, data)
def load_ts(self, filename, ignorerpm=None, ignoremissing=None,
ignorenewrpm=None):
"""Load a transaction from a .yumtx file.
:param filename: the name of the file to load the transaction
from
:param ignorerpm: whether to ignore starting rpmdb version mismatch.
:param ignoremissing: whether to ignore that there may be
transaction members missing
:param ignorenewrpm: whether to ignore ending rpmdb version mismatch.
:return: the members of the loaded transaction
:raises: :class:`yum.Errors.YumBaseError` if there are problems
loading the transaction
"""
# check rpmversion - if not match throw a fit
# check repoversions (and repos)- if not match throw a fit
# load each txmbr - if pkgs being updated don't exist, bail w/error
# setup any ts flags
# setup cmds for history/yumdb to know about
# return txmbrs loaded
data = self._load_ts_data(filename)
if data[0] is not None:
raise Errors.YumBaseError(_("Could not access/read saved transaction %s : %s") % (filename, data[0]))
data = data[1]
if ignorerpm is None:
ignorerpm = self.conf.loadts_ignorerpm
if ignorenewrpm is None:
ignorenewrpm = self.conf.loadts_ignorenewrpm
if ignoremissing is None:
ignoremissing = self.conf.loadts_ignoremissing
# Inherit this, because for the ending version to match the starting
# version must match.
if ignorerpm:
ignorenewrpm = True
# data format
# 0 == rpmdb version
# 1 == tsflags
# 2 == numrepos
# 3:numrepos = repos
# -- post 3.2.29 update: 'installed' repo added with the values as the
# new rpmdb version.
# 3+numrepos = num pkgs
# 3+numrepos+1 -> EOF= txmembers
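# An illustrative, made-up .yumtx payload matching the layout above:
#   1234:0123abcd...               <- starting rpmdb version
#   0                              <- tsflags
#   2                              <- numrepos (enabled repos + 'installed')
#   base:5123:1400000000           <- repoid:num_pkgs:revision
#   installed:1235:dcba3210...     <- future rpmdb version (post 3.2.29)
#   1                              <- number of txmbrs
#   mbr: bash,x86_64,0,4.2,1 <state>   <- txmbr records follow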
# rpm db ver
rpmv = data[0].strip()
if rpmv != str(self.rpmdb.simpleVersion(main_only=True)[0]):
msg = _("rpmdb ver mismatched saved transaction version,")
if ignorerpm:
msg += _(" ignoring, as requested.")
self.logger.critical(msg)
else:
msg += _(" aborting.")
raise Errors.YumBaseError(msg)
# tsflags
# FIXME - probably should let other tsflags play nicely together
# so someone can add --nogpgcheck or --nodocs or --nodiskspace or some nonsense and have it work
try:
tsflags = int(data[1].strip())
except (ValueError, IndexError), e:
msg = _("cannot find tsflags or tsflags not integer.")
raise Errors.YumBaseError(msg)
self.ts.setFlags(tsflags)
# repos
numrepos = int(data[2].strip())
repos = []
rindex=3+numrepos
future_rpmdbv = None
for r in data[3:rindex]:
repo = r.strip().split(':')
if repo and repo[0] == 'installed':
# This is an update hack to list the _future_ rpmdb version.
# Doing it this way allows older yum's to load newer ts files.
future_rpmdbv = "%s:%s" % (repo[1], repo[2])
continue
repos.append(repo)
# pkgs/txmbrs
numpkgs = int(data[rindex].strip())
pkgstart = rindex + 1
pkgcount = 0
pkgprob = False
curpkg = None
missingany = False
for l in data[pkgstart:]:
l = l.rstrip()
# our main txmbrs
if l.startswith('mbr:'):
if curpkg:
self.tsInfo.add(curpkg)
if curpkg in self.tsInfo._unresolvedMembers and not missingany:
self.tsInfo._unresolvedMembers.remove(curpkg)
missingany = False
pkgtup, current_state = l.split(':')[1].strip().split(' ')
current_state = int(current_state.strip())
pkgtup = tuple(pkgtup.strip().split(','))
try:
if current_state == TS_INSTALL:
po = self.getInstalledPackageObject(pkgtup)
elif current_state == TS_AVAILABLE:
po = self.getPackageObject(pkgtup)
else:
msg = _("Found txmbr in unknown current state: %s" % current_state)
raise Errors.YumBaseError(msg)
except Errors.YumBaseError, e:
missingany = True
msg = _("Could not find txmbr: %s in state %s" % (str(pkgtup), current_state))
if not ignoremissing:
raise Errors.YumBaseError(msg)
else:
ignorenewrpm = True
self.logger.critical(msg)
else:
pkgcount += 1
curpkg = transactioninfo.TransactionMember(po)
curpkg.current_state = current_state
continue
l = l.strip()
k,v = l.split(':', 1)
v = v.lstrip()
# attributes of our txmbrs
if k in ('isDep', 'reinstall'):
v = v.strip().lower()
if v == 'false':
setattr(curpkg, k, False)
elif v == 'true':
setattr(curpkg, k, True)
elif k in ('output_state'):
setattr(curpkg, k, int(v.strip()))
elif k in ('groups'):
curpkg.groups.extend(v.split(' '))
# the relationships to our main txmbrs
elif k in ('updated_by', 'obsoleted_by', 'downgraded_by',
'downgrades', 'updates', 'obsoletes', 'depends_on'):
for pkgspec in v.strip().split(' '):
pkgtup, origin = pkgspec.split('@')
try:
if origin == 'i':
po = self.getInstalledPackageObject(tuple(pkgtup.split(',')))
else:
po = self.getPackageObject(tuple(pkgtup.split(',')))
except Errors.YumBaseError, e:
msg = _("Could not find txmbr: %s from origin: %s" % (str(pkgtup), origin))
self.logger.critical(msg)
missingany = True
else:
curlist = getattr(curpkg, k)
curlist.append(po)
setattr(curpkg, k, curlist)
elif k in ('relatedto'):
for item in v.split(' '):
pkgspec, rel = item.split(':')
pkgtup,origin = pkgspec.split('@')
try:
if origin == 'i':
po = self.getInstalledPackageObject(tuple(pkgtup.split(',')))
else:
po = self.getPackageObject(tuple(pkgtup.split(',')))
except Errors.YumBaseError, e:
msg = _("Could not find txmbr: %s from origin: %s" % (str(pkgtup), origin))
self.logger.critical(msg)
missingany = True
else:
curlist = getattr(curpkg, k)
curlist.append((po,rel))
setattr(curpkg, k, curlist)
# the plain strings
else: #ts_state, reason
setattr(curpkg, k, v.strip())
if missingany:
pkgprob = True
# make sure we get the last one in!
self.tsInfo.add(curpkg)
if curpkg in self.tsInfo._unresolvedMembers:
self.tsInfo._unresolvedMembers.remove(curpkg)
if numpkgs != pkgcount:
pkgprob = True
if pkgprob:
msg = _("Transaction members, relations are missing or ts has been modified,")
if ignoremissing:
ignorenewrpm = True
msg += _(" ignoring, as requested. You must redepsolve!")
self.logger.critical(msg)
else:
msg += _(" aborting.")
raise Errors.YumBaseError(msg)
if len(self.tsInfo) != pkgcount:
future_rpmdbv = None
if future_rpmdbv is not None:
self.tsInfo._check_future_rpmdbv = (pkgcount, future_rpmdbv,
ignorenewrpm)
return self.tsInfo.getMembers()
def _remove_old_deps(self):
"""take the set of pkgs being removed and remove any pkgs which are:
1. not required anymore
2. marked as a 'dep' in the 'reason' in the yumdb. """
found_leaves = set()
checked = set()
beingremoved = [ t.po for t in self.tsInfo.getMembersWithState(output_states=TS_REMOVE_STATES) ]
# cache previously examined packages
okay_to_remove = {}
for i in self.rpmdb.returnPackages():
okay_to_remove[i] = True
for pkg in beingremoved:
#print 'removal: %s' % pkg.name
for required in pkg.required_packages(): # for each package required by the pkg being removed
#if required in checked:
# continue # if we've already checked it, skip it.
#checked.add(required)
if required.yumdb_info.get('reason', '') != 'dep': # if the required pkg is not a dep, then skip it
okay_to_remove[required] = False
continue
if required in beingremoved:
continue
if self._has_needed_revdeps(required, beingremoved, okay_to_remove):
continue
still_needed = False
for requiring in required.requiring_packages(): # so we have required deps - look at all the pkgs which require them
if requiring == required: # if they are self-requiring skip them
continue
#for tbi_pkg in self.tsInfo.getMembersWithState(output_states=TS_INSTALL_STATES):
# for reqtuple in tbi_pkg.po.requires:
# if required.provides_for(reqtuple):
# still_needed = True
# break
if not still_needed:
print '---> Marking %s to be removed - no longer needed by %s' % (required.name, pkg.name)
txmbrs = self.remove(po=required)
for txmbr in txmbrs:
txmbr.setAsDep(po=pkg)
if txmbr.po not in beingremoved:
beingremoved.append(txmbr.po)
found_leaves.add(txmbr)
self.verbose_logger.log(logginglevels.INFO_2, "Found and removing %s unneeded dependencies" % len(found_leaves))
# Checks if pkg has any reverse deps which cannot be removed.
# Currently this only checks the install reason for each revdep,
# but we may want to check for other reasons that would
# prevent the revdep from being removed (e.g. protected)
def _has_needed_revdeps(self, pkg, beingremoved, ok_to_remove):
# check if we've already found this package to have user-installed deps
if not ok_to_remove[pkg]:
# Debugging output
self.verbose_logger.log(logginglevels.DEBUG_2, _("%s has been visited already and cannot be removed."), pkg)
return True
# Debugging output
self.verbose_logger.log(logginglevels.DEBUG_2, _("Examining revdeps of %s"), pkg)
# track which pkgs we have visited already
visited = {}
for po in self.rpmdb.returnPackages():
visited[po] = False
# no need to consider packages that are already being removed
for po in beingremoved:
visited[po] = True
stack = []
stack.append(pkg)
# depth-first search
while stack:
curpkg = stack[-1]
if not visited[curpkg]:
if not ok_to_remove[curpkg]:
# Debugging output
self.verbose_logger.log(logginglevels.DEBUG_2, _("%s has been visited already and cannot be removed."), pkg)
ok_to_remove[pkg] = False
return True
if curpkg.yumdb_info.get('reason', '') != 'dep':
# Debugging output
self.verbose_logger.log(logginglevels.DEBUG_2, _("%s has revdep %s which was user-installed."), pkg, curpkg)
ok_to_remove[pkg] = False
ok_to_remove[curpkg] = False
return True
# Go through the stuff in the ts to be installed - make sure
# none of that needs the required pkg, either.
for (provn,provf,provevr) in curpkg.provides:
if self.tsInfo.getNewRequires(provn, provf, provevr).keys():
ok_to_remove[pkg] = False
ok_to_remove[curpkg] = False
self.verbose_logger.log(logginglevels.DEBUG_2, _("%s is needed by a package to be installed."), curpkg)
return True
for fn in curpkg.filelist + curpkg.dirlist:
if self.tsInfo.getNewRequires(fn, None,(None,None,None)).keys():
ok_to_remove[pkg] = False
ok_to_remove[curpkg] = False
self.verbose_logger.log(logginglevels.DEBUG_2, _("%s is needed by a package to be installed."), curpkg)
return True
visited[curpkg] = True
all_leaves_visited = True
leaves = curpkg.requiring_packages()
for leaf in leaves:
if not visited[leaf]:
stack.append(leaf)
all_leaves_visited = False
break
if all_leaves_visited:
stack.pop()
# Debugging output
self.verbose_logger.log(logginglevels.DEBUG_2, _("%s has no user-installed revdeps."), pkg)
return False
|
Distrotech/yum
|
yum/__init__.py
|
Python
|
gpl-2.0
| 305,350
|
#
# Copyright 2009-2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
"""
This is the Host Storage Manager module.
"""
import os
import threading
import logging
import glob
from fnmatch import fnmatch
from itertools import imap
from collections import defaultdict
from functools import partial, wraps
import errno
import time
import signal
import types
import math
import numbers
import stat
from vdsm.config import config
import sp
from spbackends import MAX_POOL_DESCRIPTION_SIZE, MAX_DOMAINS
from spbackends import StoragePoolDiskBackend
from spbackends import StoragePoolMemoryBackend
import monitor
import sd
import blockSD
import nfsSD
import glusterSD
import localFsSD
import lvm
import fileUtils
import multipath
import outOfProcess as oop
from sdc import sdCache
import image
import volume
import iscsi
import misc
from misc import deprecated
import taskManager
import clusterlock
import storage_exception as se
from threadLocal import vars
from vdsm import constants
from storageConstants import STORAGE
import resourceManager as rm
from resourceFactories import IMAGE_NAMESPACE
import devicemapper
import logUtils
import mount
import dispatcher
import supervdsm
import storageServer
from vdsm import utils
from vdsm import qemuimg
GUID = "guid"
NAME = "name"
UUID = "uuid"
TYPE = "type"
INITIALIZED = "initialized"
CAPACITY = "capacity"
PATHLIST = "pathlist"
logged = partial(
logUtils.logcall, "dispatcher", "Run and protect: %s",
resPattern="Run and protect: %(name)s, Return response: %(result)s")
rmanager = rm.ResourceManager.getInstance()
# FIXME: moved from spm.py but this should be somewhere else
SECTOR_SIZE = 512
STORAGE_CONNECTION_DIR = os.path.join(constants.P_VDSM_LIB, "connections/")
QEMU_READABLE_TIMEOUT = 30
HSM_DOM_MON_LOCK = "HsmDomainMonitorLock"
def public(f=None, **kwargs):
if f is None:
return partial(public, **kwargs)
publicFunctionLogger = kwargs.get("logger", logged())
return dispatcher.exported(wraps(f)(publicFunctionLogger(f)))
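# Note (explanatory, added): @public marks an HSM method as exported through
# the dispatcher and wraps it with the "Run and protect" logger defined
# above; see e.g. registerDomainStateChangeCallback() further down for a
# typical use.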
# Connection Management API compatibility code
# Remove when deprecating dis/connectStorageServer
CON_TYPE_ID_2_CON_TYPE = {
sd.LOCALFS_DOMAIN: 'localfs',
sd.NFS_DOMAIN: 'nfs',
sd.ISCSI_DOMAIN: 'iscsi',
# FCP domain shouldn't even be on the list but VDSM used to just
# accept this type as iscsi so we are stuck with it
sd.FCP_DOMAIN: 'iscsi',
sd.POSIXFS_DOMAIN: 'posixfs',
sd.GLUSTERFS_DOMAIN: 'glusterfs'}
def _updateIfaceNameIfNeeded(iface, netIfaceName):
if iface.netIfaceName is None:
iface.netIfaceName = netIfaceName
iface.update()
return True
return False
def _resolveIscsiIface(ifaceName, initiatorName, netIfaceName):
if not ifaceName:
return iscsi.IscsiInterface('default')
for iface in iscsi.iterateIscsiInterfaces():
if iface.name != ifaceName:
continue
if netIfaceName is not None:
if (not _updateIfaceNameIfNeeded(iface, netIfaceName) and
netIfaceName != iface.netIfaceName):
logging.error('iSCSI netIfaceName coming from engine [%s] '
'is different from iface.net_ifacename '
'present on the system [%s]. Aborting iscsi '
'iface [%s] configuration.' %
(netIfaceName, iface.netIfaceName, iface.name))
raise se.iSCSIifaceError()
return iface
iface = iscsi.IscsiInterface(ifaceName, initiatorName=initiatorName,
netIfaceName=netIfaceName)
iface.create()
return iface
def _connectionDict2ConnectionInfo(conTypeId, conDict):
def getIntParam(optDict, key, default):
res = optDict.get(key, default)
if res is None:
return res
try:
return int(res)
except ValueError:
raise se.InvalidParameterException(key, res)
# FIXME: Remove when nfs_mount_options is no longer supported. This is
# in the compatibility layer so that the NFSConnection class stays clean.
# Engine options have precedence, so use deprecated nfs_mount_options
# only if engine passed nothing (indicated by default params of 'None').
def tryDeprecatedNfsParams(conDict):
if (conDict.get('protocol_version', None),
conDict.get('retrans', None),
conDict.get('timeout', None)) == (None, None, None):
conf_options = config.get(
'irs', 'nfs_mount_options').replace(' ', '')
if (frozenset(conf_options.split(',')) !=
frozenset(storageServer.NFSConnection.DEFAULT_OPTIONS)):
logging.warning("Using deprecated nfs_mount_options from"
" vdsm.conf to mount %s: %s",
conDict.get('connection', '(unknown)'),
conf_options)
return storageServer.PosixFsConnectionParameters(
conDict.get('connection', None), 'nfs', conf_options)
return None
typeName = CON_TYPE_ID_2_CON_TYPE[conTypeId]
if typeName == 'localfs':
params = storageServer.LocaFsConnectionParameters(
conDict.get('connection', None))
elif typeName == 'nfs':
params = tryDeprecatedNfsParams(conDict)
if params is not None:
# Hack to support vdsm.conf nfs_mount_options
typeName = 'posixfs'
else:
version = conDict.get('protocol_version', "3")
version = str(version)
if version == "auto":
version = None
params = storageServer.NfsConnectionParameters(
conDict.get('connection', None),
getIntParam(conDict, 'retrans', None),
getIntParam(conDict, 'timeout', None),
version,
conDict.get('mnt_options', None))
elif typeName == 'posixfs':
params = storageServer.PosixFsConnectionParameters(
conDict.get('connection', None),
conDict.get('vfs_type', None),
conDict.get('mnt_options', None))
elif typeName == 'glusterfs':
params = storageServer.GlusterFsConnectionParameters(
conDict.get('connection', None),
conDict.get('vfs_type', None),
conDict.get('mnt_options', None))
elif typeName == 'iscsi':
portal = iscsi.IscsiPortal(
conDict.get('connection', None),
int(conDict.get('port', None)))
tpgt = int(conDict.get('tpgt', iscsi.DEFAULT_TPGT))
target = iscsi.IscsiTarget(portal, tpgt, conDict.get('iqn', None))
iface = _resolveIscsiIface(conDict.get('ifaceName', None),
conDict.get('initiatorName', None),
conDict.get('netIfaceName', None))
cred = None
username = conDict.get('user', None)
password = conDict.get('password', None)
if username or password:
cred = iscsi.ChapCredentials(username, password)
params = storageServer.IscsiConnectionParameters(target, iface, cred)
else:
raise se.StorageServerActionError()
return storageServer.ConnectionInfo(typeName, params)
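# Illustration (values made up): an engine-style connection dict for NFS is
# translated into a typed ConnectionInfo, roughly:
#
#     _connectionDict2ConnectionInfo(
#         sd.NFS_DOMAIN,
#         {'connection': 'filer.example.com:/export/data',
#          'protocol_version': '3', 'retrans': 6, 'timeout': 600})
#     # => ConnectionInfo('nfs', NfsConnectionParameters(...))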
class HSM(object):
"""
This is the HSM class. It controls all the stuff related to the Host.
Furthermore, it doesn't change any pool metadata.
.. attribute:: tasksDir
A string containing the path of the directory where backups of tasks
are saved on the disk.
"""
pools = {}
log = logging.getLogger('Storage.HSM')
@classmethod
def validateSdUUID(cls, sdUUID):
"""
Validate a storage domain.
:param sdUUID: the UUID of the storage domain you want to validate.
:type sdUUID: UUID
"""
sdDom = sdCache.produce(sdUUID=sdUUID)
sdDom.validate()
return sdDom
@classmethod
def validateBackupDom(cls, sdUUID):
"""
Validates a backup domain.
:param sdUUID: the UUID of the storage domain you want to validate.
:type sdUUID: UUID
If the domain doesn't exist an exception will be thrown.
If the domain isn't a backup domain a
:exc:`storage_exception.StorageDomainTypeNotBackup` exception
will be raised.
"""
if not sdCache.produce(sdUUID=sdUUID).isBackup():
raise se.StorageDomainTypeNotBackup(sdUUID)
@classmethod
def validateNonDomain(cls, sdUUID):
"""
Validates that there is no domain with this UUID.
:param sdUUID: The UUID to test.
:type sdUUID: UUID
:raises: :exc:`storage_exception.StorageDomainAlreadyExists` exception
if a domain with this UUID exists.
"""
try:
sdCache.produce(sdUUID=sdUUID)
raise se.StorageDomainAlreadyExists(sdUUID)
# If partial metadata exists the method will throw MetadataNotFound.
# Though correct, the logical response in this context
# is StorageDomainNotEmpty.
except se.StorageDomainMetadataNotFound:
raise se.StorageDomainNotEmpty()
except se.StorageDomainDoesNotExist:
pass
def validateSPM(self, spUUID):
pool = self.getPool(spUUID)
if pool.spmRole != sp.SPM_ACQUIRED:
raise se.SpmStatusError(spUUID)
def validateNotSPM(self, spUUID):
pool = self.getPool(spUUID)
if pool.spmRole != sp.SPM_FREE:
raise se.IsSpm(spUUID)
@classmethod
def getPool(cls, spUUID):
if spUUID not in cls.pools:
raise se.StoragePoolUnknown(spUUID)
return cls.pools[spUUID]
def __init__(self):
"""
The HSM Constructor
:param defExcFunc: The function that will set the default exception
for this thread
:type defExcFun: function
"""
self._ready = False
rm.ResourceManager.getInstance().registerNamespace(
STORAGE, rm.SimpleResourceFactory())
self.storage_repository = config.get('irs', 'repository')
self.taskMng = taskManager.TaskManager()
mountBasePath = os.path.join(self.storage_repository,
sd.DOMAIN_MNT_POINT)
fileUtils.createdir(mountBasePath)
storageServer.MountConnection.setLocalPathBase(mountBasePath)
storageServer.LocalDirectoryConnection.setLocalPathBase(mountBasePath)
self._connectionAliasRegistrar = \
storageServer.ConnectionAliasRegistrar(STORAGE_CONNECTION_DIR)
self._connectionMonitor = \
storageServer.ConnectionMonitor(self._connectionAliasRegistrar)
self._connectionMonitor.startMonitoring()
sp.StoragePool.cleanupMasterMount()
self.__releaseLocks()
self._preparedVolumes = defaultdict(list)
self.__validateLvmLockingType()
oop.setDefaultImpl(config.get('irs', 'oop_impl'))
# __cleanStorageRepository uses the tasksDir value, so it must be assigned
# before calling it
self.tasksDir = config.get('irs', 'hsm_tasks')
# This part should be in the same thread to prevent a race on the mounted
# path; otherwise, storageRefresh can unlink a path that is used by
# another thread that was started at the same time and tried to use the
# same link.
try:
# This call won't get stuck if a mount is inaccessible, thanks to
# misc.walk, so this sync call won't delay hsm initialization.
self.__cleanStorageRepository()
except Exception:
self.log.warn("Failed to clean Storage Repository.", exc_info=True)
@utils.traceback(on=self.log.name)
def storageRefresh():
sdCache.refreshStorage()
lvm.bootstrap(refreshlvs=blockSD.SPECIAL_LVS)
self._ready = True
self.log.debug("HSM is ready")
storageRefreshThread = threading.Thread(target=storageRefresh,
name="storageRefresh")
storageRefreshThread.daemon = True
storageRefreshThread.start()
monitorInterval = config.getint('irs', 'sd_health_check_delay')
self.domainMonitor = monitor.DomainMonitor(monitorInterval)
@property
def ready(self):
return self._ready
@public
def registerDomainStateChangeCallback(self, callbackFunc):
"""
Register a state change callback function with the domain monitor.
"""
self.domainMonitor.onDomainStateChange.register(callbackFunc)
def _hsmSchedule(self, name, func, *args):
self.taskMng.scheduleJob("hsm", None, vars.task, name, func, *args)
def __validateLvmLockingType(self):
"""
Check lvm locking type.
"""
rc, out, err = misc.execCmd([constants.EXT_LVM, "dumpconfig",
"global/locking_type"],
sudo=True)
if rc != 0:
self.log.error("Can't validate lvm locking_type. %d %s %s",
rc, out, err)
return False
try:
lvmLockingType = int(out[0].split('=')[1])
except (ValueError, IndexError):
self.log.error("Can't parse lvm locking_type. %s", out)
return False
if lvmLockingType != 1:
self.log.error("Invalid lvm locking_type. %d", lvmLockingType)
return False
return True
def __cleanStorageRepository(self):
"""
Cleanup the storage repository leftovers
"""
self.log.debug("Started cleaning storage "
"repository at '%s'", self.storage_repository)
mountList = []
whiteList = [
self.tasksDir,
os.path.join(self.tasksDir, "*"),
os.path.join(self.storage_repository, 'mnt'),
]
def isInWhiteList(path):
fullpath = os.path.abspath(path)
# The readlink call doesn't follow nested symlinks like
# realpath but it doesn't hang on inaccessible mount points
if os.path.islink(fullpath):
symlpath = os.readlink(fullpath)
# If any os.path.join component is an absolute path all the
# previous paths will be discarded; therefore symlpath will
# be used when it is an absolute path.
basepath = os.path.dirname(fullpath)
fullpath = os.path.abspath(os.path.join(basepath, symlpath))
# Taking advantage of any()'s lazy evaluation
return any(fnmatch(fullpath, x) for x in whiteList)
# Add mounted folders to mountlist
for mnt in mount.iterMounts():
mountPoint = os.path.abspath(mnt.fs_file)
if mountPoint.startswith(self.storage_repository):
mountList.append(mountPoint)
self.log.debug("White list: %s", whiteList)
self.log.debug("Mount list: %s", mountList)
self.log.debug("Cleaning leftovers")
rmDirList = []
# We can't list files from top to bottom because the process
# would descend into mountpoints and an unreachable NFS storage
# could freeze the vdsm startup. Since we will ignore files in
# mounts anyway, using out of process file operations is useless.
# We just clean all directories before removing them from the
# innermost to the outermost.
for base, dirs, files in misc.walk(self.storage_repository,
blacklist=mountList):
for directory in dirs:
fullPath = os.path.join(base, directory)
if isInWhiteList(fullPath):
dirs.remove(directory)
else:
rmDirList.insert(0, os.path.join(base, fullPath))
for fname in files:
fullPath = os.path.join(base, fname)
if isInWhiteList(fullPath):
continue
try:
os.unlink(os.path.join(base, fullPath))
except Exception:
self.log.warn("Cold not delete file "
"'%s'", fullPath, exc_info=True)
for directory in rmDirList:
try:
# os.walk() can see a link to a directory as a directory
if os.path.islink(directory):
os.unlink(directory)
else:
os.rmdir(directory)
except Exception:
self.log.warn("Cold not delete directory "
"'%s'", directory, exc_info=True)
self.log.debug("Finished cleaning storage "
"repository at '%s'", self.storage_repository)
@public
def getConnectedStoragePoolsList(self, options=None):
"""
Get a list of all the connected storage pools.
:param options: Could be one or more of the following:
* OptionA - A good option. Chosen by most
* OptionB - A much more complex option. Only for the brave
:type options: list
"""
vars.task.setDefaultException(se.StoragePoolActionError())
return dict(poollist=self.pools.keys())
@public
def spmStart(self, spUUID, prevID, prevLVER,
maxHostID=clusterlock.MAX_HOST_ID, domVersion=None,
options=None):
"""
Starts an SPM.
:param spUUID: The storage pool you want managed.
:type spUUID: UUID
:param prevID: The previous ID of the SPM that managed this pool.
:type prevID: int
:param prevLVER: The previous version of the pool that was managed by
the SPM.
:type prevLVER: int
:param maxHostID: The maximum Host ID in the cluster.
:type maxHostID: int
:param options: unused
:returns: The UUID of the started task.
:rtype: UUID
"""
vars.task.setDefaultException(se.SpmStartError(
"spUUID=%s, prevID=%s, prevLVER=%s, maxHostID=%s, domVersion=%s"
% (spUUID, prevID, prevLVER, maxHostID, domVersion)))
if domVersion is not None:
domVersion = int(domVersion)
sd.validateDomainVersion(domVersion)
# This code is repeated twice for performance reasons
# Avoid waiting for the lock for validate.
self.getPool(spUUID)
self.validateNotSPM(spUUID)
vars.task.getExclusiveLock(STORAGE, spUUID)
pool = self.getPool(spUUID)
# We should actually just return true if we are SPM after lock,
# but seeing as it would break the API with Engine,
# it's easiest to fail.
self.validateNotSPM(spUUID)
self._hsmSchedule("spmStart", pool.startSpm, prevID, prevLVER,
maxHostID, domVersion)
@public
def spmStop(self, spUUID, options=None):
"""
Stops the SPM functionality.
:param spUUID: The UUID of the storage pool whose SPM you
want to stop.
:type spUUID: UUID
:param options: ?
:raises: :exc:`storage_exception.TaskInProgress`
if there are tasks running for this pool.
"""
vars.task.setDefaultException(se.SpmStopError(spUUID))
vars.task.getExclusiveLock(STORAGE, spUUID)
pool = self.getPool(spUUID)
pool.stopSpm()
@staticmethod
def _getSpmStatusInfo(pool):
return dict(
zip(('spmStatus', 'spmLver', 'spmId'),
(pool.spmRole,) + pool.getSpmStatus()))
@public
def getSpmStatus(self, spUUID, options=None):
pool = self.getPool(spUUID)
try:
status = self._getSpmStatusInfo(pool)
except (se.LogicalVolumeRefreshError, IOError):
# This happens when we cannot read the MD LV
self.log.error("Can't read LV based metadata", exc_info=True)
raise se.StorageDomainMasterError("Can't read LV based metadata")
except se.StorageException as e:
self.log.error("MD read error: %s", str(e), exc_info=True)
raise se.StorageDomainMasterError("MD read error")
except (KeyError, ValueError):
self.log.error("Non existent or invalid MD key", exc_info=True)
raise se.StorageDomainMasterError("Version or spm id invalid")
return dict(spm_st=status)
@public
def extendVolume(self, sdUUID, spUUID, imgUUID, volumeUUID, size,
isShuttingDown=None, options=None):
"""
Extends an existing volume.
.. note::
This method is valid for SAN only.
:param sdUUID: The UUID of the storage domain that contains the volume.
:type sdUUID: UUID
:param spUUID: The UUID of the storage pool that contains the volume.
:type spUUID: UUID
:param imgUUID: The UUID of the new image that is contained
on the volume.
:type imgUUID: UUID
:param volumeUUID: The UUID of the volume you want to extend.
:type volumeUUID: UUID
        :param size: Target volume size in bytes (desired final size, not by
                     how much to increase); converted internally to MB.
:type size: number (anything parsable by int(size))
:param isShuttingDown: ?
:type isShuttingDown: bool
:param options: ?
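
        Example (illustrative; the UUIDs are placeholders - note that
        ``size`` is given in bytes and converted to MB internally)::

            twenty_gib = 20 * 1024 ** 3
            hsm.extendVolume(sdUUID, spUUID, imgUUID, volUUID, twenty_gib)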
"""
vars.task.setDefaultException(
se.VolumeExtendingError(
"spUUID=%s, sdUUID=%s, volumeUUID=%s, size=%s" %
(spUUID, sdUUID, volumeUUID, size)))
size = misc.validateN(size, "size")
        # pool.extendVolume expects size in MB; use float division so
        # math.ceil actually rounds up (size is an int byte count)
        size = int(math.ceil(size / float(2 ** 20)))
pool = self.getPool(spUUID)
pool.extendVolume(sdUUID, volumeUUID, size, isShuttingDown)
@public
def extendVolumeSize(self, spUUID, sdUUID, imgUUID, volUUID, newSize):
pool = self.getPool(spUUID)
newSizeBytes = misc.validateN(newSize, "newSize")
newSizeSectors = (newSizeBytes + SECTOR_SIZE - 1) / SECTOR_SIZE
vars.task.getSharedLock(STORAGE, sdUUID)
self._spmSchedule(
spUUID, "extendVolumeSize", pool.extendVolumeSize, sdUUID,
imgUUID, volUUID, newSizeSectors)
@public
def updateVolumeSize(self, spUUID, sdUUID, imgUUID, volUUID, newSize):
"""
Update the volume size with the given newSize (in bytes).
This synchronous method is intended to be used only with COW volumes
        where the size can be updated by simply changing the qcow2 header.
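
        Example (illustrative; assumes ``volUUID`` is a writable leaf COW
        volume and the UUIDs are placeholders)::

            res = hsm.updateVolumeSize(spUUID, sdUUID, imgUUID, volUUID,
                                       10 * 1024 ** 3)
            # res['size'] is the rounded size in bytes, as a string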
"""
newSizeBytes = int(newSize)
domain = sdCache.produce(sdUUID=sdUUID)
volToExtend = domain.produceVolume(imgUUID, volUUID)
volPath = volToExtend.getVolumePath()
volFormat = volToExtend.getFormat()
if not volToExtend.isLeaf():
raise se.VolumeNonWritable(volUUID)
if volFormat != volume.COW_FORMAT:
# This method is used only with COW volumes (see docstring),
# for RAW volumes we just return the volume size.
return dict(size=str(volToExtend.getVolumeSize(bs=1)))
qemuImgFormat = volume.fmt2str(volume.COW_FORMAT)
volToExtend.prepare()
try:
imgInfo = qemuimg.info(volPath, qemuImgFormat)
if imgInfo['virtualsize'] > newSizeBytes:
self.log.error(
"volume %s size %s is larger than the size requested "
"for the extension %s", volUUID, imgInfo['virtualsize'],
newSizeBytes)
raise se.VolumeResizeValueError(str(newSizeBytes))
# Uncommit the current size
volToExtend.setSize(0)
qemuimg.resize(volPath, newSizeBytes, qemuImgFormat)
roundedSizeBytes = qemuimg.info(volPath,
qemuImgFormat)['virtualsize']
finally:
volToExtend.teardown(sdUUID, volUUID)
volToExtend.setSize(
(roundedSizeBytes + SECTOR_SIZE - 1) / SECTOR_SIZE)
return dict(size=str(roundedSizeBytes))
@public
def extendStorageDomain(self, sdUUID, spUUID, guids,
force=False, options=None):
"""
        Extends a VG (volume group) with additional devices.
.. note::
Currently the vg must be a storage domain.
:param sdUUID: The UUID of the storage domain that owns the VG.
:type sdUUID: UUID
:param spUUID: The UUID of the storage pool that owns the VG.
:type spUUID: UUID
:param guids: The list of device guids you want to extend the VG to.
:type guids: list of device guids. ``[guid1, guid2]``.
:param options: ?
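
        Example (illustrative; 'guid1'/'guid2' are placeholder multipath
        device names that get resolved under the device-mapper prefix)::

            hsm.extendStorageDomain(sdUUID, spUUID, ['guid1', 'guid2'])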
"""
vars.task.setDefaultException(
se.StorageDomainActionError(
"sdUUID=%s, devlist=%s" % (sdUUID, guids)))
vars.task.getSharedLock(STORAGE, sdUUID)
        # We need to let the domain extend itself
pool = self.getPool(spUUID)
dmDevs = tuple(os.path.join(devicemapper.DMPATH_PREFIX, guid) for guid
in guids)
pool.extendSD(sdUUID, dmDevs, force)
    def _detachStorageDomainFromOldPools(self, sdUUID):
        # We are called with a blank pool uuid, to avoid changing the
        # existing API which we want to drop in the next version anyway.
# So to get the pool we use the fact that there can be only one
# pool, and get the host id from it.
if len(self.pools) > 1:
raise AssertionError("Multiple pools are not supported")
try:
pool = self.pools.values()[0]
except IndexError:
raise se.StoragePoolNotConnected()
dom = sdCache.produce(sdUUID=sdUUID)
dom.acquireHostId(pool.id)
try:
dom.acquireClusterLock(pool.id)
try:
for domPoolUUID in dom.getPools():
dom.detach(domPoolUUID)
finally:
dom.releaseClusterLock()
finally:
dom.releaseHostId(pool.id)
@public
def forcedDetachStorageDomain(self, sdUUID, spUUID, options=None):
"""Forced detach a storage domain from a storage pool.
This removes the storage domain entry in the storage pool meta-data
and leaves the storage domain in 'unattached' status.
This action can only be performed on regular (i.e. non master)
domains.
"""
vars.task.setDefaultException(
se.StorageDomainActionError(
"sdUUID=%s, spUUID=%s" % (sdUUID, spUUID)))
if spUUID == sd.BLANK_UUID:
            self._detachStorageDomainFromOldPools(sdUUID)
else:
vars.task.getExclusiveLock(STORAGE, spUUID)
pool = self.getPool(spUUID)
if sdUUID == pool.masterDomain.sdUUID:
raise se.CannotDetachMasterStorageDomain(sdUUID)
pool.forcedDetachSD(sdUUID)
@public
def detachStorageDomain(self, sdUUID, spUUID, msdUUID=None,
masterVersion=None, options=None):
"""
Detaches a storage domain from a storage pool.
This removes the storage domain entry in the storage pool meta-data
and leaves the storage domain in 'unattached' status.
:param sdUUID: The UUID of the storage domain that you want to detach.
:type sdUUID: UUID
:param spUUID: The UUID of the storage pool that contains the storage
domain being detached.
:type spUUID: UUID
:param msdUUID: Obsolete (was: the UUID of the master domain).
:type msdUUID: UUID
:param masterVersion: Obsolete (was: the version of the pool).
:type masterVersion: int
:param options: ?
"""
vars.task.setDefaultException(
se.StorageDomainActionError(
"sdUUID=%s, spUUID=%s, msdUUID=%s, masterVersion=%s" %
(sdUUID, spUUID, msdUUID, masterVersion)))
vars.task.getExclusiveLock(STORAGE, spUUID)
vars.task.getExclusiveLock(STORAGE, sdUUID)
pool = self.getPool(spUUID)
pool.detachSD(sdUUID)
@public
def sendExtendMsg(self, spUUID, volDict, newSize, callbackFunc):
"""
        Sends a volume extend request to the SPM through the storage mailbox.
:param spUUID: The UUID of the storage pool you want to
send the message to.
:type spUUID: UUID
        :param volDict: ?
        :param newSize: The new size in bytes (converted internally to MB).
        :param callbackFunc: A function to run once the operation is done. ?
.. note::
If the pool doesn't exist the function will fail silently and the
callback will never be called.
"""
newSize = misc.validateN(newSize, "newSize") / 2 ** 20
try:
pool = self.getPool(spUUID)
except se.StoragePoolUnknown:
pass
else:
if pool.hsmMailer:
pool.hsmMailer.sendExtendMsg(volDict, newSize, callbackFunc)
def _spmSchedule(self, spUUID, name, func, *args):
self.validateSPM(spUUID)
pool = self.getPool(spUUID)
self.taskMng.scheduleJob("spm", pool.tasksDir, vars.task,
name, func, *args)
@public
def refreshStoragePool(self, spUUID, msdUUID,
masterVersion, options=None):
"""
Refresh the Storage Pool info in HSM.
:param spUUID: The UUID of the storage pool you want to refresh.
:type spUUID: UUID
:param msdUUID: The UUID of the master storage domain.
:type msdUUID: UUID
:param masterVersion: The master version of the storage pool.
:type masterVersion: uint
        :param options: Lots of options. ?
:returns: True if everything went as planned.
:rtype: bool
        :raises: a :exc:`Storage_Exception.StoragePoolMasterNotFound`
if the storage pool and the master storage domain don't
exist or don't match.
"""
vars.task.setDefaultException(
se.StoragePoolActionError(
"spUUID=%s, msdUUID=%s, masterVersion=%s" %
(spUUID, msdUUID, masterVersion)))
vars.task.getSharedLock(STORAGE, spUUID)
try:
# The refreshStoragePool command is an HSM command and
# should not be issued (and executed) on the SPM. At the
# moment we just ignore it for legacy reasons but in the
# future vdsm could raise an exception.
self.validateNotSPM(spUUID)
except se.IsSpm:
self.log.info("Ignoring the refreshStoragePool request "
"(the host is the SPM)")
return
pool = self.getPool(spUUID)
try:
pool.refresh(msdUUID, masterVersion)
self.validateSdUUID(msdUUID)
except:
self._disconnectPool(pool, pool.id, False)
raise
if pool.hsmMailer:
pool.hsmMailer.flushMessages()
@public
def createStoragePool(self, poolType, spUUID, poolName, masterDom,
domList, masterVersion, lockPolicy=None,
lockRenewalIntervalSec=None, leaseTimeSec=None,
ioOpTimeoutSec=None, leaseRetries=None,
options=None):
"""
Create new storage pool with single/multiple image data domain.
        The command will create new storage pool meta-data and attach each
        storage domain to that storage pool.
        At least one data (images) domain must be provided.
.. note::
            The master domain also needs to be listed in the domain list.
:param poolType: The type of the new storage pool.
:type poolType: Some enum?
:param spUUID: The UUID that the new storage pool will have
:type spUUID: UUID
:param poolName: The human readable name of the new pool.
:type poolName: str
        :param masterDom: The UUID of the master storage domain that
                          contains/will contain the pool's metadata.
:type masterDom: UUID
:param domList: A list of all the UUIDs of the storage domains managed
by this storage pool.
:type domList: UUID list
:param masterVersion: The master version of the storage pool meta data.
:type masterVersion: uint
:param lockPolicy: ?
:param lockRenewalIntervalSec: ?
:param leaseTimeSec: ?
        :param ioOpTimeoutSec: The default timeout for IO operations
                               in seconds. ?
        :type ioOpTimeoutSec: uint
:param leaseRetries: ?
:param options: ?
:returns: The newly created storage pool object.
:rtype: :class:`sp.StoragePool`
:raises: an :exc:`Storage_Exception.InvalidParameterException` if the
master domain is not supplied in the domain list.
"""
leaseParams = sd.packLeaseParams(
lockRenewalIntervalSec=lockRenewalIntervalSec,
leaseTimeSec=leaseTimeSec,
ioOpTimeoutSec=ioOpTimeoutSec,
leaseRetries=leaseRetries)
vars.task.setDefaultException(
se.StoragePoolCreationError(
"spUUID=%s, poolName=%s, masterDom=%s, domList=%s, "
"masterVersion=%s, clusterlock params: (%s)" %
(spUUID, poolName, masterDom, domList, masterVersion,
leaseParams)))
misc.validateUUID(spUUID, 'spUUID')
if masterDom not in domList:
raise se.InvalidParameterException("masterDom", str(masterDom))
if len(poolName) > MAX_POOL_DESCRIPTION_SIZE:
raise se.StoragePoolDescriptionTooLongError()
msd = sdCache.produce(sdUUID=masterDom)
msdType = msd.getStorageType()
msdVersion = msd.getVersion()
if (msdType in sd.BLOCK_DOMAIN_TYPES and
msdVersion in blockSD.VERS_METADATA_LV and
len(domList) > MAX_DOMAINS):
raise se.TooManyDomainsInStoragePoolError()
vars.task.getExclusiveLock(STORAGE, spUUID)
for dom in sorted(domList):
vars.task.getExclusiveLock(STORAGE, dom)
pool = sp.StoragePool(spUUID, self.domainMonitor, self.taskMng)
pool.setBackend(StoragePoolDiskBackend(pool))
return pool.create(poolName, masterDom, domList, masterVersion,
leaseParams)
@public
def connectStoragePool(self, spUUID, hostID, msdUUID, masterVersion,
domainsMap=None, options=None):
"""
Connect a Host to a specific storage pool.
:param spUUID: The UUID of the storage pool you want to connect to.
:type spUUID: UUID
:param hostID: The hostID to be used for clustered locking.
:type hostID: int
:param msdUUID: The UUID for the pool's master domain.
:type msdUUID: UUID
:param masterVersion: The expected master version. Used for validation.
:type masterVersion: int
:param options: ?
:returns: :keyword:`True` if connection was successful.
:rtype: bool
        :raises: :exc:`storage_exception.CannotConnectMultiplePools` when
                 the host is already connected to a different storage pool.
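
        Example (an illustrative sketch; ``hsm`` is an assumed, already
        initialized HSM instance and the UUIDs are placeholders)::

            hsm.connectStoragePool(spUUID, hostID=1, msdUUID=msdUUID,
                                   masterVersion=1)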
"""
vars.task.setDefaultException(
se.StoragePoolConnectionError(
"spUUID=%s, msdUUID=%s, masterVersion=%s, hostID=%s, "
"domainsMap=%s" %
(spUUID, msdUUID, masterVersion, hostID, domainsMap)))
with rmanager.acquireResource(STORAGE, HSM_DOM_MON_LOCK,
rm.LockType.exclusive):
return self._connectStoragePool(
spUUID, hostID, msdUUID, masterVersion, domainsMap)
@staticmethod
def _updateStoragePool(pool, hostId, msdUUID, masterVersion, domainsMap):
if hostId != pool.id:
raise se.StoragePoolConnected(
"hostId=%s, newHostId=%s" % (pool.id, hostId))
if domainsMap is None:
if not isinstance(pool.getBackend(), StoragePoolDiskBackend):
raise se.StoragePoolConnected('Cannot downgrade pool backend')
else:
if isinstance(pool.getBackend(), StoragePoolMemoryBackend):
pool.getBackend().updateVersionAndDomains(
masterVersion, domainsMap)
else:
# Live pool backend upgrade
pool.setBackend(
StoragePoolMemoryBackend(pool, masterVersion, domainsMap))
pool.refresh(msdUUID, masterVersion)
def _connectStoragePool(self, spUUID, hostID, msdUUID,
masterVersion, domainsMap=None, options=None):
misc.validateUUID(spUUID, 'spUUID')
# TBD: To support multiple pool connection on single host,
# we'll need to remove this validation
if len(self.pools) and spUUID not in self.pools:
raise se.CannotConnectMultiplePools(str(self.pools.keys()))
try:
self.getPool(spUUID)
except se.StoragePoolUnknown:
pass # pool not connected yet
else:
with rmanager.acquireResource(STORAGE, spUUID, rm.LockType.shared):
# FIXME: this breaks in case of a race as it assumes that the
# pool is still available. At the moment we maintain this
# behavior as it's inherited from the previous implementation
# but the problem must be addressed (possibly improving the
# entire locking pattern used in this method).
self._updateStoragePool(self.getPool(spUUID), hostID, msdUUID,
masterVersion, domainsMap)
return True
with rmanager.acquireResource(STORAGE, spUUID, rm.LockType.exclusive):
try:
pool = self.getPool(spUUID)
except se.StoragePoolUnknown:
pass # pool not connected yet
else:
self._updateStoragePool(pool, hostID, msdUUID, masterVersion,
domainsMap)
return True
pool = sp.StoragePool(spUUID, self.domainMonitor, self.taskMng)
if domainsMap is None:
pool.setBackend(StoragePoolDiskBackend(pool))
else:
pool.setBackend(
StoragePoolMemoryBackend(pool, masterVersion, domainsMap))
res = pool.connect(hostID, msdUUID, masterVersion)
if res:
self.pools[spUUID] = pool
return res
@public
def disconnectStoragePool(self, spUUID, hostID, remove=False,
options=None):
"""
Disconnect a Host from a specific storage pool.
:param spUUID: The UUID of the storage pool you want to disconnect.
:type spUUID: UUID
:param hostID: The ID of the host you want to disconnect the pool from.
:type hostID: int
:param remove: ?
:type remove: bool
:param options: ?
:returns: :keyword:`True` if disconnection was successful.
:rtype: bool
.. note::
if storage pool is not connected or doesn't exist the operation
will log and exit silently.
"""
vars.task.setDefaultException(
se.StoragePoolDisconnectionError(
"spUUID=%s, hostID=%s" % (spUUID, hostID)))
misc.validateN(hostID, 'hostID')
        # already disconnected or pool is simply unknown - return OK
try:
pool = self.getPool(spUUID)
except se.StoragePoolUnknown:
self.log.warning("disconnect sp: %s failed. Known pools %s",
spUUID, self.pools)
return
self.validateNotSPM(spUUID)
vars.task.getExclusiveLock(STORAGE, spUUID)
pool = self.getPool(spUUID)
return self._disconnectPool(pool, hostID, remove)
def _disconnectPool(self, pool, hostID, remove):
self.validateNotSPM(pool.spUUID)
with rmanager.acquireResource(STORAGE, HSM_DOM_MON_LOCK,
rm.LockType.exclusive):
res = pool.disconnect()
del self.pools[pool.spUUID]
return res
@public
def destroyStoragePool(self, spUUID, hostID, options=None):
"""
Destroy a storage pool.
The command will detach all inactive domains from the pool
and delete the pool with all its links.
:param spUUID: The UUID of the storage pool you want to destroy.
:type spUUID: UUID
:param hostID: The ID of the host managing this storage pool. ?
:type hostID: int
:param options: ?
"""
vars.task.setDefaultException(
se.StoragePoolDestroyingError(
"spUUID=%s, hostID=%s" % (spUUID, hostID)))
self.log.info("spUUID=%s", spUUID)
pool = self.getPool(spUUID)
        if pool.id != hostID:
raise se.HostIdMismatch(spUUID)
vars.task.getExclusiveLock(STORAGE, pool.spUUID)
# Find out domain list from the pool metadata
domList = sorted(pool.getDomains().keys())
for sdUUID in domList:
vars.task.getExclusiveLock(STORAGE, sdUUID)
pool.detachAllDomains()
return self._disconnectPool(pool, hostID, remove=True)
@public
def attachStorageDomain(self, sdUUID, spUUID, options=None):
"""
Attach a storage domain to a storage pool.
        This marks the storage domain as status 'attached' and links it to
        the storage pool.
        .. note::
            The target domain must be accessible at this point
(storage connected)
:param sdUUID: The UUID of the storage domain that you want to attach.
:type sdUUID: UUID
:param spUUID: The UUID of the storage pool that contains the storage
domain being attached.
:type spUUID: UUID
:param options: ?
"""
vars.task.setDefaultException(
se.StorageDomainActionError(
"sdUUID=%s, spUUID=%s" % (sdUUID, spUUID)))
vars.task.getExclusiveLock(STORAGE, spUUID)
vars.task.getExclusiveLock(STORAGE, sdUUID)
pool = self.getPool(spUUID)
pool.attachSD(sdUUID)
@public
def deactivateStorageDomain(self, sdUUID, spUUID, msdUUID,
masterVersion, options=None):
"""
1. Deactivates a storage domain.
2. Validates that the storage domain is owned by the storage pool.
3. Disables access to that storage domain.
4. Changes storage domain status to 'Inactive' in the storage pool
meta-data.
.. note::
Disconnected storage domains are not monitored by the host.
:param sdUUID: The UUID of the storage domain that you want to
deactivate.
:type sdUUID: UUID
:param spUUID: The UUID of the storage pool that contains the storage
domain being deactivated.
:type spUUID: UUID
:param msdUUID: The UUID of the master domain.
:type msdUUID: UUID
:param masterVersion: The version of the pool.
:type masterVersion: int
:param options: ?
"""
vars.task.setDefaultException(
se.StorageDomainActionError(
"sdUUID=%s, spUUID=%s, msdUUID=%s, masterVersion=%s" %
(sdUUID, spUUID, msdUUID, masterVersion)
)
)
vars.task.getExclusiveLock(STORAGE, spUUID)
vars.task.getExclusiveLock(STORAGE, sdUUID)
pool = self.getPool(spUUID)
pool.deactivateSD(sdUUID, msdUUID, masterVersion)
@public
def activateStorageDomain(self, sdUUID, spUUID, options=None):
"""
Activates a storage domain that is already a member in a storage pool.
:param sdUUID: The UUID of the storage domain that you want to
activate.
:type sdUUID: UUID
:param spUUID: The UUID of the storage pool that contains the storage
domain being activated.
:type spUUID: UUID
:param options: ?
"""
vars.task.setDefaultException(
se.StorageDomainActionError(
"sdUUID=%s, spUUID=%s" % (sdUUID, spUUID)))
vars.task.getExclusiveLock(STORAGE, spUUID)
vars.task.getExclusiveLock(STORAGE, sdUUID)
pool = self.getPool(spUUID)
pool.activateSD(sdUUID)
@public
def setStoragePoolDescription(self, spUUID, description, options=None):
"""
Sets the storage pool's description.
        :param spUUID: The UUID of the storage pool whose description you
                       want to set.
:type spUUID: UUID
:param description: A human readable description of the storage pool.
:type description: str
:param options: ?
"""
vars.task.setDefaultException(
se.StoragePoolActionError(
"spUUID=%s, descr=%s" % (spUUID, description)))
vars.task.getExclusiveLock(STORAGE, spUUID)
pool = self.getPool(spUUID)
if isinstance(pool.getBackend(), StoragePoolDiskBackend):
pool.getBackend().setDescription(description)
@public
def setVolumeDescription(self, sdUUID, spUUID, imgUUID, volUUID,
description, options=None):
"""
Sets a Volume's Description
:param spUUID: The UUID of the storage pool that contains the volume
being modified.
:type spUUID: UUID
:param sdUUID: The UUID of the storage domain that contains the volume.
:type sdUUID: UUID
:param imgUUID: The UUID of the image that is contained on the volume.
:type imgUUID: UUID
:param volUUID: The UUID of the volume you want to modify.
:type volUUID: UUID
:param description: The new human readable description of the volume.
:type description: str
"""
vars.task.getSharedLock(STORAGE, sdUUID)
pool = self.getPool(spUUID)
pool.setVolumeDescription(sdUUID, imgUUID, volUUID, description)
@public
def setVolumeLegality(self, sdUUID, spUUID, imgUUID, volUUID, legality,
options=None):
"""
Sets a Volume's Legality
:param spUUID: The UUID of the storage pool that contains the volume
being modified.
:type spUUID: UUID
:param sdUUID: The UUID of the storage domain that contains the volume.
:type sdUUID: UUID
:param imgUUID: The UUID of the image that is contained on the volume.
:type imgUUID: UUID
:param volUUID: The UUID of the volume you want to modify.
:type volUUID: UUID
        :param legality: The legality status of the volume. ?
        :type legality: ?
"""
vars.task.getSharedLock(STORAGE, sdUUID)
pool = self.getPool(spUUID)
pool.setVolumeLegality(sdUUID, imgUUID, volUUID, legality)
@public
def updateVM(self, spUUID, vmList, sdUUID=None, options=None):
"""
Updates a VM list in a storage pool or in a Backup domain.
Creates the VMs if a domain with the specified UUID does not exist.
.. note::
Should be called by VDC for every change of VM (add or remove
snapshots, updates, ...)
:param spUUID: The UUID of the storage pool that contains the VMs
being updated or created.
:type spUUID: UUID
:param vmList: The list of VMs being updated.?
:type vmList: list
        :param sdUUID: The UUID of the backup domain you want to update or
                       :keyword:`None` to use the pool's master domain.
:type sdUUID: UUID
:param options: ?
"""
vars.task.getSharedLock(STORAGE, spUUID)
pool = self.getPool(spUUID)
if not sdUUID or sdUUID == sd.BLANK_UUID:
sdUUID = pool.masterDomain.sdUUID
vars.task.getExclusiveLock(STORAGE, "vms_" + sdUUID)
pool.updateVM(vmList=vmList, sdUUID=sdUUID)
@public
def removeVM(self, spUUID, vmUUID, sdUUID=None, options=None):
"""
Removes a VM list from a storage pool or from a Backup domain.
:param spUUID: The UUID of the storage pool that contains the VMs
being removed.
:type spUUID: UUID
:param vmUUID: The UUID of VM being removed.
:type vmUUID: UUID
        :param sdUUID: The UUID of the backup domain you want to update or
                       :keyword:`None` to use the pool's master domain.
:type sdUUID: UUID
:param options: ?
"""
vars.task.getSharedLock(STORAGE, spUUID)
pool = self.getPool(spUUID)
if not sdUUID or sdUUID == sd.BLANK_UUID:
sdUUID = pool.masterDomain.sdUUID
vars.task.getSharedLock(STORAGE, "vms_" + sdUUID)
vars.task.getExclusiveLock(STORAGE, "vms_%s_%s" % (vmUUID, sdUUID))
pool.removeVM(vmUUID=vmUUID, sdUUID=sdUUID)
@public
def getVmsList(self, spUUID, sdUUID=None, options=None):
"""
Gets a list of VMs from the pool.
If 'sdUUID' is given and it's a backup domain the function will get
the list of VMs from it
:param spUUID: The UUID of the storage pool that you want to query.
:type spUUID: UUID
        :param sdUUID: The UUID of the backup domain that you want to
                       query or :keyword:`None`.
:type sdUUID: UUID
:param options: ?
"""
pool = self.getPool(spUUID)
if not sdUUID or sdUUID == sd.BLANK_UUID:
sdUUID = pool.masterDomain.sdUUID
vars.task.getSharedLock(STORAGE, sdUUID)
vms = pool.getVmsList(sdUUID)
return dict(vmlist=vms)
@public
def getVmsInfo(self, spUUID, sdUUID, vmList=None, options=None):
"""
Gets a list of VMs with their info from the pool.
* If 'sdUUID' is given and it's a backup domain then get the list of
VMs from it.
* If 'vmList' is given get info for these VMs only.
:param spUUID: The UUID of the storage pool that you want to query.
:type spUUID: UUID
        :param sdUUID: The UUID of the backup domain that you want to
                       query or :keyword:`None`.
:type sdUUID: UUID
:param vmList: A UUID list of the VMs you want info on or
:keyword:`None` for all VMs in pool or backup domain.
:param options: ?
"""
pool = self.getPool(spUUID)
if sdUUID and sdUUID != sd.BLANK_UUID:
# Only backup domains are allowed in this path
self.validateBackupDom(sdUUID)
else:
sdUUID = pool.masterDomain.sdUUID
vars.task.getSharedLock(STORAGE, sdUUID)
vms = pool.getVmsInfo(sdUUID, vmList)
return dict(vmlist=vms)
@public
def createVolume(self, sdUUID, spUUID, imgUUID, size, volFormat,
preallocate, diskType, volUUID, desc,
srcImgUUID=volume.BLANK_UUID,
srcVolUUID=volume.BLANK_UUID):
"""
Create a new volume
Function Type: SPM
Parameters:
Return Value:
"""
argsStr = ("sdUUID=%s, spUUID=%s, imgUUID=%s, size=%s, volFormat=%s, "
"preallocate=%s, diskType=%s, volUUID=%s, desc=%s, "
"srcImgUUID=%s, srcVolUUID=%s" %
(sdUUID, spUUID, imgUUID, size, volFormat, preallocate,
diskType, volUUID, desc,
srcImgUUID, srcVolUUID))
vars.task.setDefaultException(se.VolumeCreationError(argsStr))
# Validates that the pool is connected. WHY?
pool = self.getPool(spUUID)
dom = sdCache.produce(sdUUID=sdUUID)
misc.validateUUID(imgUUID, 'imgUUID')
misc.validateUUID(volUUID, 'volUUID')
        # TODO: For backwards compatibility, we need to support accepting
        # number of sectors as int type. The updated interface accepts a
        # string type in bytes (ugly, get rid of this when possible).
if not isinstance(size, types.IntType):
size = misc.validateN(size, "size")
size = (size + SECTOR_SIZE - 1) / SECTOR_SIZE
if srcImgUUID:
misc.validateUUID(srcImgUUID, 'srcImgUUID')
if srcVolUUID:
misc.validateUUID(srcVolUUID, 'srcVolUUID')
# Validate volume type and format
dom.validateCreateVolumeParams(volFormat, srcVolUUID,
preallocate=preallocate)
vars.task.getSharedLock(STORAGE, sdUUID)
self._spmSchedule(spUUID, "createVolume", pool.createVolume, sdUUID,
imgUUID, size, volFormat, preallocate, diskType,
volUUID, desc, srcImgUUID, srcVolUUID)
@public
def deleteVolume(self, sdUUID, spUUID, imgUUID, volumes, postZero=False,
force=False):
"""
Delete a volume
"""
argsStr = "sdUUID=%s, spUUID=%s, imgUUID=%s, volumes=%s, " \
"postZero=%s, force=%s" % (sdUUID, spUUID, imgUUID, volumes,
postZero, force)
vars.task.setDefaultException(se.CannotDeleteVolume(argsStr))
# Validates that the pool is connected. WHY?
pool = self.getPool(spUUID)
misc.validateUUID(imgUUID, 'imgUUID')
vars.task.getSharedLock(STORAGE, sdUUID)
self._spmSchedule(spUUID, "deleteVolume", pool.deleteVolume, sdUUID,
imgUUID, volumes, misc.parseBool(postZero),
misc.parseBool(force))
@public
def deleteImage(self, sdUUID, spUUID, imgUUID, postZero=False,
force=False):
"""
Delete Image folder with all volumes
force parameter is deprecated and not evaluated.
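
        Example (illustrative; the UUIDs are placeholders - postZero=True
        is meaningful only on block domains, where volumes are zeroed
        before removal)::

            hsm.deleteImage(sdUUID, spUUID, imgUUID, postZero=False)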
"""
# vars.task.setDefaultException(se.ChangeMeError("%s" % args))
pool = self.getPool(spUUID)
dom = sdCache.produce(sdUUID=sdUUID)
# Taking an exclusive lock on both imgUUID and sdUUID since
# an image can exist on two SDs concurrently (e.g. during LSM flow);
# hence, we need a unique identifier.
vars.task.getExclusiveLock(STORAGE, "%s_%s" % (imgUUID, sdUUID))
vars.task.getSharedLock(STORAGE, sdUUID)
allVols = dom.getAllVolumes()
volsByImg = sd.getVolsOfImage(allVols, imgUUID)
if not volsByImg:
self.log.error("Empty or not found image %s in SD %s. %s",
imgUUID, sdUUID, allVols)
raise se.ImageDoesNotExistInSD(imgUUID, sdUUID)
# on data domains, images should not be deleted if they are templates
# being used by other images.
fakeTUUID = None
for k, v in volsByImg.iteritems():
if len(v.imgs) > 1 and v.imgs[0] == imgUUID:
if dom.isBackup():
fakeTUUID = k
else:
raise se.CannotDeleteSharedVolume("Cannot delete shared "
"image %s. volImgs: %s" %
(imgUUID, volsByImg))
break
# zeroImage will delete zeroed volumes at the end.
if misc.parseBool(postZero):
# postZero implies block domain. Backup domains are always NFS
# hence no need to create fake template if postZero is true.
self._spmSchedule(spUUID, "zeroImage_%s" % imgUUID, dom.zeroImage,
sdUUID, imgUUID, volsByImg)
else:
if fakeTUUID:
tParams = dom.produceVolume(imgUUID, fakeTUUID).\
getVolumeParams()
pool.deleteImage(dom, imgUUID, volsByImg)
# This is a hack to keep the interface consistent
# We currently have race conditions in delete image, to quickly fix
# this we delete images in the "synchronous" state. This only works
# because Engine does not send two requests at a time. This hack is
# intended to quickly fix the integration issue with Engine. In 2.3
        # we should use the new resource system to synchronize the process
        # and eliminate all race conditions.
if fakeTUUID:
img = image.Image(os.path.join(self.storage_repository,
spUUID))
img.createFakeTemplate(sdUUID=sdUUID, volParams=tParams)
self._spmSchedule(spUUID, "deleteImage_%s" % imgUUID, lambda: True)
def validateImageMove(self, srcDom, dstDom, imgUUID):
"""
Determines if the image move is legal.
Moving an image based on a template to a data domain is only allowed if
the template exists on the target domain.
Moving a template from a data domain is only allowed if there are no
images based on it in the source data domain.
"""
srcAllVols = srcDom.getAllVolumes()
dstAllVols = dstDom.getAllVolumes()
# Filter volumes related to this image
srcVolsImgs = sd.getVolsOfImage(srcAllVols, imgUUID)
# Find the template
for volName, imgsPar in srcVolsImgs.iteritems():
if len(imgsPar.imgs) > 1:
# This is the template. Should be only one.
tName, tImgs = volName, imgsPar.imgs
# Template self image is the 1st entry
if imgUUID != tImgs[0] and tName not in dstAllVols.keys():
self.log.error(
"img %s can't be moved to dom %s because template "
"%s is absent on it", imgUUID, dstDom.sdUUID, tName)
e = se.ImageDoesNotExistInSD(imgUUID, dstDom.sdUUID)
e.absentTemplateUUID = tName
e.absentTemplateImageUUID = tImgs[0]
raise e
elif imgUUID == tImgs[0] and not srcDom.isBackup():
raise se.MoveTemplateImageError(imgUUID)
break
return True
@public
def moveImage(self, spUUID, srcDomUUID, dstDomUUID, imgUUID, vmUUID,
op, postZero=False, force=False):
"""
Move/Copy image between storage domains within same storage pool
"""
argsStr = ("spUUID=%s, srcDomUUID=%s, dstDomUUID=%s, imgUUID=%s, "
"vmUUID=%s, op=%s, force=%s, postZero=%s force=%s" %
(spUUID, srcDomUUID, dstDomUUID, imgUUID, vmUUID, op,
force, postZero, force))
vars.task.setDefaultException(se.MoveImageError("%s" % argsStr))
if srcDomUUID == dstDomUUID:
raise se.InvalidParameterException(
"srcDom", "must be different from dstDom: %s" % argsStr)
srcDom = sdCache.produce(sdUUID=srcDomUUID)
dstDom = sdCache.produce(sdUUID=dstDomUUID)
# Validates that the pool is connected. WHY?
pool = self.getPool(spUUID)
try:
self.validateImageMove(srcDom, dstDom, imgUUID)
except se.ImageDoesNotExistInSD as e:
if not dstDom.isBackup():
raise
else:
# Create an ad-hoc fake template only on a backup SD
tName = e.absentTemplateUUID
tImgUUID = e.absentTemplateImageUUID
tParams = srcDom.produceVolume(tImgUUID,
tName).getVolumeParams()
image.Image(os.path.join(self.storage_repository, spUUID)
).createFakeTemplate(dstDom.sdUUID, tParams)
domains = [srcDomUUID, dstDomUUID]
domains.sort()
for dom in domains:
vars.task.getSharedLock(STORAGE, dom)
self._spmSchedule(
spUUID, "moveImage_%s" % imgUUID, pool.moveImage, srcDomUUID,
dstDomUUID, imgUUID, vmUUID, op, misc.parseBool(postZero),
misc.parseBool(force))
@public
def sparsifyImage(self, spUUID, tmpSdUUID, tmpImgUUID, tmpVolUUID,
dstSdUUID, dstImgUUID, dstVolUUID):
"""
Reduce sparse image size by converting free space on image to free
space on storage domain using virt-sparsify.
"""
pool = self.getPool(spUUID)
sdUUIDs = sorted(set((tmpSdUUID, dstSdUUID)))
for dom in sdUUIDs:
vars.task.getSharedLock(STORAGE, dom)
self._spmSchedule(spUUID, "sparsifyImage", pool.sparsifyImage,
tmpSdUUID, tmpImgUUID, tmpVolUUID, dstSdUUID,
dstImgUUID, dstVolUUID)
@public
def cloneImageStructure(self, spUUID, sdUUID, imgUUID, dstSdUUID):
"""
Clone an image structure (volume chain) to a destination domain within
the same pool.
"""
sdCache.produce(sdUUID=sdUUID)
sdCache.produce(sdUUID=dstSdUUID)
for dom in sorted((sdUUID, dstSdUUID)):
vars.task.getSharedLock(STORAGE, dom)
pool = self.getPool(spUUID)
self._spmSchedule(spUUID, "cloneImageStructure",
pool.cloneImageStructure, sdUUID, imgUUID, dstSdUUID)
@public
def syncImageData(self, spUUID, sdUUID, imgUUID, dstSdUUID, syncType):
"""
Copy the internal data between image structures (volume chain) within
the same pool.
"""
sdCache.produce(sdUUID=sdUUID)
sdCache.produce(sdUUID=dstSdUUID)
for dom in sorted((sdUUID, dstSdUUID)):
vars.task.getSharedLock(STORAGE, dom)
pool = self.getPool(spUUID)
self._spmSchedule(spUUID, "syncImageData", pool.syncImageData,
sdUUID, imgUUID, dstSdUUID, syncType)
@public
def uploadImage(self, methodArgs, spUUID, sdUUID, imgUUID, volUUID=None):
"""
Upload an image to a remote endpoint using the specified method and
methodArgs.
"""
sdCache.produce(sdUUID)
pool = self.getPool(spUUID)
# NOTE: this could become an hsm task
self._spmSchedule(spUUID, "uploadImage", pool.uploadImage,
methodArgs, sdUUID, imgUUID, volUUID)
@public
def downloadImage(self, methodArgs, spUUID, sdUUID, imgUUID, volUUID=None):
"""
Download an image from a remote endpoint using the specified method
and methodArgs.
"""
sdCache.produce(sdUUID)
pool = self.getPool(spUUID)
# NOTE: this could become an hsm task, in such case the LV extension
# required to prepare the destination should go through the mailbox.
self._spmSchedule(spUUID, "downloadImage", pool.downloadImage,
methodArgs, sdUUID, imgUUID, volUUID)
@public
def uploadImageToStream(self, methodArgs, callback, startEvent, spUUID,
sdUUID, imgUUID, volUUID=None):
"""
Uploads an image to a stream.
Warning: Internal use only.
"""
sdCache.produce(sdUUID)
pool = self.getPool(spUUID)
# NOTE: this could become an hsm task
self._spmSchedule(spUUID, "uploadImageToStream",
pool.uploadImageToStream, methodArgs, callback,
startEvent, sdUUID, imgUUID, volUUID)
@public
def downloadImageFromStream(self, methodArgs, callback, spUUID, sdUUID,
imgUUID, volUUID=None):
"""
Download an image from a stream.
Warning: Internal use only.
"""
sdCache.produce(sdUUID)
pool = self.getPool(spUUID)
# NOTE: this could become an hsm task, in such case the LV extension
# required to prepare the destination should go through the mailbox.
self._spmSchedule(spUUID, "downloadImageFromStream",
pool.downloadImageFromStream, methodArgs, callback,
sdUUID, imgUUID, volUUID)
@deprecated
@public
def moveMultipleImages(self, spUUID, srcDomUUID, dstDomUUID, imgDict,
vmUUID, force=False):
"""
Move multiple images between storage domains within same storage pool
"""
argsStr = ("spUUID=%s, srcDomUUID=%s, dstDomUUID=%s, imgDict=%s, "
"vmUUID=%s force=%s" %
(spUUID, srcDomUUID, dstDomUUID, imgDict, vmUUID, force))
vars.task.setDefaultException(
se.MultipleMoveImageError("%s" % argsStr))
if srcDomUUID == dstDomUUID:
raise se.InvalidParameterException("dstDomUUID", dstDomUUID)
# Validates that the pool is connected. WHY?
pool = self.getPool(spUUID)
srcDom = sdCache.produce(sdUUID=srcDomUUID)
dstDom = sdCache.produce(sdUUID=dstDomUUID)
images = {}
for (imgUUID, pZero) in imgDict.iteritems():
images[imgUUID.strip()] = misc.parseBool(pZero)
try:
self.validateImageMove(srcDom, dstDom, imgUUID)
except se.ImageDoesNotExistInSD as e:
if not dstDom.isBackup():
raise
else:
# Create an ad-hoc fake template only on a backup SD
tName = e.absentTemplateUUID
tImgUUID = e.absentTemplateImageUUID
tParams = srcDom.produceVolume(tImgUUID,
tName).getVolumeParams()
image.Image(os.path.join(self.storage_repository, spUUID)
).createFakeTemplate(dstDom.sdUUID, tParams)
domains = sorted([srcDomUUID, dstDomUUID])
for dom in domains:
vars.task.getSharedLock(STORAGE, dom)
self._spmSchedule(
spUUID, "moveMultipleImages", pool.moveMultipleImages,
srcDomUUID, dstDomUUID, images, vmUUID, misc.parseBool(force))
@public
def copyImage(
self, sdUUID, spUUID, vmUUID, srcImgUUID, srcVolUUID, dstImgUUID,
dstVolUUID, description='', dstSdUUID=sd.BLANK_UUID,
volType=volume.SHARED_VOL, volFormat=volume.UNKNOWN_VOL,
preallocate=volume.UNKNOWN_VOL, postZero=False, force=False):
"""
        Create a new template/volume from a VM.
        Do it by collapsing and copying the whole chain
        (baseVolUUID->srcVolUUID).
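
        Example (an illustrative sketch; all UUIDs and the description
        are placeholders)::

            hsm.copyImage(sdUUID, spUUID, vmUUID, srcImgUUID, srcVolUUID,
                          dstImgUUID, dstVolUUID,
                          description='collapsed copy')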
"""
argsStr = ("sdUUID=%s, spUUID=%s, vmUUID=%s, srcImgUUID=%s, "
"srcVolUUID=%s, dstImgUUID=%s, dstVolUUID=%s, "
"description=%s, dstSdUUID=%s, volType=%s, volFormat=%s, "
"preallocate=%s force=%s, postZero=%s" %
(sdUUID, spUUID, vmUUID, srcImgUUID, srcVolUUID,
dstImgUUID, dstVolUUID, description, dstSdUUID, volType,
volFormat, preallocate, force, postZero))
vars.task.setDefaultException(se.TemplateCreationError("%s" % argsStr))
# Validate imgUUID in case of copy inside source domain itself
if dstSdUUID in (sdUUID, sd.BLANK_UUID):
if srcImgUUID == dstImgUUID:
raise se.InvalidParameterException("dstImgUUID", dstImgUUID)
pool = self.getPool(spUUID)
sdCache.produce(sdUUID=sdUUID)
        # Avoid copying a VM if any of its volumes (including the template,
        # if one exists) is ILLEGAL/FAKE
pool.validateVolumeChain(sdUUID, srcImgUUID)
# Validate volume type and format
if dstSdUUID != sd.BLANK_UUID:
dom = dstSdUUID
else:
dom = sdUUID
sdCache.produce(dom).validateCreateVolumeParams(
volFormat, volume.BLANK_UUID, preallocate)
        # If dstSdUUID is defined, it means we copy the image to it
domains = [sdUUID]
if dstSdUUID not in [sdUUID, sd.BLANK_UUID]:
sdCache.produce(sdUUID=dstSdUUID)
domains.append(dstSdUUID)
domains.sort()
for dom in domains:
vars.task.getSharedLock(STORAGE, dom)
self._spmSchedule(
spUUID, "copyImage_%s" % dstImgUUID, pool.copyImage, sdUUID,
vmUUID, srcImgUUID, srcVolUUID, dstImgUUID, dstVolUUID,
description, dstSdUUID, volType, volFormat, preallocate,
misc.parseBool(postZero), misc.parseBool(force))
@public
def imageSyncVolumeChain(self, sdUUID, imgUUID, volUUID, newChain):
"""
Update storage metadata for an image chain after a live merge
completes. Since this is called from the HSM where the VM is running,
we cannot modify the LVM tag that stores the parent UUID for block
volumes. In this case we update the chain in the metadata LV only.
The LV tag will be fixed when the unlinked volume is deleted by an SPM.
"""
argsStr = ("sdUUID=%s, imgUUID=%s, volUUID=%s, newChain=%s" %
(sdUUID, imgUUID, volUUID, newChain))
vars.task.setDefaultException(se.StorageException("%s" % argsStr))
sdDom = sdCache.produce(sdUUID=sdUUID)
repoPath = os.path.join(self.storage_repository, sdDom.getPools()[0])
imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
with rmanager.acquireResource(imageResourcesNamespace, imgUUID,
rm.LockType.shared):
image.Image(repoPath).syncVolumeChain(sdUUID, imgUUID, volUUID,
newChain)
@public
def reconcileVolumeChain(self, spUUID, sdUUID, imgUUID, leafVolUUID):
"""
In some situations (such as when a live merge is interrupted), the
vdsm volume chain could become out of sync with the actual chain as
understood by qemu. This API uses qemu-img to determine the correct
chain and synchronizes vdsm metadata accordingly. Returns the correct
volume chain. NOT for use on images of running VMs.
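
        Example (illustrative; the UUIDs are placeholders and the VM
        owning the image must not be running)::

            chain = hsm.reconcileVolumeChain(spUUID, sdUUID, imgUUID,
                                             leafVolUUID)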
"""
argsStr = ("spUUID=%s, sdUUID=%s, imgUUID=%s, leafVolUUID=%s" %
(spUUID, sdUUID, imgUUID, leafVolUUID))
vars.task.setDefaultException(se.StorageException("%s" % argsStr))
pool = self.getPool(spUUID)
sdCache.produce(sdUUID=sdUUID)
vars.task.getSharedLock(STORAGE, sdUUID)
return pool.reconcileVolumeChain(sdUUID, imgUUID, leafVolUUID)
@public
def mergeSnapshots(self, sdUUID, spUUID, vmUUID, imgUUID, ancestor,
successor, postZero=False):
"""
        Merge the source volume into the destination volume.
"""
argsStr = ("sdUUID=%s, spUUID=%s, vmUUID=%s, imgUUID=%s, "
"ancestor=%s, successor=%s, postZero=%s" %
(sdUUID, spUUID, vmUUID, imgUUID, ancestor, successor,
postZero))
vars.task.setDefaultException(se.MergeSnapshotsError("%s" % argsStr))
pool = self.getPool(spUUID)
sdCache.produce(sdUUID=sdUUID)
vars.task.getSharedLock(STORAGE, sdUUID)
self._spmSchedule(
spUUID, "mergeSnapshots", pool.mergeSnapshots, sdUUID, vmUUID,
imgUUID, ancestor, successor, misc.parseBool(postZero))
@public
def reconstructMaster(self, spUUID, poolName, masterDom, domDict,
masterVersion, lockPolicy=None,
lockRenewalIntervalSec=None, leaseTimeSec=None,
ioOpTimeoutSec=None, leaseRetries=None, hostId=None,
options=None):
"""
Reconstruct Master Domains - rescue action: can be issued even when
pool is not connected.
:param spUUID: The UUID of the storage pool you want to reconstruct.
:type spUUID: UUID
:param masterDom: The new master domain UUID.
:type masterDom: UUID
:param domDict: Dict. of domain and statuses
``{'sdUUID1':status1, 'sdUUID2':status2}``
:type domDict: dict
:param masterVersion: The new version of master domain.
:type masterVersion: int
:param lockPolicy: ?
:param lockRenewalIntervalSec: ?
:param leaseTimeSec: ?
:param ioOpTimeoutSec: The timeout of IO operations in seconds. ?
:type ioOpTimeoutSec: int
:param leaseRetries: ?
:param hostId: The host id to be used during the reconstruct process.
:param options: ?
        :returns: Nothing ? (pool.reconstructMaster returns nothing)
        :rtype: ?
"""
leaseParams = sd.packLeaseParams(
lockRenewalIntervalSec=lockRenewalIntervalSec,
leaseTimeSec=leaseTimeSec,
ioOpTimeoutSec=ioOpTimeoutSec,
leaseRetries=leaseRetries
)
vars.task.setDefaultException(
se.ReconstructMasterError(
"spUUID=%s, masterDom=%s, masterVersion=%s, clusterlock "
"params: (%s)" % (spUUID, masterDom, masterVersion,
leaseParams)))
self.log.info("spUUID=%s master=%s", spUUID, masterDom)
try:
pool = self.getPool(spUUID)
except se.StoragePoolUnknown:
pool = sp.StoragePool(spUUID, self.domainMonitor, self.taskMng)
pool.setBackend(StoragePoolDiskBackend(pool))
else:
raise se.StoragePoolConnected(spUUID)
self.validateSdUUID(masterDom)
if hostId is not None:
misc.validateN(hostId, 'hostId')
vars.task.getExclusiveLock(STORAGE, spUUID)
for d, status in domDict.iteritems():
misc.validateUUID(d)
try:
sd.validateSDStatus(status)
except:
domDict[d] = sd.validateSDDeprecatedStatus(status)
return pool.reconstructMaster(hostId, poolName, masterDom, domDict,
masterVersion, leaseParams)
@public
    def getDeviceList(self, storageType=None, options=None):
"""
List all Block Devices.
:param storageType: Filter by storage type.
:type storageType: Some enum?
:param options: ?
:returns: Dict containing a list of all the devices of the storage
type specified.
:rtype: dict
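
        Example (illustrative; filtering on the 'status' key reflects the
        'free'/'used' values set by this module)::

            devices = hsm.getDeviceList(sd.ISCSI_DOMAIN)['devList']
            free = [d['GUID'] for d in devices if d['status'] == 'free']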
"""
vars.task.setDefaultException(se.BlockDeviceActionError())
devices = self._getDeviceList(storageType)
return dict(devList=devices)
def _getDeviceList(self, storageType=None, guids=None):
sdCache.refreshStorage()
typeFilter = lambda dev: True
if storageType:
if sd.storageType(storageType) == sd.type2name(sd.ISCSI_DOMAIN):
typeFilter = \
lambda dev: multipath.devIsiSCSI(dev.get("devtype"))
elif sd.storageType(storageType) == sd.type2name(sd.FCP_DOMAIN):
typeFilter = \
lambda dev: multipath.devIsFCP(dev.get("devtype"))
devices = []
pvs = {}
if guids is not None:
for guid in guids:
try:
pv = lvm.getPV(guid)
except se.StorageException:
self.log.warning("getPV failed for guid: %s", guid,
exc_info=True)
else:
pvs[os.path.basename(pv.name)] = pv
else:
for pv in lvm.getAllPVs():
pvs[os.path.basename(pv.name)] = pv
# FIXME: pathListIter() should not return empty records
for dev in multipath.pathListIter(guids):
if not typeFilter(dev):
continue
pv = pvs.get(dev.get('guid', ""))
if pv is not None:
pvuuid = pv.uuid
pvsize = pv.size
vguuid = pv.vg_uuid
else:
pvuuid = ""
pvsize = ""
vguuid = ""
devInfo = {'GUID': dev.get("guid", ""), 'pvUUID': pvuuid,
'pvsize': str(pvsize),
'vgUUID': vguuid, 'vendorID': dev.get("vendor", ""),
'productID': dev.get("product", ""),
'fwrev': dev.get("fwrev", ""),
"serial": dev.get("serial", ""),
'capacity': dev.get("capacity", "0"),
'devtype': dev.get("devtype", ""),
'pathstatus': dev.get("paths", []),
'pathlist': dev.get("connections", []),
'logicalblocksize': dev.get("logicalblocksize", ""),
'physicalblocksize': dev.get("physicalblocksize", "")}
devices.append(devInfo)
# Look for devices that will probably fail if pvcreated.
devNamesToPVTest = tuple(dev["GUID"] for dev in devices)
unusedDevs, usedDevs = lvm.testPVCreate(
devNamesToPVTest, metadataSize=blockSD.VG_METADATASIZE)
        # Assuming that the unused and used device sets are disjoint
free = tuple(os.path.basename(d) for d in unusedDevs)
used = tuple(os.path.basename(d) for d in usedDevs)
for dev in devices:
guid = dev['GUID']
if guid in free:
dev['status'] = "free"
elif guid in used:
dev['status'] = "used"
            else:
                raise KeyError("pvcreate response foresight cannot be "
                               "determined for %s" % dev)
return devices
@public
def getDevicesVisibility(self, guids, options=None):
"""
Check which of the luns with specified guids are visible
:param guids: List of device GUIDs to check.
:type guids: list
:param options: ?
:returns: dictionary of specified guids and respective visibility
boolean
:rtype: dict
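
        Example (illustrative; the guid values are placeholders)::

            res = hsm.getDevicesVisibility(['guid1', 'guid2'])
            missing = [g for g, seen in res['visible'].items() if not seen]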
"""
def _isVisible(guid):
try:
res = (os.stat('/dev/mapper/' + guid).st_mode &
stat.S_IRUSR != 0)
except OSError as e:
if e.errno != errno.ENOENT:
raise
res = False
return res
visibility = [_isVisible(guid) for guid in guids]
if not all(visibility):
multipath.rescan()
visibility = [_isVisible(guid) for guid in guids]
visibility = dict(zip(guids, visibility))
        # After multipath.rescan, existing devices may disappear, and new
        # devices may appear, making the lvm filter stale.
        lvm.invalidateFilter()
return {'visible': visibility}
@public
def createVG(self, vgname, devlist, force=False, options=None):
"""
Creates a volume group with the name 'vgname' out of the devices in
'devlist'
:param vgname: The human readable name of the vg.
:type vgname: str
:param devlist: A list of devices to be included in the VG.
The devices must be unattached.
:type devlist: list
:param options: ?
:returns: the UUID of the new VG.
:rtype: UUID
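
        Example (illustrative; note that despite the description above,
        vgname is validated as a UUID string, and the device names are
        placeholder multipath basenames)::

            newVgUUID = hsm.createVG(vgName, ['guid1', 'guid2'])['uuid']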
"""
MINIMALVGSIZE = 10 * 1024 * constants.MEGAB
vars.task.setDefaultException(
se.VolumeGroupCreateError(str(vgname), str(devlist)))
misc.validateUUID(vgname, 'vgname')
# getSharedLock(connectionsResource...)
knowndevs = set(os.path.basename(p) for p
in multipath.getMPDevNamesIter())
size = 0
devices = []
unknowndevs = []
for dev in devlist:
if dev in knowndevs:
devices.append(dev)
size += multipath.getDeviceSize(devicemapper.getDmId(dev))
else:
unknowndevs.append(dev)
if unknowndevs:
raise se.InaccessiblePhysDev(unknowndevs)
# Minimal size check
if size < MINIMALVGSIZE:
raise se.VolumeGroupSizeError(
"VG size must be more than %s MiB" %
str(MINIMALVGSIZE / constants.MEGAB))
lvm.createVG(vgname, devices, blockSD.STORAGE_UNREADY_DOMAIN_TAG,
metadataSize=blockSD.VG_METADATASIZE,
force=(force is True) or (isinstance(force, str) and
(force.capitalize() == "True")))
return dict(uuid=lvm.getVG(vgname).uuid)
@deprecated
@public
def removeVG(self, vgUUID, options=None):
"""
DEPRECATED: formatSD effectively removes the VG.
Removes a volume group.
:param vgUUID: The UUID of the VG you want removed.
:type vgUUID: UUID
:param options: ?
"""
vars.task.setDefaultException(se.VolumeGroupActionError("%s" % vgUUID))
# getSharedLock(connectionsResource...)
try:
lvm.removeVGbyUUID(vgUUID)
except se.VolumeGroupDoesNotExist:
pass
@public
def getTaskStatus(self, taskID, spUUID=None, options=None):
"""
Gets the status of a task.
        :param taskID: The ID of the task you want to check.
:type taskID: ID?
:param spUUID: the UUID of the storage pool that the task is
operating on. ??
:type spUUID: UUID (deprecated)
:param options: ?
:returns: a dict containing the status information of the task.
:rtype: dict
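
        Example (illustrative; ``taskID`` is a placeholder)::

            status = hsm.getTaskStatus(taskID)['taskStatus']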
"""
# getSharedLock(tasksResource...)
taskStatus = self.taskMng.getTaskStatus(taskID=taskID)
return dict(taskStatus=taskStatus)
@public
def getAllTasksStatuses(self, spUUID=None, options=None):
"""
Gets the status of all public tasks.
        :param spUUID: The UUID of the storage pool whose tasks you want to
                       check.
:type spUUID: UUID (deprecated)
:options: ?
"""
# getSharedLock(tasksResource...)
try:
sp = self.pools.values()[0]
except IndexError:
raise se.SpmStatusError()
allTasksStatus = sp.getAllTasksStatuses()
return dict(allTasksStatus=allTasksStatus)
@public
def getTaskInfo(self, taskID, spUUID=None, options=None):
"""
Gets information about a Task.
:param taskID: The ID of the task you want to get info on.
:type taskID: ID ?
:param spUUID: The UUID of the storage pool that owns this task. ?
:type spUUID: UUID (deprecated)
        :param options: ?
:returns: a dict with information about the task.
:rtype: dict
:raises: :exc:`storage_exception.UnknownTask` if a task with the
specified taskID doesn't exist.
"""
# getSharedLock(tasksResource...)
inf = self.taskMng.getTaskInfo(taskID=taskID)
return dict(TaskInfo=inf)
@public
def getAllTasksInfo(self, spUUID=None, options=None):
"""
Get the information of all the tasks in a storage pool.
        :param spUUID: The UUID of the storage pool whose tasks' info you
                       want to check.
:type spUUID: UUID (deprecated)
:param options: ?
:returns: a dict of all the tasks information.
:rtype: dict
"""
# getSharedLock(tasksResource...)
try:
sp = self.pools.values()[0]
except IndexError:
raise se.SpmStatusError()
allTasksInfo = sp.getAllTasksInfo()
return dict(allTasksInfo=allTasksInfo)
@public
def getAllTasks(self):
"""
Get the information for all tasks in the system.
:returns: A dict of all tasks' information.
:rtype: dict
"""
ret = self.taskMng.getAllTasks()
return dict(tasks=ret)
@public
def stopTask(self, taskID, spUUID=None, options=None):
"""
Stops a task.
:param taskID: The ID of the task you want to stop.
:type taskID: ID?
:param spUUID: The UUID of the storage pool that owns the task.
:type spUUID: UUID (deprecated)
:options: ?
:returns: :keyword:`True` if task was stopped successfully.
:rtype: bool
"""
force = False
if options:
try:
force = options.get("force", False)
except:
self.log.warning("options %s are ignored" % options)
# getExclusiveLock(tasksResource...)
return self.taskMng.stopTask(taskID=taskID, force=force)
@public
def clearTask(self, taskID, spUUID=None, options=None):
"""
Clears a task. ?
:param taskID: The ID of the task you want to clear.
:type taskID: ID?
:param spUUID: The UUID of the storage pool that owns this task.
:type spUUID: UUID (deprecated)
:options: ?
:returns: :keyword:`True` if task was cleared successfully.
:rtype: bool
"""
# getExclusiveLock(tasksResource...)
return self.taskMng.clearTask(taskID=taskID)
@public
def revertTask(self, taskID, spUUID=None, options=None):
"""
Revert a task.
        :param taskID: The ID of the task you want to revert.
:type taskID: ID?
:param spUUID: The UUID of the storage pool that owns this task.
:type spUUID: UUID (deprecated)
:options: ?
:returns:
:rtype:
"""
# getExclusiveLock(tasksResource...)
return self.taskMng.revertTask(taskID=taskID)
@public
def getFileStats(self, sdUUID, pattern='*', caseSensitive=False,
options=None):
"""
Returns statistics of all files in the domain filtered according to
pattern.
:param sdUUID: The UUID of the storage domain you want to query.
:type sdUUID: UUID
:param pattern: The glob expression for filtering.
:type pattern: str
:param caseSensitive: Enables case-sensitive matching.
:type caseSensitive: bool
:options: ?
:returns: file statistics for files matching pattern.
:rtype: dict
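
        Example (illustrative; assumes ``sdUUID`` refers to an ISO file
        domain)::

            stats = hsm.getFileStats(sdUUID, pattern='*.iso')['fileStats']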
"""
vars.task.setDefaultException(se.GetFileStatsError(sdUUID))
vars.task.getSharedLock(STORAGE, sdUUID)
dom = sdCache.produce(sdUUID=sdUUID)
        # isISO is a method; referencing it without calling it is always
        # truthy, so the check never triggered
        if (not dom.isISO() or
                dom.getStorageType() not in sd.FILE_DOMAIN_TYPES):
raise se.GetFileStatsError(sdUUID)
fileStats = dom.getFileList(pattern=pattern,
caseSensitive=caseSensitive)
return {'fileStats': fileStats}
@public
def getIsoList(self, spUUID, extension='iso', options=None):
"""
Gets a list of all ISO/Floppy volumes in a storage pool.
:param spUUID: The UUID of the storage pool you want to query.
:type spUUID: UUID
:param extension: ?
:type extension: str
:options: ?
:returns: a dict of all the volumes found.
:rtype: dict
"""
vars.task.setDefaultException(se.GetIsoListError(spUUID))
vars.task.getSharedLock(STORAGE, spUUID)
isoDom = self.getPool(spUUID).getIsoDomain()
if not isoDom:
raise se.GetIsoListError(spUUID)
# Get full iso files dictionary
isodict = isoDom.getFileList(pattern='*.' + extension,
caseSensitive=False)
# Get list of iso images with proper permissions only
        isolist = [key for key, value in isodict.items()
                   if value['status'] == 0]
return {'isolist': isolist}
@public
def getFloppyList(self, spUUID, options=None):
"""
        Gets a list of all Floppy volumes in a storage pool.
:param spUUID: The UUID of the storage pool you want to query.
:type spUUID: UUID
:param options: ?
:returns: a dict of all the floppy volumes found.
:rtype: dict
"""
vars.task.setDefaultException(se.GetFloppyListError("%s" % spUUID))
return self.getIsoList(spUUID=spUUID, extension='vfd')
def __getSDTypeFindMethod(self, domType):
# TODO: make sd.domain_types a real dictionary and remove this.
# Storage Domain Types find methods
SDTypeFindMethod = {sd.NFS_DOMAIN: nfsSD.findDomain,
sd.FCP_DOMAIN: blockSD.findDomain,
sd.ISCSI_DOMAIN: blockSD.findDomain,
sd.LOCALFS_DOMAIN: localFsSD.findDomain,
sd.POSIXFS_DOMAIN: nfsSD.findDomain,
sd.GLUSTERFS_DOMAIN: glusterSD.findDomain}
return SDTypeFindMethod.get(domType)
def __prefetchDomains(self, domType, conObj):
        uuidPattern = "????????-????-????-????-????????????"
if domType in (sd.FCP_DOMAIN, sd.ISCSI_DOMAIN):
uuids = tuple(blockSD.getStorageDomainsList())
elif domType is sd.NFS_DOMAIN:
lPath = conObj._mountCon._getLocalPath()
self.log.debug("nfs local path: %s", lPath)
goop = oop.getGlobalProcPool()
            uuids = tuple(os.path.basename(d) for d in
                          goop.glob.glob(os.path.join(lPath, uuidPattern)))
elif domType is sd.POSIXFS_DOMAIN:
lPath = conObj._getLocalPath()
self.log.debug("posix local path: %s", lPath)
goop = oop.getGlobalProcPool()
            uuids = tuple(os.path.basename(d) for d in
                          goop.glob.glob(os.path.join(lPath, uuidPattern)))
elif domType is sd.GLUSTERFS_DOMAIN:
glusterDomPath = os.path.join(sd.GLUSTERSD_DIR, "*")
self.log.debug("glusterDomPath: %s", glusterDomPath)
uuids = tuple(sdUUID for sdUUID, domainPath in
nfsSD.fileSD.scanDomains(glusterDomPath))
elif domType is sd.LOCALFS_DOMAIN:
lPath = conObj._path
self.log.debug("local _path: %s", lPath)
            uuids = tuple(os.path.basename(d) for d in
                          glob.glob(os.path.join(lPath, uuidPattern)))
else:
uuids = tuple()
self.log.warn("domType %s does not support prefetch")
self.log.debug("Found SD uuids: %s", uuids)
findMethod = self.__getSDTypeFindMethod(domType)
return dict.fromkeys(uuids, findMethod)
@deprecated
@public
def connectStorageServer(self, domType, spUUID, conList, options=None):
"""
Connects to a storage low level entity (server).
        :param domType: The type of the connection, sometimes expressed as
                        the corresponding domain type
:param spUUID: deprecated, unused
:param conList: A list of connections. Each connection being a dict
with keys depending on the type
:type conList: list
:param options: unused
        :returns: a list of statuses; status will be 0 if the connection
                  was successful
:rtype: dict
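
        Example (an illustrative sketch for an NFS-style connection; the
        'connection' key and the conId value are assumptions about the
        connection definition format, only 'id' is used directly by this
        method)::

            res = hsm.connectStorageServer(
                sd.NFS_DOMAIN, sd.BLANK_UUID,
                [{'id': conId, 'connection': 'server:/export/path'}])
            ok = all(s['status'] == 0 for s in res['statuslist'])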
"""
vars.task.setDefaultException(
se.StorageServerConnectionError(
"domType=%s, spUUID=%s, conList=%s" %
(domType, spUUID, conList)))
res = []
for conDef in conList:
conInfo = _connectionDict2ConnectionInfo(domType, conDef)
conObj = storageServer.ConnectionFactory.createConnection(conInfo)
try:
self._connectStorageOverIser(conDef, conObj, domType)
conObj.connect()
except Exception as err:
self.log.error(
"Could not connect to storageServer", exc_info=True)
status, _ = self._translateConnectionError(err)
else:
status = 0
try:
doms = self.__prefetchDomains(domType, conObj)
except:
self.log.debug("prefetch failed: %s",
sdCache.knownSDs, exc_info=True)
else:
# Any pre-existing domains in sdCache stand the chance of
# being invalid, since there is no way to know what happens
# to them while the storage is disconnected.
for sdUUID in doms.iterkeys():
sdCache.manuallyRemoveDomain(sdUUID)
sdCache.knownSDs.update(doms)
self.log.debug("knownSDs: {%s}", ", ".join("%s: %s.%s" %
(k, v.__module__, v.__name__)
for k, v in sdCache.knownSDs.iteritems()))
res.append({'id': conDef["id"], 'status': status})
# Connecting new device may change the visible storage domain list
# so invalidate caches
sdCache.invalidateStorage()
return dict(statuslist=res)
@deprecated
def _connectStorageOverIser(self, conDef, conObj, conTypeId):
"""
Tries to connect the storage server over iSER.
This applies if the storage type is iSCSI and 'iser' is in
the configuration option 'iscsi_default_ifaces'.
"""
# FIXME: remove this method when iface selection is in higher interface
typeName = CON_TYPE_ID_2_CON_TYPE[conTypeId]
if typeName == 'iscsi' and 'initiatorName' not in conDef:
ifaces = config.get('irs', 'iscsi_default_ifaces').split(',')
if 'iser' in ifaces:
conObj._iface = iscsi.IscsiInterface('iser')
try:
conObj.connect()
conObj.disconnect()
except:
conObj._iface = iscsi.IscsiInterface('default')
@deprecated
@public
def disconnectStorageServer(self, domType, spUUID, conList, options=None):
"""
Disconnects from a storage low level entity (server).
        :param domType: The type of the connection, sometimes expressed as
                        the corresponding domain type
:param spUUID: deprecated, unused
:param conList: A list of connections. Each connection being a dict
with keys depending on the type
:type conList: list
:param options: unused
        :returns: a list of statuses; status will be 0 if the disconnection
                  was successful
:rtype: dict
"""
vars.task.setDefaultException(
se.StorageServerDisconnectionError(
"domType=%s, spUUID=%s, conList=%s" %
(domType, spUUID, conList)))
res = []
for conDef in conList:
conInfo = _connectionDict2ConnectionInfo(domType, conDef)
conObj = storageServer.ConnectionFactory.createConnection(conInfo)
try:
conObj.disconnect()
status = 0
except Exception as err:
self.log.error("Could not disconnect from storageServer",
exc_info=True)
status, _ = self._translateConnectionError(err)
res.append({'id': conDef["id"], 'status': status})
# Disconnecting a device may change the visible storage domain list
# so invalidate the caches
sdCache.refreshStorage()
return dict(statuslist=res)
def _translateConnectionError(self, e):
if e is None:
return 0, ""
if isinstance(e, mount.MountError):
return se.MountError.code, se.MountError.message
if isinstance(e, iscsi.iscsiadm.IscsiAuthenticationError):
return se.iSCSILoginAuthError.code, se.iSCSILoginAuthError.message
if isinstance(e, iscsi.iscsiadm.IscsiInterfaceError):
return se.iSCSIifaceError.code, se.iSCSIifaceError.message
if isinstance(e, iscsi.iscsiadm.IscsiError):
return se.iSCSISetupError.code, se.iSCSISetupError.message
if hasattr(e, 'code'):
return e.code, e.message
return se.GeneralException.code, str(e)
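    # Editor's note: an illustrative walk through the translation above,
    # with made-up exceptions: a mount.MountError maps to
    # (se.MountError.code, se.MountError.message); any exception exposing
    # a 'code' attribute (e.g. a StorageException subclass) returns its own
    # code and message; a plain ValueError("boom") falls through to
    # (se.GeneralException.code, "boom").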
@public
def storageServer_ConnectionRefs_statuses(self):
"""
Gets a list of all managed and active unmanaged storage connections and
their current status.
        :rtype: a dict in the format of
                {id: {'connected': True/False,
                      'lastError': (errcode, message),
                      'connectionInfo': :class:`storageServer.ConnectionInfo`}}
"""
vars.task.setDefaultException(se.StorageServerActionError())
res = {}
conMonitor = self._connectionMonitor
managedConnections = conMonitor.getMonitoredConnectionsDict()
for conId, con in managedConnections.iteritems():
conErr = conMonitor.getLastError(conId)
errInfo = self._translateConnectionError(conErr)
conInfo = self._connectionAliasRegistrar.getConnectionInfo(conId)
params = conInfo.params
if conInfo.type == 'iscsi':
conInfoDict = {'target':
{'portal':
misc.namedtuple2dict(params.target.portal),
'tpgt': params.target.tpgt,
'iqn': params.target.iqn},
'iface': params.iface.name}
else:
conInfoDict = misc.namedtuple2dict(params)
for key in conInfoDict:
if conInfoDict[key] is None:
conInfoDict[key] = 'default'
r = {"connected": con.isConnected(),
"lastError": errInfo,
"connectionInfo": {
"type": conInfo.type,
"params": conInfoDict}}
res[conId] = r
return dict(connectionslist=res)
@public
def getStoragePoolInfo(self, spUUID, options=None):
"""
Gets info about a storage pool.
:param spUUID: The UUID of the storage pool you want to get info on.
:type spUUID: UUID
:param options: ?
:returns: getPool(spUUID).getInfo
"""
vars.task.setDefaultException(
se.StoragePoolActionError("spUUID=%s" % spUUID))
vars.task.getSharedLock(STORAGE, spUUID)
pool = self.getPool(spUUID)
poolInfo = pool.getInfo()
doms = pool.getDomains()
domInfo = self._getDomsStats(pool.domainMonitor, doms)
for sdUUID in doms.iterkeys():
if domInfo[sdUUID]['isoprefix']:
poolInfo['isoprefix'] = domInfo[sdUUID]['isoprefix']
break
else:
poolInfo['isoprefix'] = '' # No ISO domain found
return dict(info=poolInfo, dominfo=domInfo)
@public
def createStorageDomain(self, storageType, sdUUID, domainName,
typeSpecificArg, domClass,
domVersion=constants.SUPPORTED_DOMAIN_VERSIONS[0],
options=None):
"""
Creates a new storage domain.
:param storageType: The storage type of the new storage
domain (eg. NFS).
:type storageType: int (as defined in sd.py).
:param sdUUID: The UUID of the new storage domain.
:type sdUUID: UUID
:param domainName: The human readable name of the new storage domain.
:type domainName: str
:param typeSpecificArg: Arguments that are specific to the
storage type.
:type typeSpecificArg: dict
:param domClass: The class of the new storage domain (eg. iso, data).
:type domClass: int (as defined in sd.py)
:param options: unused
"""
msg = ("storageType=%s, sdUUID=%s, domainName=%s, "
"domClass=%s, typeSpecificArg=%s domVersion=%s" %
(storageType, sdUUID, domainName, domClass,
typeSpecificArg, domVersion))
domVersion = int(domVersion)
vars.task.setDefaultException(se.StorageDomainCreationError(msg))
misc.validateUUID(sdUUID, 'sdUUID')
self.validateNonDomain(sdUUID)
if domClass not in sd.DOMAIN_CLASSES.keys():
raise se.StorageDomainClassError()
sd.validateDomainVersion(domVersion)
# getSharedLock(connectionsResource...)
# getExclusiveLock(sdUUID...)
if storageType in sd.BLOCK_DOMAIN_TYPES:
newSD = blockSD.BlockStorageDomain.create(
sdUUID, domainName, domClass, typeSpecificArg, storageType,
domVersion)
elif storageType in (sd.NFS_DOMAIN, sd.POSIXFS_DOMAIN):
newSD = nfsSD.NfsStorageDomain.create(
sdUUID, domainName, domClass, typeSpecificArg, storageType,
domVersion)
elif storageType == sd.GLUSTERFS_DOMAIN:
newSD = glusterSD.GlusterStorageDomain.create(
sdUUID, domainName, domClass, typeSpecificArg, storageType,
domVersion)
elif storageType == sd.LOCALFS_DOMAIN:
newSD = localFsSD.LocalFsStorageDomain.create(
sdUUID, domainName, domClass, typeSpecificArg, storageType,
domVersion)
else:
raise se.StorageDomainTypeError(storageType)
findMethod = self.__getSDTypeFindMethod(storageType)
sdCache.knownSDs[sdUUID] = findMethod
self.log.debug("knownSDs: {%s}", ", ".join("%s: %s.%s" %
(k, v.__module__, v.__name__)
for k, v in sdCache.knownSDs.iteritems()))
sdCache.manuallyAddDomain(newSD)
@public
def validateStorageDomain(self, sdUUID, options=None):
"""
Validates that the storage domain is accessible.
:param sdUUID: The UUID of the storage domain you want to validate.
:type sdUUID: UUID
:param options: ?
:returns: :keyword:`True` if storage domain is valid.
:rtype: bool
"""
vars.task.setDefaultException(
se.StorageDomainCreationError("sdUUID=%s" % sdUUID))
return sdCache.produce(sdUUID=sdUUID).validate()
# TODO: Remove this function when formatStorageDomain() is removed.
def _recycle(self, dom):
sdUUID = dom.sdUUID
try:
dom.format(dom.sdUUID)
            # dom is a DomainProxy; attribute operations will trigger the
            # domain being added to sdCache again. Delete the local variable
            # binding here to avoid accessing its attributes accidentally.
del dom
finally:
try:
sdCache.manuallyRemoveDomain(sdUUID)
except KeyError:
self.log.warn("Storage domain %s doesn't exist in cache. "
"Leftovers are recycled.", sdUUID)
@public
def formatStorageDomain(self, sdUUID, autoDetach=False, options=None):
"""
Formats a detached storage domain.
.. warning::
This removes all data from the storage domain.
:param sdUUID: The UUID for the storage domain you want to format.
:param autoDetach: DEPRECATED
:type sdUUID: UUID
:param options: ?
:returns: Nothing
"""
multipath.rescan()
vars.task.setDefaultException(
se.StorageDomainActionError("sdUUID=%s" % sdUUID))
# getSharedLock(connectionsResource...)
vars.task.getExclusiveLock(STORAGE, sdUUID)
for p in self.pools.values():
# Avoid format if domain part of connected pool
domDict = p.getDomains()
if sdUUID in domDict.keys():
raise se.CannotFormatStorageDomainInConnectedPool(sdUUID)
        # For domains that are attached to a disconnected pool, format the
        # domain only if the 'autoDetach' flag is set
sd = sdCache.produce(sdUUID=sdUUID)
try:
sd.invalidateMetadata()
# TODO: autoDetach is True
if not misc.parseBool(autoDetach) and sd.getPools():
raise se.CannotFormatAttachedStorageDomain(sdUUID)
# Allow format also for broken domain
except (se.StorageDomainMetadataNotFound, se.MetaDataGeneralError,
se.MiscFileReadException, se.MiscBlockReadException,
se.MiscBlockReadIncomplete) as e:
self.log.warn("Domain %s has problem with metadata. Continue "
"formatting... (%s)", sdUUID, e)
self._recycle(sd)
@public
def setStorageDomainDescription(self, sdUUID, description, options=None):
"""
Sets a storage domain's description.
:param sdUUID: The UUID of the storage domain you want to modify.
:type sdUUID: UUID
:param description: The new description.
:type description: str
:param options: ?
"""
if len(description) > sd.MAX_DOMAIN_DESCRIPTION_SIZE:
raise se.StorageDomainDescriptionTooLongError()
vars.task.setDefaultException(
se.StorageDomainActionError(
"sdUUID=%s, description=%s" % (sdUUID, description)))
dom = sdCache.produce(sdUUID=sdUUID)
vars.task.getSharedLock(STORAGE, sdUUID)
pool = self.getPool(dom.getPools()[0])
pool.setSDDescription(dom, description)
@public
def getStorageDomainInfo(self, sdUUID, options=None):
"""
Gets the info of a storage domain.
:param sdUUID: The UUID of the storage domain you want to get
info about.
:type sdUUID: UUID
:param options: ?
:returns: a dict containing the information about the domain.
:rtype: dict
"""
vars.task.setDefaultException(
se.StorageDomainActionError("sdUUID=%s" % sdUUID))
dom = self.validateSdUUID(sdUUID)
# getSharedLock(connectionsResource...)
vars.task.getSharedLock(STORAGE, sdUUID)
return dict(info=dom.getInfo())
@public
def getStorageDomainStats(self, sdUUID, options=None):
"""
Gets a storage domain's statistics.
        :param sdUUID: The UUID of the storage domain whose statistics you
                       want to get.
:type sdUUID: UUID
:param options: ?
:returns: a dict containing the statistics information.
:rtype: dict
"""
vars.task.setDefaultException(
se.StorageDomainActionError("sdUUID=%s" % sdUUID))
vars.task.getSharedLock(STORAGE, sdUUID)
dom = sdCache.produce(sdUUID=sdUUID)
dom.refresh()
stats = dom.getStats()
return dict(stats=stats)
@public
def getStorageDomainsList(
self, spUUID=None, domainClass=None, storageType=None,
remotePath=None, options=None):
"""
        Returns a list of all storage domains, or only those of a specific
        pool.
        :param spUUID: The UUID of the storage pool you want to list.
                       If spUUID equals :attr:`~volume.BLANK_UUID` all
                       pools will be listed.
:type spUUID: UUID
:param options: ?
:returns: a dict containing list of storage domains.
:rtype: dict
"""
vars.task.setDefaultException(
se.StorageDomainActionError("spUUID: %s" % spUUID))
sdCache.refreshStorage()
if spUUID and spUUID != volume.BLANK_UUID:
domList = self.getPool(spUUID).getDomains()
domains = domList.keys()
else:
# getSharedLock(connectionsResource...)
domains = sdCache.getUUIDs()
for sdUUID in domains[:]:
try:
dom = sdCache.produce(sdUUID=sdUUID)
# Filter domains according to 'storageType'
if storageType and storageType != dom.getStorageType():
domains.remove(sdUUID)
continue
# Filter domains according to 'domainClass'
if domainClass and domainClass != dom.getDomainClass():
domains.remove(sdUUID)
continue
# Filter domains according to 'remotePath'
if (remotePath and
fileUtils.transformPath(remotePath) !=
dom.getRemotePath()):
domains.remove(sdUUID)
continue
except Exception:
self.log.error("Unexpected error", exc_info=True)
domains.remove(sdUUID)
continue
return dict(domlist=domains)
def __fillPVDict(self, devInfo, pv, devtype):
info = {}
info["vendorID"] = devInfo["vendor"]
info["productID"] = devInfo["product"]
info["serial"] = devInfo["serial"]
info["pathstatus"] = []
for pathInfo in devInfo['paths']:
info["pathstatus"].append(pathInfo)
info["pathlist"] = devInfo["connections"]
info["fwrev"] = "0000"
info["devtype"] = devtype
info["capacity"] = str(pv.size)
info["devcapacity"] = str(pv.dev_size)
info["vgUUID"] = str(pv.vg_uuid)
info["pvUUID"] = str(pv.uuid)
info["GUID"] = str(pv.guid)
return info
@public
def getVGList(self, storageType=None, options=None):
"""
Returns a list all VGs.
:param options: ?
:returns: a dict containing a list of all VGs.
:rtype: dict
"""
vars.task.setDefaultException(se.VolumeGroupActionError())
sdCache.refreshStorage()
# getSharedLock(connectionsResource...)
vglist = []
vgs = self.__getVGsInfo()
for vgInfo in vgs:
del vgInfo["pvlist"]
if storageType is not None:
if vgInfo["type"] != storageType:
continue
vglist.append(vgInfo)
return dict(vglist=vglist)
def __getVGsInfo(self, vgUUIDs=None):
getGuid = lambda pvName: os.path.split(pvName)[-1]
devNames = []
vgInfos = []
vgGuids = {}
if vgUUIDs is None:
vgList = lvm.getAllVGs()
else:
vgList = [lvm.getVGbyUUID(vgUUID) for vgUUID in vgUUIDs]
for i, vg in enumerate(vgList):
# Should be fresh from the cache
devNames.extend(imap(getGuid, lvm.listPVNames(vg.name)))
# dict(vg.attr._asdict()) because nametuples and OrderedDict are
# not properly marshalled
vgInfo = {'name': vg.name, 'vgUUID': vg.uuid,
'vgsize': str(vg.size), 'vgfree': str(vg.free),
'type': "", 'attr': dict(vg.attr._asdict()),
'state': vg.partial, "pvlist": []}
vgInfos.append(vgInfo)
vgGuids[vg.uuid] = i
pathDict = {}
for dev in multipath.pathListIter(devNames):
pathDict[dev["guid"]] = dev
self.__processVGInfos(vgInfos, pathDict, getGuid)
return vgInfos
def __processVGInfos(self, vgInfos, pathDict, getGuid):
vgType = None
for vgInfo in vgInfos:
for pv in lvm.listPVNames(vgInfo['name']):
dev = pathDict.get(getGuid(pv))
if dev is None:
self.log.warn("dev %s was not found %s",
getGuid(pv), pathDict)
continue
if vgType is None:
vgType = dev["devtype"]
elif (vgType != multipath.DEV_MIXED and
vgType != dev["devtype"]):
vgType = multipath.DEV_MIXED
pvInfo = lvm.getPV(pv)
vgInfo['pvlist'].append(self.__fillPVDict(dev, pvInfo, vgType))
if vgType == multipath.DEV_FCP:
vgType = sd.FCP_DOMAIN
elif vgType == multipath.DEV_ISCSI:
vgType = sd.ISCSI_DOMAIN
else:
# TODO: Allow for mixed vgs to be specified as such in the API
vgType = sd.ISCSI_DOMAIN
vgInfo["type"] = vgType
@public
def getVGInfo(self, vgUUID, options=None):
"""
Gets the info of a VG.
:param vgUUID: The UUID of the VG.
:type vgUUID: UUID
:param options: ?
:returns: a dict containing the info about the VG.
:rtype: dict
:raises: :exc:`storage_exception.VolumeGroupDoesNotExist`
if no VG with the specified UUID is found
"""
vars.task.setDefaultException(se.VolumeGroupActionError("%s" % vgUUID))
# getSharedLock(connectionsResource...)
return dict(info=self.__getVGsInfo([vgUUID])[0])
@public
def discoverSendTargets(self, con, options=None):
"""
Discovers iSCSI targets.
        :param con: A dict containing the connection information.
        :type con: dict
:param options: ?
:returns: a dict containing the send targets that were discovered.
:rtype: dict
"""
ip = con['connection']
port = int(con['port'])
username = con['user']
password = con['password']
if username == "":
username = password = None
iface = iscsi.IscsiInterface("default")
portal = iscsi.IscsiPortal(ip, port)
cred = None
if username or password:
cred = iscsi.ChapCredentials(username, password)
try:
targets = iscsi.discoverSendTargets(iface, portal, cred)
except iscsi.iscsiadm.IscsiError as e:
self.log.error("Discovery failed", exc_info=True)
raise se.iSCSIDiscoveryError(portal, e)
        # Format the data back to its original textual representation for
        # the response. Why, you ask? Backward compatibility! At least now,
        # if iscsiadm changes its output, we can handle it gracefully.
fullTargets = []
partialTargets = []
for target in targets:
fullTargets.append("%s:%d,%d %s" %
(target.portal.hostname, target.portal.port,
target.tpgt, target.iqn))
partialTargets.append(target.iqn)
return dict(targets=partialTargets, fullTargets=fullTargets)
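    # Editor's note: with hypothetical values, a discovered target is
    # rendered in the iscsiadm-compatible form described above:
    #     fullTargets    -> ["10.0.0.5:3260,1 iqn.2001-04.com.example:disk1"]
    #     partialTargets -> ["iqn.2001-04.com.example:disk1"]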
@public
def cleanupUnusedConnections(self, options=None):
"""
.. warning::
This method is not yet implemented.
"""
# vars.task.setDefaultException(se.ChangeMeError("%s" % args))
# getExclusiveLock(connectionsResource...)
# TODO: Implement
pass
@public
def refreshVolume(self, sdUUID, spUUID, imgUUID, volUUID):
"""
Refresh low level volume after change in the shared storage initiated
from another host
:param sdUUID: The UUID of the storage domain that owns the volume.
:type sdUUID: UUID
:param spUUID: The UUID of the storage pool that owns the volume.
:type spUUID: UUID
:param imgUUID: The UUID of the image contained on the volume.
:type imgUUID: UUID
:param volUUID: The UUID of the volume you want to refresh.
:type volUUID: UUID
        :returns: Nothing
"""
return sdCache.produce(
sdUUID=sdUUID).produceVolume(imgUUID=imgUUID,
volUUID=volUUID).refreshVolume()
@public
def getVolumeSize(self, sdUUID, spUUID, imgUUID, volUUID, options=None):
"""
Gets the size of a volume.
:param sdUUID: The UUID of the storage domain that owns the volume.
:type sdUUID: UUID
:param spUUID: The UUID of the storage pool that owns the volume.
:type spUUID: UUID
:param imgUUID: The UUID of the image contained on the volume.
:type imgUUID: UUID
:param volUUID: The UUID of the volume you want to know the size of.
:type volUUID: UUID
:param options: ?
:returns: a dict with the size of the volume.
:rtype: dict
"""
# Return string because xmlrpc's "int" is very limited
dom = sdCache.produce(sdUUID=sdUUID)
apparentsize = str(dom.getVSize(imgUUID, volUUID))
truesize = str(dom.getVAllocSize(imgUUID, volUUID))
return dict(apparentsize=apparentsize, truesize=truesize)
@public
def setVolumeSize(self, sdUUID, spUUID, imgUUID, volUUID, capacity):
capacity = int(capacity)
vol = sdCache.produce(sdUUID).produceVolume(imgUUID, volUUID)
        # Values lower than 1 are used to uncommit (mark as inconsistent
        # during a transaction) the volume size.
if capacity > 0:
sectors = (capacity + SECTOR_SIZE - 1) / SECTOR_SIZE
else:
sectors = capacity
vol.setSize(sectors)
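    # Editor's note: a worked example of the rounding above, assuming
    # SECTOR_SIZE is 512: capacity=1000000 yields
    # (1000000 + 511) / 512 = 1954 sectors under Python 2 integer division,
    # i.e. the size is rounded *up* to whole sectors, while capacity values
    # below 1 are passed through unchanged to uncommit the volume size.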
@public
def getVolumeInfo(self, sdUUID, spUUID, imgUUID, volUUID, options=None):
"""
Gets a volume's info.
:param sdUUID: The UUID of the storage domain that owns the volume.
:type sdUUID: UUID
:param spUUID: The UUID of the storage pool that owns the volume.
:type spUUID: UUID
:param imgUUID: The UUID of the image contained on the volume.
:type imgUUID: UUID
:param volUUID: The UUID of the volume you want to get the info on.
:type volUUID: UUID
:param options: ?
:returns: a dict with the info of the volume.
:rtype: dict
"""
vars.task.getSharedLock(STORAGE, sdUUID)
info = sdCache.produce(
sdUUID=sdUUID).produceVolume(imgUUID=imgUUID,
volUUID=volUUID).getInfo()
return dict(info=info)
@public
def appropriateDevice(self, guid, thiefId):
"""
Change ownership of the guid device to vdsm:qemu
Warning: Internal use only.
"""
supervdsm.getProxy().appropriateDevice(guid, thiefId)
supervdsm.getProxy().udevTrigger(guid)
devPath = os.path.join(devicemapper.DMPATH_PREFIX, guid)
utils.retry(partial(fileUtils.validateQemuReadable, devPath),
expectedException=OSError,
timeout=QEMU_READABLE_TIMEOUT)
# Get the size of the logical unit volume.
# Casting to string for keeping consistency with public methods
# that use it to overcome xmlrpc integer size limitation issues.
size = str(multipath.getDeviceSize(devicemapper.getDmId(guid)))
return dict(truesize=size, apparentsize=size, path=devPath)
@public
def inappropriateDevices(self, thiefId):
"""
Warning: Internal use only.
"""
fails = supervdsm.getProxy().rmAppropriateRules(thiefId)
if fails:
self.log.error("Failed to remove the following rules: %s", fails)
@public
def prepareImage(self, sdUUID, spUUID, imgUUID, leafUUID):
"""
Prepare an image, activating the needed volumes.
Return the path to the leaf and an unsorted list of the image volumes.
:param sdUUID: The UUID of the storage domain that owns the volume.
:type sdUUID: UUID
:param spUUID: The UUID of the storage pool that owns the volume.
:type spUUID: UUID
:param imgUUID: The UUID of the image contained on the volume.
:type imgUUID: UUID
"""
# If the pool is not blank we should make sure that we are connected
# to the pool.
if spUUID != sd.BLANK_UUID:
self.getPool(spUUID)
vars.task.getSharedLock(STORAGE, sdUUID)
imgVolumesInfo = []
dom = sdCache.produce(sdUUID)
allVols = dom.getAllVolumes()
# Filter volumes related to this image
imgVolumes = sd.getVolsOfImage(allVols, imgUUID).keys()
if leafUUID not in imgVolumes:
raise se.VolumeDoesNotExist(leafUUID)
imgPath = dom.activateVolumes(imgUUID, imgVolumes)
if spUUID and spUUID != sd.BLANK_UUID:
runImgPath = dom.linkBCImage(imgPath, imgUUID)
else:
runImgPath = imgPath
leafPath = os.path.join(runImgPath, leafUUID)
for volUUID in imgVolumes:
path = os.path.join(dom.domaindir, sd.DOMAIN_IMAGES, imgUUID,
volUUID)
volInfo = {'domainID': sdUUID, 'imageID': imgUUID,
'volumeID': volUUID, 'path': path,
'volType': "path"}
leasePath, leaseOffset = dom.getVolumeLease(imgUUID, volUUID)
if leasePath and isinstance(leaseOffset, numbers.Integral):
volInfo.update({
'leasePath': leasePath,
'leaseOffset': leaseOffset,
})
imgVolumesInfo.append(volInfo)
if volUUID == leafUUID:
leafInfo = volInfo
return {'path': leafPath, 'info': leafInfo,
'imgVolumesInfo': imgVolumesInfo}
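    # Editor's note: a hypothetical shape of the dict returned above:
    #     {'path': '<runImgPath>/<leafUUID>',
    #      'info': {'domainID': ..., 'imageID': ..., 'volumeID': ...,
    #               'path': ..., 'volType': 'path'},
    #      'imgVolumesInfo': [<one such volInfo dict per image volume,
    #                          plus leasePath/leaseOffset when present>]}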
@public
def teardownImage(self, sdUUID, spUUID, imgUUID, volUUID=None):
"""
Teardown an image deactivating the volumes.
:param sdUUID: The UUID of the storage domain that owns the volume.
:type sdUUID: UUID
:param spUUID: The UUID of the storage pool that owns the volume.
:type spUUID: UUID
:param imgUUID: The UUID of the image contained on the volume.
:type imgUUID: UUID
"""
vars.task.getSharedLock(STORAGE, sdUUID)
dom = sdCache.produce(sdUUID)
dom.deactivateImage(imgUUID)
@public
def getVolumesList(self, sdUUID, spUUID, imgUUID=volume.BLANK_UUID,
options=None):
"""
Gets a list of all volumes.
:param spUUID: Unused.
:type spUUID: UUID
:param sdUUID: The UUID of the storage domain you want to query.
:type sdUUID: UUID
        :param imgUUID: The UUID of an image by which to filter the
                        results.
                        If imgUUID equals :attr:`~volume.BLANK_UUID` no
                        filtering will be done.
"""
vars.task.getSharedLock(STORAGE, sdUUID)
dom = sdCache.produce(sdUUID=sdUUID)
vols = dom.getAllVolumes()
if imgUUID == volume.BLANK_UUID:
volUUIDs = vols.keys()
else:
volUUIDs = [k for k, v in vols.iteritems() if imgUUID in v.imgs]
return dict(uuidlist=volUUIDs)
@public
def getImagesList(self, sdUUID, options=None):
"""
Gets a list of all the images of specific domain.
:param sdUUID: The UUID of the storage domain you want to query.
:type sdUUID: UUID.
:param options: ?
:returns: a dict with a list of the images belonging to the specified
domain.
:rtype: dict
"""
vars.task.getSharedLock(STORAGE, sdUUID)
dom = sdCache.produce(sdUUID=sdUUID)
images = dom.getAllImages()
return dict(imageslist=list(images))
@public
def storageServer_ConnectionRefs_acquire(self, conRefArgs):
"""
Acquire connection references.
The method will persist the connection info in VDSM and create a
connection reference. Connection references can be accessed by their
IDs where appropriate.
        Once a connection reference is created, VDSM will try to connect to
        the target specified by the connection information if not already
        connected. VDSM will keep the target connected as long as there is
        a reference pointing to the same connection information.
        :param conRefArgs: A map in the form of
                           {id: :class:`storageServer.ConnectionInfo`, ...}
:rtype: dict {id: errcode, ...}
"""
res = {}
for refId, conInfo in conRefArgs.iteritems():
status = 0
try:
conInfo = storageServer.dict2conInfo(conInfo)
except Exception:
res[refId] = se.StorageServerValidationError.code
continue
try:
self._connectionAliasRegistrar.register(refId, conInfo)
except KeyError:
status = se.StorageServerConnectionRefIdAlreadyInUse.code
except Exception as e:
self.log.error("Could not acquire resource ref for '%s'",
refId, exc_info=True)
status, _ = self._translateConnectionError(e)
try:
self._connectionMonitor.manage(refId)
except Exception as e:
self.log.error("Could not acquire resource ref for '%s'",
refId, exc_info=True)
self._connectionAliasRegistrar.unregister(refId)
status, _ = self._translateConnectionError(e)
res[refId] = status
return {"results": res}
@public
def storageServer_ConnectionRefs_release(self, refIDs):
"""
Release connection references.
        Releases the references. If a connection becomes orphaned as a
        result of this action, the connection will be disconnected. The
        connection might remain active if VDSM detects that it is still in
        use, but it will not be kept alive by VDSM anymore.
        :param refIDs: a list of strings, each string representing a refID.
:rtype: dict {id: errcode, ...}
"""
res = {}
for refID in refIDs:
status = 0
try:
self._connectionMonitor.unmanage(refID)
except KeyError:
                # It's OK if this alias is not managed
pass
try:
self._connectionAliasRegistrar.unregister(refID)
except KeyError:
status = se.StorageServerConnectionRefIdAlreadyInUse.code
except Exception as e:
self.log.error("Could not release resource ref for '%s'",
refID, exc_info=True)
status, _ = self._translateConnectionError(e)
res[refID] = status
return {"results": res}
@deprecated
@public
def getImageDomainsList(self, spUUID, imgUUID, options=None):
"""
        Gets a list of all the data domains in the pool that contain imgUUID.
:param spUUID: The UUID of the storage pool you want to query.
:type spUUID: UUID
:param imgUUID: The UUID of the image you want to filter by.
        :type imgUUID: UUID
:param options: ?
:returns: a dict containing the list of domains found.
:rtype: dict
"""
vars.task.setDefaultException(
se.GetStorageDomainListError("spUUID=%s imgUUID=%s" %
(spUUID, imgUUID)))
vars.task.getSharedLock(STORAGE, spUUID)
pool = self.getPool(spUUID)
# Find out domain list from the pool metadata
activeDoms = sorted(pool.getDomains(activeOnly=True).keys())
imgDomains = []
for sdUUID in activeDoms:
dom = sdCache.produce(sdUUID=sdUUID)
if dom.isData():
with rmanager.acquireResource(STORAGE, sdUUID,
rm.LockType.shared):
try:
imgs = dom.getAllImages()
except se.StorageDomainDoesNotExist:
self.log.error("domain %s can't be reached.",
sdUUID, exc_info=True)
else:
if imgUUID in imgs:
imgDomains.append(sdUUID)
return dict(domainslist=imgDomains)
@public
def prepareForShutdown(self, options=None):
"""
Prepares to shutdown host.
Stops all tasks.
.. note::
shutdown cannot be cancelled, must stop all actions.
:param options: ?
"""
# TODO: Implement!!!! TBD: required functionality (stop hsm tasks,
# stop spm tasks if spm etc.)
try:
self._connectionMonitor.stopMonitoring()
sp.StoragePool.cleanupMasterMount()
self.__releaseLocks()
for spUUID in self.pools:
# Stop spmMailer thread
if self.pools[spUUID].spmMailer:
self.pools[spUUID].spmMailer.stop()
self.pools[spUUID].spmMailer.tp.joinAll(waitForTasks=False)
# Stop hsmMailer thread
if self.pools[spUUID].hsmMailer:
self.pools[spUUID].hsmMailer.stop()
# Stop repoStat threads
try:
self.domainMonitor.close()
except Exception:
self.log.warning("Failed to stop RepoStats thread",
exc_info=True)
self.taskMng.prepareForShutdown()
        except Exception:
            self.log.exception("Failed to prepare for shutdown")
@classmethod
def __releaseLocks(cls):
"""
Releases all locks held by the machine.
"""
# We are initializing the vdsm and should not be holding ANY lock
# so we make sure no locks are held by the machine (e.g. because of
# previous vdsm runs)
# killall -INT will trigger lock release (proper shutdown)
lockCmd = config.get('irs', 'lock_cmd')
try:
misc.killall(lockCmd, signal.SIGUSR1, group=True)
except OSError as e:
if e.errno == errno.ESRCH:
return
raise
cls.log.warning("Found lease locks, releasing")
for i in range(10):
time.sleep(1)
try:
misc.killall(lockCmd, 0)
except OSError as e:
if e.errno == errno.ESRCH:
return
cls.log.warning("Could not release locks, killing lock processes")
misc.killall(lockCmd, signal.SIGKILL, group=True)
@public
def fenceSpmStorage(self, spUUID, lastOwner, lastLver, options=None):
"""
        Fences the SPM via the storage.
Right now it just clears the owner and last ver fields.
:param spUUID: The UUID of the storage pool you want to modify.
:type spUUID: UUID
:param lastOwner: obsolete
:param lastLver: obsolete
:param options: ?
        :returns: a dict containing the SPM's state
:rtype: dict
"""
vars.task.setDefaultException(
se.SpmFenceError("spUUID=%s, lastOwner=%s, lastLver=%s" %
(spUUID, lastOwner, lastLver)))
pool = self.getPool(spUUID)
if isinstance(pool.getBackend(), StoragePoolDiskBackend):
pool.getBackend().invalidateMetadata()
vars.task.getExclusiveLock(STORAGE, spUUID)
pool.getBackend().forceFreeSpm()
return dict(spm_st=self._getSpmStatusInfo(pool))
@public
def upgradeStoragePool(self, spUUID, targetDomVersion):
targetDomVersion = int(targetDomVersion)
pool = self.getPool(spUUID)
pool._upgradePool(targetDomVersion)
return {"upgradeStatus": "started"}
def _getDomsStats(self, domainMonitor, doms):
domInfo = {}
repoStats = self._getRepoStats(domainMonitor)
for sdUUID, sdStatus in doms.iteritems():
# Return statistics for active domains only
domInfo[sdUUID] = {'status': sdStatus, 'alerts': [],
'isoprefix': ''}
if sdStatus != sd.DOM_ACTIVE_STATUS or sdUUID not in repoStats:
continue
domInfo[sdUUID]['version'] = repoStats[sdUUID]['result']['version']
# For unreachable domains repoStats will return disktotal and
# diskfree as None.
if (repoStats[sdUUID]['disktotal'] is not None
and repoStats[sdUUID]['diskfree'] is not None):
domInfo[sdUUID]['disktotal'] = repoStats[sdUUID]['disktotal']
domInfo[sdUUID]['diskfree'] = repoStats[sdUUID]['diskfree']
if not repoStats[sdUUID]['mdavalid']:
domInfo[sdUUID]['alerts'].append({
'code': se.SmallVgMetadata.code,
'message': se.SmallVgMetadata.message,
})
self.log.warning("VG %s's metadata size too small %s",
sdUUID, repoStats[sdUUID]['mdasize'])
if not repoStats[sdUUID]['mdathreshold']:
domInfo[sdUUID]['alerts'].append({
'code': se.VgMetadataCriticallyFull.code,
'message': se.VgMetadataCriticallyFull.message,
})
self.log.warning("VG %s's metadata size exceeded critical "
"size: mdasize=%s mdafree=%s", sdUUID,
repoStats[sdUUID]['mdasize'],
repoStats[sdUUID]['mdafree'])
if repoStats[sdUUID]['isoprefix'] is not None:
domInfo[sdUUID]['isoprefix'] = repoStats[sdUUID]['isoprefix']
return domInfo
def _getRepoStats(self, domainMonitor):
repoStats = {}
statsGenTime = time.time()
for sdUUID, domStatus in domainMonitor.getDomainsStatus():
if domStatus.error is None:
code = 0
elif isinstance(domStatus.error, se.StorageException):
code = domStatus.error.code
else:
code = se.StorageException.code
disktotal, diskfree = domStatus.diskUtilization
vgmdtotal, vgmdfree = domStatus.vgMdUtilization
lastcheck = '%.1f' % (statsGenTime - domStatus.checkTime)
repoStats[sdUUID] = {
'finish': domStatus.checkTime,
'result': {
'code': code,
'lastCheck': lastcheck,
'delay': str(domStatus.readDelay),
'valid': (domStatus.error is None),
'version': domStatus.version,
# domStatus.hasHostId can also be None
'acquired': domStatus.hasHostId is True,
'actual': domStatus.actual
},
'disktotal': disktotal,
'diskfree': diskfree,
'mdavalid': domStatus.vgMdHasEnoughFreeSpace,
'mdathreshold': domStatus.vgMdFreeBelowThreashold,
'mdasize': vgmdtotal,
'mdafree': vgmdfree,
'masterValidate': {
'mount': domStatus.masterMounted,
'valid': domStatus.masterValid
},
'isoprefix': domStatus.isoPrefix,
}
return repoStats
@public
def repoStats(self, options=None):
"""
Collects a storage repository's information and stats.
:param options: ?
:returns: result
"""
result = {}
repo_stats = self._getRepoStats(self.domainMonitor)
for d in repo_stats:
result[d] = repo_stats[d]['result']
return result
@deprecated
@public
def startMonitoringDomain(self, sdUUID, hostID, options=None):
with rmanager.acquireResource(STORAGE, HSM_DOM_MON_LOCK,
rm.LockType.exclusive):
self.domainMonitor.startMonitoring(sdUUID, int(hostID), False)
@deprecated
@public
def stopMonitoringDomain(self, sdUUID, options=None):
with rmanager.acquireResource(STORAGE, HSM_DOM_MON_LOCK,
rm.LockType.exclusive):
self.domainMonitor.stopMonitoring([sdUUID])
@public
def getHostLeaseStatus(self, domains):
"""
Returns host lease status for specified domains.
Warning: Internal use only.
:param domains: mapping of host id indexed by domain uuid.
:returns: mapping of host lease status indexed by domain
uuid. See clusterlock.py for possible values and
their meaning.
"""
return {'domains': self.domainMonitor.getHostStatus(domains)}
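# Editor's note: a minimal, hypothetical client-side sketch (not part of
# vdsm). Assuming `hsm` is a connected HSM instance:
#     hsm.getHostLeaseStatus({'<sdUUID>': 1})  # -> {'domains': {...}}
#     hsm.repoStats()  # -> per-domain 'result' sub-dicts as built in
#                      #    _getRepoStats (code, lastCheck, delay, valid,
#                      #    version, acquired, actual)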
|
txomon/vdsm
|
vdsm/storage/hsm.py
|
Python
|
gpl-2.0
| 138543
|
#! /usr/bin/python
import cv2
import sys
from os import path, getenv
PPRZ_SRC = getenv("PAPARAZZI_SRC", path.normpath(path.join(path.dirname(path.abspath(__file__)), '../../../')))
sys.path.append(PPRZ_SRC + "/sw/ext/pprzlink/lib/v1.0/python")
from pprzlink.ivy import IvyMessagesInterface
from pprzlink.message import PprzMessage
class RtpViewer:
frame = None
mouse = dict()
def __init__(self, src):
# Create the video capture device
self.cap = cv2.VideoCapture(src)
# Start the ivy interface
self.ivy = IvyMessagesInterface("RTPviewer", start_ivy=False)
self.ivy.start()
# Create a named window and add a mouse callback
cv2.namedWindow('rtp')
cv2.setMouseCallback('rtp', self.on_mouse)
def run(self):
# Start an 'infinite' loop
while True:
# Read a frame from the video capture
ret, self.frame = self.cap.read()
# Quit if frame could not be retrieved or 'q' is pressed
if not ret or cv2.waitKey(1) & 0xFF == ord('q'):
break
# Run the computer vision function
self.cv()
def cv(self):
# If a selection is happening
if self.mouse.get('start'):
# Draw a rectangle indicating the region of interest
cv2.rectangle(self.frame, self.mouse['start'], self.mouse['now'], (0, 255, 0), 2)
# Show the image in a window
cv2.imshow('rtp', self.frame)
def on_mouse(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
self.mouse['start'] = (x, y)
if event == cv2.EVENT_RBUTTONDOWN:
self.mouse['start'] = None
if event == cv2.EVENT_MOUSEMOVE:
self.mouse['now'] = (x, y)
if event == cv2.EVENT_LBUTTONUP:
# If mouse start is defined, a region has been selected
if not self.mouse.get('start'):
return
# Obtain mouse start coordinates
sx, sy = self.mouse['start']
# Create a new message
msg = PprzMessage("datalink", "VIDEO_ROI")
msg['ac_id'] = None
msg['startx'] = sx
msg['starty'] = sy
msg['width'] = abs(x - sx)
msg['height'] = abs(y - sy)
msg['downsized_width'] = self.frame.shape[1]
# Send message via the ivy interface
self.ivy.send_raw_datalink(msg)
# Reset mouse start
self.mouse['start'] = None
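    # Editor's note: a worked example with made-up coordinates. Dragging
    # from start=(100, 50) and releasing at (160, 130) sends a VIDEO_ROI
    # with startx=100, starty=50, width=abs(160-100)=60 and
    # height=abs(130-50)=80.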
def cleanup(self):
# Shutdown ivy interface
self.ivy.shutdown()
if __name__ == '__main__':
viewer = RtpViewer("rtp_viewer.sdp")
if not viewer.cap.isOpened():
viewer.cleanup()
sys.exit("Can't open video stream")
viewer.run()
viewer.cleanup()
|
TomasDuro/paparazzi
|
sw/tools/rtp_viewer/rtp_viewer.py
|
Python
|
gpl-2.0
| 2844
|
'''
This module controls the dialog to set filter criteria
'''
from PyQt5 import QtCore, Qt, QtWidgets
from views.filter_dialog import Ui_FilterDialog
class FilterGamesController(QtWidgets.QDialog):
'''
Controller object for the filter games dialog.
'''
def __init__(self, table, parent=None):
QtWidgets.QDialog.__init__(self, parent)
self.user_interface = Ui_FilterDialog()
self.user_interface.setupUi(self)
self.table = table
self.canceled = False
self.filtering_all = True
self.initialize_ui()
self.setup_signals()
def initialize_ui(self):
'''
Connects interface's sections with their corresponding models
'''
def assign_model(model, list_widget):
'''
Private function to populate a specific section in the
dialog with the values stored in a model
parameters:
- model: the model assigned to the dialog section
- list_widget: the list widget to be populated
'''
model_qt = Qt.QStandardItemModel()
values_list = model.get_list()
for value in values_list:
item = Qt.QStandardItem(value)
item.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled)
item.setData(QtCore.Qt.Checked, QtCore.Qt.CheckStateRole)
if model.get_filtered(value):
item.setCheckState(QtCore.Qt.Unchecked)
model_qt.appendRow(item)
list_widget.setModel(model_qt)
assign_model(self.table.models['system_list_model'], self.user_interface.listSystem)
assign_model(self.table.models['status_list_model'], self.user_interface.listStatus)
assign_model(self.table.models['label_list_model'], self.user_interface.listLabel)
assign_model(self.table.models['difficulty_list_model'], self.user_interface.listDifficulty)
def setup_signals(self):
'''
Connects interface's widgets signals to the corresponding slots
'''
def select_all(list_view):
'''
Generic callback for a 'select all' button
parameters:
            - list_view: the list affected when the user clicks 'select all'
'''
model_qt = list_view.model()
for index in range(model_qt.rowCount()):
item = model_qt.item(index)
if item.isCheckable() and item.checkState() == QtCore.Qt.Unchecked:
item.setCheckState(QtCore.Qt.Checked)
def deselect_all(list_view):
'''
Generic callback for a 'deselect all' button
parameters:
- list_view: the list affected when the user clicks 'deselect all'
'''
model_qt = list_view.model()
for index in range(model_qt.rowCount()):
item = model_qt.item(index)
if item.isCheckable() and item.checkState() == QtCore.Qt.Checked:
item.setCheckState(QtCore.Qt.Unchecked)
self.user_interface.pushButtonSelectAllSystem.clicked.connect(
lambda: select_all(self.user_interface.listSystem))
self.user_interface.pushButtonDeselectAllSystem.clicked.connect(
lambda: deselect_all(self.user_interface.listSystem))
self.user_interface.pushButtonSelectAllStatus.clicked.connect(
lambda: select_all(self.user_interface.listStatus))
self.user_interface.pushButtonDeselectAllStatus.clicked.connect(
lambda: deselect_all(self.user_interface.listStatus))
self.user_interface.pushButtonSelectAllLabel.clicked.connect(
lambda: select_all(self.user_interface.listLabel))
self.user_interface.pushButtonDeselectAllLabel.clicked.connect(
lambda: deselect_all(self.user_interface.listLabel))
self.user_interface.pushButtonSelectAllDifficulty.clicked.connect(
lambda: select_all(self.user_interface.listDifficulty))
self.user_interface.pushButtonDeselectAllDifficulty.clicked.connect(
lambda: deselect_all(self.user_interface.listDifficulty))
self.user_interface.pushButtonOk.clicked.connect(self.ok_clicked)
self.user_interface.pushButtonCancel.clicked.connect(self.cancel_clicked)
def ok_clicked(self):
'''
Callback for when the user clicks the 'ok' button. The dialog is closed and
the parent is informed by means of an attribute that the changes have to
take effect
'''
self.canceled = False
self.hide()
def cancel_clicked(self):
'''
Callback for when the user clicks the 'cancel' button. The dialog is closed
and the parent is informed by means of an attribute that changes shouldn't
take effect
'''
self.canceled = True
self.hide()
def closeEvent(self, event):
'''
Overriding the closeEvent from the QDialog class. This tells the main window
controller to behave as if the Cancel button was pressed.
parameters:
- event: the passed event (not used in this overriden version)
'''
# pylint: disable=invalid-name
# pylint: disable=unused-argument
self.canceled = True
def apply_filtering(self):
'''
        Updates the models with information about which values are to be filtered
'''
def apply_filtering_per_type(model, list_widget):
'''
Updates a specific model
parameters:
- model: the model to be updated
- list_widget: the list associated to that model
'''
model_qt = list_widget.model()
for index in range(model_qt.rowCount()):
item = model_qt.item(index)
model.set_filtered(str(item.text()), item.checkState() != QtCore.Qt.Checked)
if not self.canceled:
apply_filtering_per_type(
self.table.models['system_list_model'],
self.user_interface.listSystem)
apply_filtering_per_type(
self.table.models['status_list_model'],
self.user_interface.listStatus)
apply_filtering_per_type(
self.table.models['label_list_model'],
self.user_interface.listLabel)
apply_filtering_per_type(
self.table.models['difficulty_list_model'],
self.user_interface.listDifficulty)
self.table.hide_rows()
models = [self.table.models['system_list_model'],
self.table.models['status_list_model'],
self.table.models['label_list_model'],
self.table.models['difficulty_list_model']]
model = 0
while model < len(models) and not models[model].is_any_filtered():
model = model + 1
self.filtering_all = model >= len(models)
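        # Editor's note: the loop above asks "does any model filter
        # anything?". With hypothetical state where only the status model
        # (index 1) has a filtered value, the loop stops at model == 1, so
        # self.filtering_all == (1 >= 4) == False; it is True only when no
        # value is filtered anywhere, i.e. every row stays visible.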
|
pablosuau/pyBacklogger
|
controllers/filter_games_controller.py
|
Python
|
gpl-2.0
| 7069
|
"""thumbbar.py - Thumbnail sidebar for main window."""
import urllib
import Queue
import gtk
import gobject
import threading
from mcomix.preferences import prefs
from mcomix import image_tools
from mcomix import tools
from mcomix import constants
from mcomix import callback
class ThumbnailSidebar(gtk.HBox):
"""A thumbnail sidebar including scrollbar for the main window."""
def __init__(self, window):
gtk.HBox.__init__(self, False, 0)
self._window = window
# Cache/thumbnail load status
self._loaded = False
self._is_loading = False
self._stop_cacheing = False
# Caching threads
self._cache_threads = None
self._currently_selected_page = 0
self._selection_is_forced = False
# models - contains data
self._thumbnail_liststore = gtk.ListStore(gobject.TYPE_INT, gtk.gdk.Pixbuf)
# view - responsible for laying out the columns
self._treeview = gtk.TreeView(self._thumbnail_liststore)
# enable drag and dropping of images from thumbnail bar to some file
# manager
self._treeview.enable_model_drag_source(gtk.gdk.BUTTON1_MASK,
[('text/uri-list', 0, 0)], gtk.gdk.ACTION_COPY)
self._thumbnail_page_treeviewcolumn = gtk.TreeViewColumn(None)
self._thumbnail_image_treeviewcolumn = gtk.TreeViewColumn(None)
self._treeview.append_column(self._thumbnail_page_treeviewcolumn)
self._treeview.append_column(self._thumbnail_image_treeviewcolumn)
self._text_cellrenderer = gtk.CellRendererText()
self._pixbuf_cellrenderer = gtk.CellRendererPixbuf()
bg_colour = prefs['thumb bg colour']
self._pixbuf_cellrenderer.set_property('cell-background-gdk', gtk.gdk.colormap_get_system().alloc_color(gtk.gdk.Color(
bg_colour[0], bg_colour[1], bg_colour[2]), False, True))
self._text_cellrenderer.set_property('background-gdk', gtk.gdk.colormap_get_system().alloc_color(gtk.gdk.Color(
bg_colour[0], bg_colour[1], bg_colour[2]), False, True))
self._thumbnail_page_treeviewcolumn.set_sizing(gtk.TREE_VIEW_COLUMN_GROW_ONLY)
self._thumbnail_page_treeviewcolumn.pack_start(self._text_cellrenderer, False)
self._thumbnail_page_treeviewcolumn.add_attribute(self._text_cellrenderer, 'text', 0)
if not prefs['show page numbers on thumbnails']:
self._thumbnail_page_treeviewcolumn.set_property('visible', False)
self._thumbnail_image_treeviewcolumn.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
self._thumbnail_image_treeviewcolumn.pack_start(self._pixbuf_cellrenderer, True)
self._thumbnail_image_treeviewcolumn.add_attribute(self._pixbuf_cellrenderer, 'pixbuf', 1)
self._thumbnail_image_treeviewcolumn.set_alignment(0.0)
self._layout = gtk.Layout()
self._layout.put(self._treeview, 0, 0)
self.update_layout_size()
self._treeview.set_headers_visible(False)
self._vadjust = self._layout.get_vadjustment()
self._vadjust.step_increment = 15
self._vadjust.page_increment = 1
self._scroll = gtk.VScrollbar(None)
self._scroll.set_adjustment(self._vadjust)
self._selection = self._treeview.get_selection()
self.pack_start(self._layout)
self.pack_start(self._scroll)
self._treeview.connect('columns-changed', self.refresh)
self._treeview.connect('expose-event', self.refresh)
self._treeview.connect_after('drag_begin', self._drag_begin)
self._treeview.connect('drag_data_get', self._drag_data_get)
self._selection.connect('changed', self._selection_event)
self._layout.connect('scroll_event', self._scroll_event)
self.show_all()
def toggle_page_numbers_visible(self):
if prefs['show page numbers on thumbnails']:
self._thumbnail_page_treeviewcolumn.set_property('visible', True)
else:
self._thumbnail_page_treeviewcolumn.set_property('visible', False)
self.update_layout_size()
def update_layout_size(self):
new_width = prefs['thumbnail size'] + 9
if self._window.filehandler.file_loaded and prefs['show page numbers on thumbnails']:
new_width += tools.number_of_digits(self._window.imagehandler.get_number_of_pages()) * 10
if prefs['thumbnail size'] <= 65:
new_width += 8
self._layout.set_size_request(new_width, -1)
self._treeview.set_size_request(new_width, -1)
def get_width(self):
"""Return the width in pixels of the ThumbnailSidebar."""
return self._layout.size_request()[0] + self._scroll.size_request()[0]
def show(self, *args):
"""Show the ThumbnailSidebar."""
self.show_all()
self.load_thumbnails(True)
def hide(self):
"""Hide the ThumbnailSidebar."""
self.hide_all()
def clear(self):
"""Clear the ThumbnailSidebar of any loaded thumbnails."""
self._stop_cacheing = True
if self._cache_threads is not None:
for thread in self._cache_threads:
thread.join()
self._thumbnail_liststore.clear()
self._layout.set_size(0, 0)
self.hide()
self._loaded = False
self._is_loading = False
self._stop_cacheing = False
self._cache_threads = None
self._currently_selected_page = 0
def load_thumbnails(self, force_load=False):
"""Load the thumbnails, if it is appropriate to do so."""
if (not self._window.filehandler.file_loaded or
self._window.imagehandler.get_number_of_pages() == 0 or
self._is_loading or self._loaded or self._stop_cacheing):
return
self._load(force_load)
def refresh(self, *args):
if not self._is_loading:
self._layout.set_size(0, self.get_needed_thumbnail_height())
def resize(self):
"""Reload the thumbnails with the size specified by in the
preferences.
"""
self.clear()
self.load_thumbnails()
def update_select(self):
"""Select the thumbnail for the currently viewed page and make sure
that the thumbbar is scrolled so that the selected thumb is in view.
"""
# this is set to True so that when the event 'scroll-event' is triggered
# the function _scroll_event will not automatically jump to that page.
# this allows for the functionality that when going to a previous page the
# main window will start at the bottom of the image.
self._selection_is_forced = True
self._selection.select_path(
self._window.imagehandler.get_current_page() - 1)
rect = self._treeview.get_background_area(
self._window.imagehandler.get_current_page() - 1, self._thumbnail_image_treeviewcolumn)
if (rect.y < self._vadjust.get_value() or rect.y + rect.height >
self._vadjust.get_value() + self._vadjust.page_size):
value = rect.y + (rect.height // 2) - (self._vadjust.page_size // 2)
value = max(0, value)
value = min(self._vadjust.upper - self._vadjust.page_size, value)
self._vadjust.set_value(value)
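    # Editor's note: a numeric illustration of the centring above, with
    # made-up values rect.y=500, rect.height=80 and page_size=400:
    # value = 500 + 40 - 200 = 340, then clamped to
    # [0, upper - page_size], leaving the selected thumb mid-viewport.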
def thread_cache_thumbnails(self):
"""Start threaded thumb cacheing.
"""
# Get numbers of pages that need to be cached.
thumbnails_needed = Queue.Queue()
page_count = self._window.imagehandler.get_number_of_pages()
for page in xrange(1, page_count + 1):
thumbnails_needed.put(page)
# Start worker threads
thread_count = 3
self._cache_threads = [
threading.Thread(target=self.cache_thumbnails, args=(thumbnails_needed,))
for _ in range(thread_count) ]
for thread in self._cache_threads:
thread.setDaemon(True)
thread.start()
def cache_thumbnails(self, pages):
""" Done by worker threads to create pixbufs for the
pages passed into <pages>. """
while not self._stop_cacheing and not pages.empty():
try:
page = pages.get_nowait()
except Queue.Empty:
break
pixbuf = self._window.imagehandler.get_thumbnail(page,
prefs['thumbnail size'], prefs['thumbnail size']) or \
constants.MISSING_IMAGE_ICON
pixbuf = image_tools.add_border(pixbuf, 1)
pages.task_done()
if not self._stop_cacheing:
self.thumbnail_loaded(page, pixbuf)
if not self._stop_cacheing:
pages.join()
self._loaded = True
self._is_loading = False
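    # Editor's note: thread_cache_thumbnails() and cache_thumbnails()
    # form a classic producer/consumer pattern: one Queue.Queue of page
    # numbers, three daemon worker threads draining it via get_nowait(),
    # and task_done()/join() to detect when all pages are thumbnailed.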
@callback.Callback
def thumbnail_loaded(self, page, pixbuf):
""" Callback when a new thumbnail has been created.
<pixbuf_info> is a tuple of (page, pixbuf). """
iter = self._thumbnail_liststore.iter_nth_child(None, page - 1)
if iter and self._thumbnail_liststore.iter_is_valid(iter):
self._thumbnail_liststore.set(iter, 0, page, 1, pixbuf)
if self._loaded:
# Update height
self._layout.set_size(0, self.get_needed_thumbnail_height())
def _load(self, force_load=False):
# Create empty preview thumbnails.
filler = self.get_empty_thumbnail()
page_count = self._window.imagehandler.get_number_of_pages()
while len(self._thumbnail_liststore) < page_count:
self._thumbnail_liststore.append([len(self._thumbnail_liststore) + 1, filler])
if force_load or prefs['show thumbnails'] or not prefs['delay thumbnails']:
# Start threads for thumbnailing.
self._loaded = False
self._is_loading = True
self._stop_cacheing = False
self.thread_cache_thumbnails()
if not prefs['show thumbnails']:
# The control needs to be exposed at least once to enable height
# calculation.
self.show_all()
self.hide_all()
# Update layout and current image selection in the thumb bar.
self.update_layout_size()
self.update_select()
# Set height appropriate for dummy pixbufs.
if not self._loaded:
pixbuf_padding = 2
self._layout.set_size(0,
filler.get_height() * page_count +
pixbuf_padding * page_count)
def get_thumbnail(self, page):
""" Gets the thumbnail pixbuf for the selected <page>.
Numbering of <page> starts with 1. """
iter = self._thumbnail_liststore.iter_nth_child(None, page - 1)
if iter and self._thumbnail_liststore.iter_is_valid(iter):
return self._thumbnail_liststore.get_value(iter, 1)
else:
return self.get_empty_thumbnail()
def get_needed_thumbnail_height(self):
""" Gets the height for all thumbnails, as indicated by the treeview. """
pages = len(self._thumbnail_liststore)
height = 0
for page in xrange(pages):
height += self._treeview.get_background_area(page,
self._thumbnail_image_treeviewcolumn).height
return height
def _get_selected_row(self):
"""Return the index of the currently selected row."""
try:
return self._selection.get_selected_rows()[1][0][0]
except Exception:
return None
def _selection_event(self, tree_selection, *args):
"""Handle events due to changed thumbnail selection."""
if not self._window.was_out_of_focus:
try:
selected_row = self._get_selected_row()
self._currently_selected_page = selected_row
if not self._selection_is_forced:
self._window.set_page(selected_row + 1)
except Exception:
pass
else:
# if the window was out of focus and the user clicks on
# the thumbbar then do not select that page because they
# more than likely have many pages open and are simply trying
# to give mcomix focus again
self._selection.select_path(self._currently_selected_page)
self._window.was_out_of_focus = False
self._selection_is_forced = False
def _scroll_event(self, widget, event):
"""Handle scroll events on the thumbnail sidebar."""
if event.direction == gtk.gdk.SCROLL_UP:
self._vadjust.set_value(self._vadjust.get_value() - 60)
elif event.direction == gtk.gdk.SCROLL_DOWN:
upper = self._vadjust.upper - self._vadjust.page_size
self._vadjust.set_value(min(self._vadjust.get_value() + 60, upper))
def _drag_data_get(self, treeview, context, selection, *args):
"""Put the URI of the selected file into the SelectionData, so that
the file can be copied (e.g. to a file manager).
"""
try:
selected = self._get_selected_row()
path = self._window.imagehandler.get_path_to_page(selected + 1)
uri = 'file://localhost' + urllib.pathname2url(path)
selection.set_uris([uri])
except Exception:
pass
def _drag_begin(self, treeview, context):
"""We hook up on drag_begin events so that we can set the hotspot
for the cursor at the top left corner of the thumbnail (so that we
might actually see where we are dropping!).
"""
path = treeview.get_cursor()[0]
pixmap = treeview.create_row_drag_icon(path)
# context.set_icon_pixmap() seems to cause crashes, so we do a
# quick and dirty conversion to pixbuf.
pointer = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, True, 8,
*pixmap.get_size())
pointer = pointer.get_from_drawable(pixmap, treeview.get_colormap(),
0, 0, 0, 0, *pixmap.get_size())
context.set_icon_pixbuf(pointer, -5, -5)
def change_thumbnail_background_color(self, colour):
self.set_thumbnail_background(colour)
        # Quickly toggle the HBox's visibility; this forces the thumbnail
        # background to update when changing the colour. If there is a
        # better or easier way to force a refresh, I have not found it.
if prefs['show thumbnails'] and not (self._window.is_fullscreen and prefs['hide all in fullscreen']):
self.hide_all()
self.show_all()
else:
self.show_all()
self.hide_all()
while gtk.events_pending():
gtk.main_iteration(False)
def get_empty_thumbnail(self):
""" Create an empty filler pixmap. """
pixbuf = gtk.gdk.Pixbuf(colorspace=gtk.gdk.COLORSPACE_RGB,
has_alpha=True,
bits_per_sample=8,
width=prefs['thumbnail size'], height=prefs['thumbnail size'])
# Make the pixbuf transparent.
pixbuf.fill(0)
return pixbuf
def set_thumbnail_background(self, colour):
color = gtk.gdk.colormap_get_system().alloc_color(
gtk.gdk.Color(colour[0], colour[1], colour[2]),
False, True)
self._pixbuf_cellrenderer.set_property('cell-background-gdk',
color)
self._text_cellrenderer.set_property('background-gdk',
color)
# vim: expandtab:sw=4:ts=4
|
mxtthias/mcomix
|
mcomix/thumbbar.py
|
Python
|
gpl-2.0
| 15561
|
from Components.Converter.Converter import Converter
from Components.Element import cached
from pprint import pprint
# the protocol works as the following:
# lines starting with '-' are fatal errors (no recovery possible),
# lines starting with '=' are progress notices,
# lines starting with '+' are PIDs to record:
# "+d:[p:t[,p:t...]]" with d=demux nr, p: pid, t: type
class Streaming2(Converter):
@cached
def getText(self):
service = self.source.service
if service is None:
return "-NO SERVICE\n"
streaming = service.stream()
s = streaming and streaming.getStreamingData()
if s is None or not any(s):
err = hasattr(service, 'getError') and service.getError()
if err:
return "-SERVICE ERROR:%d\n" % err
else:
return "=NO STREAM\n"
retval = "+%d:%s" % (s["demux"], ','.join(["%x:%s" % (x[0], x[1]) for x in s["pids"]]))
if "default_audio_pid" in s:
retval += ",%x:%s" % (s["default_audio_pid"], "default_audio_pid")
retval += "\n"
return(retval)
text = property(getText)
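# Editor's note: a hypothetical rendering of the '+' line built above, for
# demux 0 with two pids and a default audio pid (pids are printed in hex
# by the %x format, one "pid:type" pair each):
#     "+0:64:pat,c8:pmt,1f4:default_audio_pid\n"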
|
openatv/enigma2
|
lib/python/Components/Converter/Streaming2.py
|
Python
|
gpl-2.0
| 1029
|
# -*- coding: iso-8859-1 -*-
from enigma import eConsoleAppContainer
from Components.Console import Console
from Components.About import about
from Components.PackageInfo import PackageInfoHandler
from Components.Language import language
from Components.Sources.List import List
from Components.Ipkg import IpkgComponent
from Components.Network import iNetwork
from Tools.Directories import pathExists, fileExists, resolveFilename, SCOPE_METADIR
from Tools.HardwareInfo import HardwareInfo
from time import time
from boxbranding import getImageVersion
class SoftwareTools(PackageInfoHandler):
lastDownloadDate = None
NetworkConnectionAvailable = None
list_updating = False
available_updates = 0
available_updatelist = []
available_packetlist = []
installed_packetlist = {}
def __init__(self):
aboutInfo = getImageVersion()
if aboutInfo.startswith("dev-"):
self.ImageVersion = 'Experimental'
else:
self.ImageVersion = 'Stable'
self.language = language.getLanguage()[:2] # getLanguage returns e.g. "fi_FI" for "language_country"
PackageInfoHandler.__init__(self, self.statusCallback, blocking = False, neededTag = 'ALL_TAGS', neededFlag = self.ImageVersion)
self.directory = resolveFilename(SCOPE_METADIR)
self.hardware_info = HardwareInfo()
self.list = List([])
self.NotifierCallback = None
self.Console = Console()
self.UpdateConsole = Console()
self.cmdList = []
self.unwanted_extensions = ('-dbg', '-dev', '-doc', '-staticdev', '-src')
self.ipkg = IpkgComponent()
self.ipkg.addCallback(self.ipkgCallback)
def statusCallback(self, status, progress):
pass
def startSoftwareTools(self, callback = None):
if callback is not None:
self.NotifierCallback = callback
iNetwork.checkNetworkState(self.checkNetworkCB)
def checkNetworkCB(self, data):
if data is not None:
if data <= 2:
self.NetworkConnectionAvailable = True
self.getUpdates()
else:
self.NetworkConnectionAvailable = False
self.getUpdates()
def getUpdates(self, callback = None):
if self.lastDownloadDate is None:
if self.NetworkConnectionAvailable:
self.lastDownloadDate = time()
if self.list_updating is False and callback is None:
self.list_updating = True
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is False and callback is not None:
self.list_updating = True
self.NotifierCallback = callback
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is True and callback is not None:
self.NotifierCallback = callback
else:
self.list_updating = False
if callback is not None:
callback(False)
elif self.NotifierCallback is not None:
self.NotifierCallback(False)
else:
if self.NetworkConnectionAvailable:
self.lastDownloadDate = time()
if self.list_updating is False and callback is None:
self.list_updating = True
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is False and callback is not None:
self.list_updating = True
self.NotifierCallback = callback
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is True and callback is not None:
self.NotifierCallback = callback
else:
if self.list_updating and callback is not None:
self.NotifierCallback = callback
self.startIpkgListAvailable()
else:
self.list_updating = False
if callback is not None:
callback(False)
elif self.NotifierCallback is not None:
self.NotifierCallback(False)
def ipkgCallback(self, event, param):
if event == IpkgComponent.EVENT_ERROR:
self.list_updating = False
if self.NotifierCallback is not None:
self.NotifierCallback(False)
elif event == IpkgComponent.EVENT_DONE:
if self.list_updating:
self.startIpkgListAvailable()
def startIpkgListAvailable(self, callback = None):
if callback is not None:
self.list_updating = True
if self.list_updating:
if not self.UpdateConsole:
self.UpdateConsole = Console()
cmd = self.ipkg.ipkg + " list"
self.UpdateConsole.ePopen(cmd, self.IpkgListAvailableCB, callback)
def IpkgListAvailableCB(self, result, retval, extra_args = None):
(callback) = extra_args or None
if result:
if self.list_updating:
self.available_packetlist = []
for x in result.splitlines():
tokens = x.split(' - ')
name = tokens[0].strip()
if not any(name.endswith(x) for x in self.unwanted_extensions):
l = len(tokens)
version = l > 1 and tokens[1].strip() or ""
descr = l > 2 and tokens[2].strip() or ""
self.available_packetlist.append([name, version, descr])
if callback is None:
self.startInstallMetaPackage()
else:
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
callback(True)
else:
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(False)
def startInstallMetaPackage(self, callback = None):
if callback is not None:
self.list_updating = True
if self.list_updating:
if self.NetworkConnectionAvailable:
if not self.UpdateConsole:
self.UpdateConsole = Console()
cmd = self.ipkg.ipkg + " install enigma2-meta enigma2-plugins-meta enigma2-skins-meta"
self.UpdateConsole.ePopen(cmd, self.InstallMetaPackageCB, callback)
else:
self.InstallMetaPackageCB(True)
def InstallMetaPackageCB(self, result, retval = None, extra_args = None):
(callback) = extra_args or None
if result:
self.fillPackagesIndexList()
if callback is None:
self.startIpkgListInstalled()
else:
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
callback(True)
else:
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(False)
def startIpkgListInstalled(self, callback = None):
if callback is not None:
self.list_updating = True
if self.list_updating:
if not self.UpdateConsole:
self.UpdateConsole = Console()
cmd = self.ipkg.ipkg + " list_installed"
self.UpdateConsole.ePopen(cmd, self.IpkgListInstalledCB, callback)
def IpkgListInstalledCB(self, result, retval, extra_args = None):
(callback) = extra_args or None
if result:
self.installed_packetlist = {}
for x in result.splitlines():
tokens = x.split(' - ')
name = tokens[0].strip()
if not any(name.endswith(x) for x in self.unwanted_extensions):
l = len(tokens)
version = l > 1 and tokens[1].strip() or ""
self.installed_packetlist[name] = version
for package in self.packagesIndexlist[:]:
if not self.verifyPrerequisites(package[0]["prerequisites"]):
self.packagesIndexlist.remove(package)
for package in self.packagesIndexlist[:]:
attributes = package[0]["attributes"]
if "packagetype" in attributes:
if attributes["packagetype"] == "internal":
self.packagesIndexlist.remove(package)
if callback is None:
self.countUpdates()
else:
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
callback(True)
else:
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(False)
def countUpdates(self, callback = None):
self.available_updates = 0
self.available_updatelist = []
for package in self.packagesIndexlist[:]:
attributes = package[0]["attributes"]
packagename = attributes["packagename"]
for x in self.available_packetlist:
if x[0] == packagename:
if packagename in self.installed_packetlist:
if self.installed_packetlist[packagename] != x[1]:
self.available_updates +=1
self.available_updatelist.append([packagename])
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(True)
callback = None
elif self.NotifierCallback is not None:
self.NotifierCallback(True)
self.NotifierCallback = None
def startIpkgUpdate(self, callback = None):
if not self.Console:
self.Console = Console()
cmd = self.ipkg.ipkg + " update"
self.Console.ePopen(cmd, self.IpkgUpdateCB, callback)
def IpkgUpdateCB(self, result, retval, extra_args = None):
(callback) = extra_args or None
if result:
if self.Console:
if len(self.Console.appContainers) == 0:
if callback is not None:
callback(True)
callback = None
def cleanupSoftwareTools(self):
self.list_updating = False
if self.NotifierCallback is not None:
self.NotifierCallback = None
self.ipkg.stop()
if self.Console is not None:
if len(self.Console.appContainers):
for name in self.Console.appContainers.keys():
self.Console.kill(name)
if self.UpdateConsole is not None:
if len(self.UpdateConsole.appContainers):
for name in self.UpdateConsole.appContainers.keys():
self.UpdateConsole.kill(name)
def verifyPrerequisites(self, prerequisites):
if "hardware" in prerequisites:
hardware_found = False
for hardware in prerequisites["hardware"]:
if hardware == self.hardware_info.device_name:
hardware_found = True
if not hardware_found:
return False
return True
iSoftwareTools = SoftwareTools()
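# Illustrative use from a plugin (a sketch, assuming the enigma2 runtime;
# 'finished' is a hypothetical callback, not part of this module):
#   def finished(success):
#       print "update check done, updates:", iSoftwareTools.available_updates
#   iSoftwareTools.startSoftwareTools(finished)
# The callback receives False on network or ipkg errors and True once the
# available, meta and installed package lists have been collected.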
|
atvcaptain/enigma2
|
lib/python/Plugins/SystemPlugins/SoftwareManager/SoftwareTools.py
|
Python
|
gpl-2.0
| 9,405
|
__author__ = 'fatihka'
from sqlalchemy import Column, Integer, String, Unicode, Float, Boolean, create_engine, Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# db_name = 'ww.db'
db_name = ':memory:'
tanimlar = {'company': 'Fatih Ka.', 'optional': 'NO'}
periodss = list()
len_periods = 0
Hesaplar = None
session = None
Base = declarative_base()
__all__ = ['Hesaplar', 'Lead', 'session', 'tanimlar']
class Lead(Base):
__tablename__ = 'ana_hesaplar'
id = Column(Integer, primary_key=True)
name = Column(String, nullable=True)
lead_code = Column(String, nullable=True)
account = Column(String, nullable=True)
account_name = Column(String, nullable=True)
def make_hesaplar():
class Hesaplar(Base):
__table__ = Table('hesaplar', Base.metadata,
Column('id', Integer, primary_key=True),
Column('number', String, nullable=True),
Column('ana_hesap', String, nullable=True),
Column('name', Unicode, nullable=True),
Column('lead_code', String, default='Unmapped', nullable=True),
Column('len', Integer, nullable=True),
Column('bd', Boolean, nullable=True, default=False),
Column('optional', Unicode, nullable=True),
*[Column('%s' % i, Float, nullable=True, default=0) for i in periodss]
)
return Hesaplar
def create_db():
global session
engine = create_engine("sqlite:///%s" % db_name, echo=False) # engine = create_engine("sqlite://", echo=False)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
return session
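if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): register two
    # hypothetical period columns before the dynamic table is built.
    periodss.extend(['2013', '2014'])
    Hesaplar = make_hesaplar()
    session = create_db()
    session.add(Lead(name='Cash', lead_code='A.1'))
    session.commit()
    print(session.query(Lead).count())  # -> 1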
|
xybydy/kirilim
|
db.py
|
Python
|
gpl-2.0
| 1,832
|
from django.views.generic.base import TemplateView
from sarGraphs.lib.sar import get_cpu, get_load
from sarGraphs.lib.sar import get_swap, get_memory
class HomeView(TemplateView):
'Home Page View'
template_name = "home.html"
    def get_context_data(self, **kwargs):
        # build on the base TemplateView context instead of discarding it
        context = super(HomeView, self).get_context_data(**kwargs)
context['cpu'] = get_cpu('%idle')
context['iowait'] = get_cpu('%iowait')
context['swap'] = get_swap('%swpused')
context['mem'] = get_memory()
context['load'] = get_load()
return context
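# Illustrative URL wiring (assumed, not part of this file; Django 1.x style):
#   from django.conf.urls.defaults import patterns, url
#   from sarGraphs.views import HomeView
#   urlpatterns = patterns('', url(r'^$', HomeView.as_view(), name='home'))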
|
jness/pysarGraphs
|
sarGraphs/views.py
|
Python
|
gpl-2.0
| 536
|
# -----------------------------------------------------------------------------
# Karajlug.org
# Copyright (C) 2010 Karajlug community
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------------
from django.contrib import admin
from django.utils.translation import ugettext as _
from .models import Project, Repository
class ProjectAdmin(admin.ModelAdmin):
"""
Admin interface class for project model
"""
list_display = ("__unicode__", "version", "home", "license", "vcs",
"creator", "weight")
ordering = ("weight", )
list_editable = ("home", "weight")
search_fields = ("name", )
prepopulated_fields = {"slug": ("name",)}
list_filter = ("creator", )
def save_model(self, request, obj, form, change):
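        # stamp the requesting admin user as creator on every save (add or edit)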
obj.creator = request.user
obj.save()
class RepositoryAdmin(admin.ModelAdmin):
"""
Admin interface class for repository model
"""
list_display = ("project", "address", "weight")
list_editable = ("address", "weight")
ordering = ("weight", )
search_fields = ("project", )
list_filter = ("project", )
admin.site.register(Project, ProjectAdmin)
admin.site.register(Repository, RepositoryAdmin)
|
Karajlug/karajlug
|
projects/admin.py
|
Python
|
gpl-2.0
| 1,973
|
#!/usr/bin/env python3
from optparse import OptionParser
from datetime import datetime
from datetime import timedelta
import pyopencl as cl
import numpy as np
import time
MIN_ELAPSED = 0.25
KEY_LENGTH = 64
BUF_MAX_SIZE= 1024 * 1024
class BurnInTarget():
def __init__(self, platform, kernel):
self.name = platform.get_info(cl.platform_info.NAME)
self.devices = platform.get_devices()
self.context = cl.Context(self.devices)
self.queue = cl.CommandQueue(self.context)
self.program = cl.Program(self.context, kernel).build()
self.minXSize = 16
self.minYSize = 16
# Host bufs
self.hostInfoBuf = np.array(range(2), dtype=np.uint32)
self.hostInfoBuf[0] = 8 # Rounds for each kernel
self.hostInfoBuf[1] = 8
        # np.random.rand() returns floats in [0, 1); casting those to uint32
        # truncates them all to zero, so draw genuine 32-bit words instead
        self.hostInBuf = np.random.randint(0, 2**32, size=BUF_MAX_SIZE, dtype=np.uint32)
self.hostOutBuf = np.array(range(BUF_MAX_SIZE), dtype=np.uint32)
# Device bufs
self.devInfoBuf = cl.Buffer(self.context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=self.hostInfoBuf)
self.devInBuf = cl.Buffer(self.context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=self.hostInBuf)
self.devOutBuf = cl.Buffer(self.context, cl.mem_flags.WRITE_ONLY, self.hostOutBuf.nbytes)
def burn(self, shape):
event = self.program.burn(self.queue, shape, None, self.devInfoBuf, self.devInBuf, self.devOutBuf)
return event
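# The OpenCL program (default.cl is assumed, not shown here) is expected to
# export a kernel matching the burn() call above, roughly:
#   __kernel void burn(__global const uint *info,
#                      __global const uint *in, __global uint *out)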
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-k", "--kernel",
dest="kernel", default='default.cl',
help="Kernel to burn in")
(opts, args) = parser.parse_args()
kernel = open(opts.kernel).read()
# Get all available device and create context for each
platforms = cl.get_platforms()
targets = []
for p in platforms:
vendor = p.get_info(cl.platform_info.VENDOR)
name = p.get_info(cl.platform_info.NAME)
if('Intel' in vendor):
print("Found platform: %s" % name)
targets.append(BurnInTarget(p, kernel))
# Tune runtime for each target
for t in targets:
xsize = 8
ysize = 32
print("Adjusting runtime for platform: %s" % t.name)
elapsed = timedelta()
while(elapsed.total_seconds() < MIN_ELAPSED):
if(elapsed.total_seconds() < (MIN_ELAPSED/2)):
xsize = xsize << 1
else:
xsize = xsize + 8
# Get some power credit
time.sleep(10)
startTime = datetime.utcnow()
event = t.burn((xsize, ysize))
event.wait()
endTime = datetime.utcnow()
elapsed = endTime - startTime
print("Kernel Elapsed Time: %s" % elapsed.total_seconds())
t.minXSize = xsize
t.minYSize = ysize
print("Final min size: %d, %d" % (t.minXSize, t.minYSize))
# Burn in one by one
time.sleep(20)
for t in targets:
print("Burning platform: %s" % t.name)
startTime = datetime.utcnow()
events =[]
# Make sure this is longer than Tu of PL2
for i in range(16):
events.append(t.burn((8*t.minXSize, 2*t.minYSize)))
for e in events:
e.wait()
endTime = datetime.utcnow()
elapsed = endTime - startTime
print("Kernel Elapsed Time: %s" % elapsed.total_seconds())
time.sleep(20)
#
# # All together
# events =[]
# print("Burning platforms all together, at the same time")
# startTime = datetime.utcnow()
# for i in range(8):
# for t in targets:
# events.append(t.burn((8*t.minXSize, 2*t.minYSize)))
#
# for e in events:
# e.wait()
#
# endTime = datetime.utcnow()
# elapsed = endTime - startTime
# print("Kernel Elapsed Time: %s" % elapsed.total_seconds())
# time.sleep(30)
#
# time.sleep(30)
# print("Burning platforms with sequence")
# events =[]
# startTime = datetime.utcnow()
# for i in range(8):
# for t in sorted(targets, key=lambda x:x.name):
# events.append(t.burn((8*t.minXSize, 2*t.minYSize)))
# time.sleep(2)
#
# for e in events:
# e.wait()
#
# endTime = datetime.utcnow()
# elapsed = endTime - startTime
# print("Kernel Elapsed Time: %s" % elapsed.total_seconds())
#
# time.sleep(30)
# print("Burning platforms with reverse sequence")
# events =[]
# startTime = datetime.utcnow()
# for i in range(8):
# for t in sorted(targets, key=lambda x:x.name, reverse=True):
# events.append(t.burn((8*t.minXSize, 2*t.minYSize)))
# time.sleep(2)
#
# for e in events:
# e.wait()
#
# endTime = datetime.utcnow()
# elapsed = endTime - startTime
# print("Kernel Elapsed Time: %s" % elapsed.total_seconds())
print("Burn in test done", flush=True)
time.sleep(2)
|
jimmysitu/jBenchmark
|
micro-benchmark/MixBurnIn/MixBurnIn.py
|
Python
|
gpl-2.0
| 4,944
|
# peppy Copyright (c) 2006-2010 Rob McMullen
# Licenced under the GPLv2; see http://peppy.flipturn.org for more info
"""Spell checking provider
"""
import os, sys
import wx
from wx.lib.pubsub import Publisher
from peppy.yapsy.plugins import *
from peppy.lib.stcspellcheck import *
class SpellCheck(IPeppyPlugin):
"""Plugin for spell check provider
This simple plugin provides the spelling checker for Fundamental mode.
"""
def activateHook(self):
Publisher().subscribe(self.getProvider, 'spelling.provider')
Publisher().subscribe(self.defaultLanguage, 'spelling.default_language')
def deactivateHook(self):
Publisher().unsubscribe(self.getProvider)
Publisher().unsubscribe(self.defaultLanguage)
def getProvider(self, message):
message.data.append(STCSpellCheck)
def defaultLanguage(self, message):
lang = message.data
STCSpellCheck.setDefaultLanguage(lang)
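# Message flow sketch (illustrative): a consumer collects spell-check
# providers by publishing a list that subscribed plugins append to:
#   providers = []
#   Publisher().sendMessage('spelling.provider', providers)
#   # providers now contains STCSpellCheck while this plugin is active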
|
robmcmullen/peppy
|
peppy/plugins/spelling.py
|
Python
|
gpl-2.0
| 958
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""extends the standard Python gettext classes
allows multiple simultaneous domains... (makes multiple sessions with different languages easier too)"""
# Copyright 2002, 2003 St James Software
#
# This file is part of jToolkit.
#
# jToolkit is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# jToolkit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jToolkit; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import gettext
import locale
import os.path
from errno import ENOENT
from jToolkit import languagenames
class ManyTranslations(gettext.NullTranslations):
"""this proxies to many translations"""
def __init__(self, translations=None):
"""Takes an optional sequence of translations."""
gettext.NullTranslations.__init__(self)
if translations is None:
self.translations = []
else:
self.translations = translations
def gettext(self, message):
"""gets the translation of the message by searching through all the domains"""
for translation in self.translations:
tmsg = translation._catalog.get(message, None)
if tmsg is not None:
return tmsg
return message
def ngettext(self, singular, plural, n):
"""gets the plural translation of the message by searching through all the domains"""
for translation in self.translations:
if not hasattr(translation, "plural"):
continue
            # bind to a fresh name so the 'plural' argument is not clobbered
            pluralfunc = translation.plural
            tmsg = translation._catalog.get((singular, pluralfunc(n)), None)
if tmsg is not None:
return tmsg
if n == 1:
return singular
else:
return plural
def ugettext(self, message):
"""gets the translation of the message by searching through all the domains (unicode version)"""
for translation in self.translations:
tmsg = translation._catalog.get(message, None)
# TODO: we shouldn't set _charset like this. make sure it is set properly
if translation._charset is None: translation._charset = 'UTF-8'
if tmsg is not None:
if isinstance(tmsg, unicode):
return tmsg
else:
return unicode(tmsg, translation._charset)
return unicode(message)
def ungettext(self, singular, plural, n):
"""gets the plural translation of the message by searching through all the domains (unicode version)"""
for translation in self.translations:
if not hasattr(translation, "plural"):
continue
            # bind to a fresh name so the 'plural' argument is not clobbered
            pluralfunc = translation.plural
            tmsg = translation._catalog.get((singular, pluralfunc(n)), None)
# TODO: we shouldn't set _charset like this. make sure it is set properly
if translation._charset is None: translation._charset = 'UTF-8'
if tmsg is not None:
if isinstance(tmsg, unicode):
return tmsg
else:
return unicode(tmsg, translation._charset)
if n == 1:
return unicode(singular)
else:
return unicode(plural)
def getinstalledlanguages(localedir):
"""looks in localedir and returns a list of languages installed there"""
languages = []
def visit(arg, dirname, names):
if 'LC_MESSAGES' in names:
languages.append(os.path.basename(dirname))
os.path.walk(localedir, visit, None)
return languages
def getlanguagenames(languagecodes):
"""return a dictionary mapping the language code to the language name..."""
return dict([(code, languagenames.languagenames.get(code, code)) for code in languagecodes])
def findmany(domains, localedir=None, languages=None):
"""same as gettext.find, but handles many domains, returns many mofiles (not just one)"""
mofiles = []
if languages is None:
languages = getinstalledlanguages(localedir)
for domain in domains:
mofile = gettext.find(domain, localedir, languages)
mofiles.append(mofile)
return mofiles
def translation(domains, localedir=None, languages=None, class_=None):
"""same as gettext.translation, but handles many domains, returns a ManyTranslations object"""
if class_ is None:
class_ = gettext.GNUTranslations
mofiles = findmany(domains, localedir, languages)
    # we'll just use null translations where domains are missing; the commented code below would instead refuse and raise an error:
# if None in mofiles:
# missingindex = mofiles.index(None)
# raise IOError(ENOENT, 'No translation file found for domain', domains[missingindex])
translations = []
for mofile in mofiles:
if mofile is None:
t = gettext.NullTranslations()
t._catalog = {}
else:
key = os.path.abspath(mofile)
t = gettext._translations.get(key)
if t is None:
t = gettext._translations.setdefault(key, class_(open(mofile, 'rb')))
translations.append(t)
return ManyTranslations(translations)
def getdefaultlanguage(languagelist):
"""tries to work out the default language from a list"""
def reducelocale(locale):
pos = locale.find('_')
if pos == -1:
return locale
else:
return locale[:pos]
currentlocale, currentencoding = locale.getlocale()
try:
defaultlocale, defaultencoding = locale.getdefaultlocale()
except ValueError:
defaultlocale, defaultencoding = None, None
if len(languagelist) > 0:
if currentlocale is not None:
if currentlocale in languagelist:
return currentlocale
elif reducelocale(currentlocale) in languagelist:
return reducelocale(currentlocale)
if defaultlocale is not None:
if defaultlocale in languagelist:
return defaultlocale
elif reducelocale(defaultlocale) in languagelist:
return reducelocale(defaultlocale)
return languagelist[0]
else:
# if our language list is empty, we'll just ignore it
if currentlocale is not None:
return currentlocale
elif defaultlocale is not None:
return defaultlocale
return None
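# Minimal usage sketch (illustrative; the 'po' directory and domain names are
# assumed):
#   langs = getinstalledlanguages('po')
#   t = translation(['jToolkit', 'myapp'], localedir='po', languages=langs)
#   print t.ugettext('Hello')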
|
cc-archive/jtoolkit
|
jToolkit/localize.py
|
Python
|
gpl-2.0
| 6,307
|
from setuptools import setup, find_packages
import sys, os
version = '0.1'
setup(
name='appserver',
version=version,
description="Sample application server for bilsbrowser",
long_description="""\
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='ElevenCraft Inc.',
author_email='matt@11craft.com',
url='http://github.com/11craft/bilsbrowser/',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'Django == 1.0.2-final',
],
entry_points="""
# -*- Entry points: -*-
""",
)
|
11craft/bilsbrowser
|
examples/appserver/setup.py
|
Python
|
gpl-2.0
| 720
|
#!/usr/bin/env python
# calculates safety factor in hoisting lines with various number of parts & rotating sheaves
# (c) 2013, D. Djokic
# No guaranties, whatsoever - use code on your own risk
# Released under GNU General Public License
'''
IADC safety factor recommendations:
drilling and other routine operations = 3
mast rising = 2.5
setting casing = 2
jarring = 2
'''
def get_float(message, default):
    #get float number - error check included; entering 0 or blank selects the default
    try:
        f = input(message)
        if f == 0 or f == "" or f == " ":
            return float(default)
        return float(f)
    except:
        print("Wrong Input! Try again")
        return get_float(message, default)
def get_integer(message, default):
    #get integer number - error check included; entering 0 or blank selects the default
    try:
        f = input(message)
        if f == 0 or f == "" or f == " ":
            return int(default)
        return int(f)
    except:
        print("Wrong Input! Try again")
        return get_integer(message, default)
def write_file (file, description, var):
#write to file
file.write ("\n")
file.write (str(description))
file.write ("\t")
file.write (str(var))
W = get_float ("Hoisted Weight in tones (1 ton = 2000lb) = ", 40)
L = get_float ("Capacity of existing wire rope in tones (1 ton = 2000lb): ", 90)
n = get_integer ("Number of lines or '0' for default of 4 = ", 4)
s = get_integer ("Number of rotating sheaves or '0' for equal to number of lines = ", n)
print ("Sheave bearing factor: 1.045 for Bronze Bushing; 1.02 for Roller Bearing" )
K = get_float ("Sheave roller bearing friction factor - enter '0' for default of 1.045 = ", 1.045)
print ("Wire line efficiency due to bending")
print ("1 - D/d ratio = 25:1 - API 9A")
print ("2 - D/d ratio = 40:1 - API 9A")
print ("3 - Input your data for wire line efficiency")
dratio = get_integer ("Choose 1, 2 or 3: ", 1)
if dratio == 1:
wire_eff = 0.95
elif dratio == 2:
wire_eff = 0.97
else:
wire_eff = get_float ("Input wire line efficiency due to bending <1: ", 0.95)
#sfact=L*wire_eff*(K**n-1)/((W*K**s)*(K-1))
# overall hoisting efficiency E = (K**n - 1)/(n * K**s * (K - 1)); without the
# 1/n term, the n*mechEfficiency line below double-counted the line count and
# inflated the safety factor by a factor of n (the commented reference formula
# above agrees with this corrected form)
mechEfficiency = (K**n-1)/(n*(K**s)*(K-1))
mechAdv = n*mechEfficiency
linePull = W/mechAdv
linePull_bend = linePull/wire_eff
sfact = L/linePull_bend
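# Worked example with the defaults above (W=40, L=90, n=s=4, K=1.045,
# wire_eff=0.95): efficiency ~ 0.897, mechanical advantage ~ 3.59, fast-line
# pull ~ 11.7 tons after bending losses, hence sfact ~ 7.7.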
fname = 'hoisting_sf.txt'
fn = open (fname, 'a')
fn.write ('hoisting_sf.py Output:')
print ("\n\nSafety Factor for Operation with this wire = %f" %(sfact))
print ("\nIADC Recommended Safety Factors:\n\nDrilling and other routine operations = 3\nMast Rising = 2.5\nSetting Casing = 2\nJarring = 2")
write_file (fn, "Hoisted weight in tones (1 ton = 2000 lb): ", W)
write_file (fn, "Capacity of existing wire rope in tones (1 ton = 2000lb): ", L)
write_file (fn, "Number of lines: ", n)
write_file (fn, "Number of rotating sheaves: " ,s)
write_file (fn, "Sheave roller bearing friction factor: ", K)
write_file (fn, "Wire Line Efficiency due to bending: ", wire_eff)
write_file (fn, "Safety Factor: ", sfact)
fn.write("\n\nIADC Recommended Safety Factors:")
fn.write ("\nDrilling and other routine operations = 3")
fn.write ("\nMast Rising = 2.5")
fn.write ("\nSetting Casing = 2")
fn.write ("\nJarring = 2")
fn.write ("\nValidate results! No any warranties are associated with this code!")
fn.close()
print ("Check file 'hoisting_sf.txt' in working folder!")
|
ddjokic/Hoisting
|
hoisting_sf.py
|
Python
|
gpl-2.0
| 3,295
|
from distutils.core import setup, Extension
include_dirs = ['/usr/include', '/usr/local/include']
library_dirs = ['/usr/lib', '/usr/local/lib']
libraries = ['jpeg']
runtime_library_dirs = []
extra_objects = []
define_macros = []
setup(name = "pyjpegoptim",
version = "0.1.1",
author = "Guangming Li",
author_email = "leeful@gmail.com",
license = "GPL",
description = 'a utility for optimizing JPEG files',
url = "https://github.com/cute/pyjpegoptim",
keywords = ['JpegOptim', 'TinyJpeg'],
packages = ["pyjpegoptim"],
ext_package = "pyjpegoptim",
ext_modules = [Extension( name = "jpegoptim",
sources = ["src/jpegoptim.c"],
include_dirs = include_dirs,
library_dirs = library_dirs,
runtime_library_dirs = runtime_library_dirs,
libraries = libraries,
extra_objects = extra_objects,
define_macros = define_macros
)],
)
|
cute/pyjpegoptim
|
setup.py
|
Python
|
gpl-2.0
| 1,139
|
# ####################################################################
# gofed - set of tools to automize packaging of golang devel codes
# Copyright (C) 2014 Jan Chaloupka, jchaloup@redhat.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ####################################################################
import sys
import re
import os
import urllib2
import optparse
from subprocess import Popen, PIPE
from modules.Utils import GREEN, RED, ENDC
from modules.Packages import packageInPkgdb
from modules.Utils import FormatedPrint
from modules.ImportPath import ImportPath
from modules.ImportPathsDecomposer import ImportPathsDecomposer
from modules.GoSymbolsExtractor import GoSymbolsExtractor
from modules.Config import Config
if __name__ == "__main__":
parser = optparse.OptionParser("%prog [-a] [-c] [-d [-v]] [directory]")
parser.add_option_group( optparse.OptionGroup(parser, "directory", "Directory to inspect. If empty, current directory is used.") )
parser.add_option(
"", "-a", "--all", dest="all", action = "store_true", default = False,
help = "Display all imports including golang native"
)
parser.add_option(
"", "-c", "--classes", dest="classes", action = "store_true", default = False,
help = "Decompose imports into classes"
)
parser.add_option(
"", "-d", "--pkgdb", dest="pkgdb", action = "store_true", default = False,
help = "Check if a class is in the PkgDB (only with -c option)"
)
parser.add_option(
"", "-v", "--verbose", dest="verbose", action = "store_true", default = False,
help = "Show all packages if -d option is on"
)
parser.add_option(
"", "-s", "--short", dest="short", action = "store_true", default = False,
help = "Display just classes without its imports"
)
parser.add_option(
"", "", "--spec", dest="spec", action = "store_true", default = False,
help = "Display import path for spec file"
)
parser.add_option(
"", "-r", "--requires", dest="requires", action = "store_true", default = False,
help = "Use Requires instead of BuildRequires. Used only with --spec option."
)
parser.add_option(
"", "", "--skip-errors", dest="skiperrors", action = "store_true", default = False,
help = "Skip all errors during Go symbol parsing"
)
parser.add_option(
"", "", "--importpath", dest="importpath", default = "",
help = "Don't display class belonging to IMPORTPATH prefix"
)
parser.add_option(
"", "", "--scan-all-dirs", dest="scanalldirs", action = "store_true", default = False,
help = "Scan all dirs, including Godeps directory"
)
parser.add_option(
"", "", "--skip-dirs", dest="skipdirs", default = "",
help = "Scan all dirs except specified via SKIPDIRS. Directories are comma separated list."
)
parser.add_option(
"", "", "--all-occurrences", dest="alloccurrences", action = "store_true", default = False,
help = "List imported paths in all packages including main. Default is skip main packages."
)
parser.add_option(
"", "", "--show-occurrence", dest="showoccurrence", action = "store_true", default = False,
help = "Show occurence of import paths."
)
options, args = parser.parse_args()
path = "."
if len(args):
path = args[0]
fmt_obj = FormatedPrint()
if not options.scanalldirs:
noGodeps = Config().getSkippedDirectories()
else:
noGodeps = []
if options.skipdirs:
for dir in options.skipdirs.split(','):
dir = dir.strip()
if dir == "":
continue
noGodeps.append(dir)
gse_obj = GoSymbolsExtractor(path, imports_only=True, skip_errors=options.skiperrors, noGodeps=noGodeps)
if not gse_obj.extract():
fmt_obj.printError(gse_obj.getError())
exit(1)
package_imports_occurence = gse_obj.getPackageImportsOccurences()
ip_used = gse_obj.getImportedPackages()
ipd = ImportPathsDecomposer(ip_used)
if not ipd.decompose():
fmt_obj.printError(ipd.getError())
exit(1)
warn = ipd.getWarning()
if warn != "":
fmt_obj.printWarning("Warning: %s" % warn)
classes = ipd.getClasses()
sorted_classes = sorted(classes.keys())
# get max length of all imports
max_len = 0
for element in sorted_classes:
if element == "Native":
continue
# class name starts with prefix => filter out
if options.importpath != "" and element.startswith(options.importpath):
continue
gimports = []
for gimport in classes[element]:
if options.importpath != "" and gimport.startswith(options.importpath):
continue
gimports.append(gimport)
for gimport in gimports:
import_len = len(gimport)
if import_len > max_len:
max_len = import_len
if options.spec and options.showoccurrence:
print "# THIS IS NOT A VALID SPEC FORMAT"
print "# COMMENTS HAS TO BE STARTED AT THE BEGGINING OF A LINE"
for element in sorted_classes:
if not options.all and element == "Native":
continue
if not options.alloccurrences:
one_class = []
for gimport in classes[element]:
# does it occur only in main package?
# remove it from classes[element]
skip = True
if gimport in package_imports_occurence:
for occurrence in package_imports_occurence[gimport]:
if not occurrence.endswith(":main"):
skip = False
break
if skip:
continue
one_class.append(gimport)
classes[element] = sorted(one_class)
# class name starts with prefix => filter out
if options.importpath != "" and element.startswith(options.importpath):
continue
# filter out all members of a class prefixed by prefix
gimports = []
for gimport in classes[element]:
if options.importpath != "" and gimport.startswith(options.importpath):
continue
gimports.append(gimport)
if gimports == []:
continue
if options.classes:
# Native class is just printed
if options.all and element == "Native":
# does not make sense to check Native class in PkgDB
if options.pkgdb:
continue
print "Class: %s" % element
if not options.short:
for gimport in gimports:
if options.showoccurrence:
print "\t%s (%s)" % (gimport, ", ".join(package_imports_occurence[gimport]))
else:
print "\t%s" % gimport
continue
# Translate non-native class into package name (if -d option)
if options.pkgdb:
ip_obj = ImportPath(element)
if not ip_obj.parse():
fmt_obj.printWarning("Unable to translate %s to package name" % element)
continue
pkg_name = ip_obj.getPackageName()
if pkg_name == "":
fmt_obj.printWarning(ip_obj.getError())
pkg_in_pkgdb = packageInPkgdb(pkg_name)
if pkg_in_pkgdb:
if options.verbose:
print (GREEN + "Class: %s (%s) PkgDB=%s" + ENDC) % (element, pkg_name, pkg_in_pkgdb)
else:
print (RED + "Class: %s (%s) PkgDB=%s" + ENDC ) % (element, pkg_name, pkg_in_pkgdb)
continue
# Print class
print "Class: %s" % element
if not options.short:
for gimport in sorted(gimports):
if options.showoccurrence:
print "\t%s (%s)" % (gimport, ", ".join(package_imports_occurence[gimport]))
else:
print "\t%s" % gimport
continue
# Spec file BR
if options.spec:
for gimport in sorted(classes[element]):
if options.requires:
if options.showoccurrence:
import_len = len(gimport)
print "Requires: golang(%s) %s# %s" % (gimport, (max_len - import_len)*" ", ", ".join(package_imports_occurence[gimport]))
else:
print "Requires: golang(%s)" % gimport
else:
if options.showoccurrence:
import_len = len(gimport)
print "BuildRequires: golang(%s) %s# %s" % (gimport, (max_len - import_len)*" ", ", ".join(package_imports_occurence[gimport]))
else:
print "BuildRequires: golang(%s)" % gimport
continue
# Just a list of all import paths
for gimport in sorted(classes[element]):
if options.showoccurrence:
import_len = len(gimport)
print "\t%s %s(%s)" % (gimport, (max_len - import_len)*" ", ", ".join(package_imports_occurence[gimport]))
else:
print "\t%s" % gimport
|
piotr1212/gofed
|
ggi.py
|
Python
|
gpl-2.0
| 8,925
|
#
# livef1
#
# f1comment.py - classes to store the live F1 comments
#
# Copyright (c) 2014 Marc Bertens <marc.bertens@pe2mbs.nl>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Special thanks to the live-f1 project 'https://launchpad.net/live-f1'
# * Scott James Remnant
# * Dave Pusey
#
# For showing the way of program logic.
#
import logging
import time
import datetime
__version__ = "0.1"
__applic__ = "Live F1 Web"
__author__ = "Marc Bertens"
class F1Text( object ):
def __init__( self, ts = 0, c = '', t = '' ):
self.timestamp = ts
self.clock = c
self.text = t
return
def reset( self ):
self.timestamp = 0
self.clock = ""
self.text = ""
return
class F1Commentary( object ):
def __init__( self, log ):
self.lines = []
self.log = log
return
def reset( self ):
self.lines = []
return
def gethtml( self, div_tag_name ):
output = ""
for elem in self.lines:
if elem.clock:
sep = "-"
else:
sep = ""
#endif
output = "<tr valign='top'><td>%s</td><td>%s</td><td>%s</td></tr>" % (
elem.clock, sep, elem.text ) + output
return """<div class="%s"><table>%s</table></div>""" % ( div_tag_name, output )
def append( self, new ):
#self.log.info( "Commentary.time : %i" % ( new.timestamp ) )
#self.log.info( "Commentary.text : %s" % ( new.text ) )
if not new.clock:
secs = new.timestamp % 60
mins = new.timestamp // 60
hours = 0
            if ( mins >= 60 ):
hours = mins // 60
mins = mins % 60
# endif
# add time stamp
new.clock = "%02i:%02i" % ( hours, mins )
self.lines.append( F1Text( new.timestamp, new.clock, new.text ) )
return
def dump( self ):
for elem in self.lines:
self.log.info( "Commentary : %s" % ( elem.text ) )
# next
return
comment = None
def GetCommentary():
global comment
    if comment is None:
comment = F1Commentary( logging.getLogger( 'live-f1' ) )
return comment
# end def
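# Minimal usage sketch (illustrative):
#   c = GetCommentary()
#   c.append( F1Text( ts = 125, t = 'Yellow flag in sector 2' ) )
#   html = c.gethtml( 'commentary' ) # clock column shows "00:02"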
|
livef1/Livef1-web
|
src/comment.py
|
Python
|
gpl-2.0
| 3,104
|
#_PYTHON_INSERT_SAO_COPYRIGHT_HERE_(2007)_
#_PYTHON_INSERT_GPL_LICENSE_HERE_
from itertools import izip
import numpy
import time
import ds9
# os.access/R_OK are used by set_region below
from os import access, R_OK
from sherpa.utils import get_keyword_defaults, SherpaFloat
from sherpa.utils.err import DS9Err
_target = 'sherpa'
def _get_win():
return ds9.ds9(_target)
def doOpen():
_get_win()
def isOpen():
targets = ds9.ds9_targets()
if targets is None:
return False
if type(targets) in (list,):
for target in targets:
if _target in target:
return True
return False
def close():
if isOpen():
imager = _get_win()
imager.set("quit")
def delete_frames():
if not isOpen():
raise DS9Err('open')
imager = _get_win()
try:
imager.set("frame delete all")
return imager.set("frame new")
except:
raise DS9Err('delframe')
def get_region(coord):
if not isOpen():
raise DS9Err('open')
imager = _get_win()
try:
regionstr = "regions -format saoimage -strip yes"
if (coord != ''):
if (coord != 'image'):
regionstr = "regions -format ciao -strip yes -system " + str(coord)
else:
regionstr = "regions -format saoimage -strip yes -system image"
reg = imager.get(regionstr)
reg = reg.replace(';','')
return reg
except:
raise DS9Err('retreg')
def image(arr, newframe=False, tile=False):
if not isOpen():
doOpen()
imager = _get_win()
if newframe is True:
try:
imager.set("frame new")
imager.set("frame last")
except:
raise DS9Err('newframe')
try:
if tile is True:
imager.set("tile yes")
else:
imager.set("tile no")
except:
raise DS9Err('settile')
time.sleep(1)
try:
# pyds9 expects shape[::-1] compared to DS9.py
# therefore transpose the image before sending
arr = numpy.asarray(arr, dtype=SherpaFloat)
imager.set_np2arr(arr.T)
except:
raise # DS9Err('noimage')
def _set_wcs(keys):
eqpos, sky, name = keys
phys = ''
wcs = "OBJECT = '%s'\n" % name
if eqpos is not None:
wcrpix = eqpos.crpix
wcrval = eqpos.crval
wcdelt = eqpos.cdelt
if sky is not None:
pcrpix = sky.crpix
pcrval = sky.crval
pcdelt = sky.cdelt
# join together all strings with a '\n' between each
phys = '\n'.join(["WCSNAMEP = 'PHYSICAL'",
"CTYPE1P = 'x '",
'CRVAL1P = %.14E' % pcrval[0],
'CRPIX1P = %.14E' % pcrpix[0],
'CDELT1P = %.14E' % pcdelt[0],
"CTYPE2P = 'y '",
'CRVAL2P = %.14E' % pcrval[1],
'CRPIX2P = %.14E' % pcrpix[1],
'CDELT2P = %.14E' % pcdelt[1]])
if eqpos is not None:
wcdelt = wcdelt * pcdelt
wcrpix = ((wcrpix - pcrval) /
pcdelt + pcrpix )
if eqpos is not None:
# join together all strings with a '\n' between each
wcs = wcs + '\n'.join(["RADECSYS = 'ICRS '",
"CTYPE1 = 'RA---TAN'",
'CRVAL1 = %.14E' % wcrval[0],
'CRPIX1 = %.14E' % wcrpix[0],
'CDELT1 = %.14E' % wcdelt[0],
"CTYPE2 = 'DEC--TAN'",
'CRVAL2 = %.14E' % wcrval[1],
'CRPIX2 = %.14E' % wcrpix[1],
'CDELT2 = %.14E' % wcdelt[1]])
# join the wcs and physical with '\n' between them and at the end
return ('\n'.join([wcs,phys]) + '\n')
def wcs(keys):
if not isOpen():
raise DS9Err('open')
imager = _get_win()
info = _set_wcs( keys )
try:
# use stdin to pass the WCS info
imager.set('wcs replace', info)
except:
raise DS9Err('setwcs')
def open():
doOpen()
def set_region(reg, coord):
if not isOpen():
raise DS9Err('open')
imager = _get_win()
try:
if (access(reg, R_OK) is True):
imager.set("regions load " + "'" + reg + "'")
else:
# Assume region string has to be in CIAO format
regions = reg.split(";")
for region in regions:
if (region != ''):
if (coord != ''):
imager.set("regions", str(coord) + ";" + region)
else:
imager.set("regions", region)
except:
raise DS9Err('badreg', str(reg))
def xpaget(arg):
if not isOpen():
raise DS9Err('open')
imager = _get_win()
return imager.get(arg)
def xpaset(arg, data=None):
if not isOpen():
raise DS9Err('open')
imager = _get_win()
return imager.set(arg, data)
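# Illustrative session (assumes a ds9 binary is installed and on the PATH):
#   import numpy
#   image(numpy.arange(100.0).reshape(10, 10))
#   xpaset('zoom to fit')
#   print xpaget('frame')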
|
brefsdal/sherpa
|
sherpa/image/pyds9_backend.py
|
Python
|
gpl-2.0
| 5,145
|
#!/usr/bin/env python
# Generate the body of ieee.numeric_std and numeric_bit from a template.
# The implementation is based only on the specification and on testing (as
# the specifications are often ambiguous).
# The algorithms are very simple: carry ripple adder, restoring division.
# This file is part of GHDL.
# Both this file and the outputs of this file are copyrighted.
# Copyright (C) 2015 Tristan Gingold
#
# GHDL is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2, or (at your option) any later
# version.
#
# GHDL is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING2. If not see
# <http://www.gnu.org/licenses/>.
import re
import sys
# My python 'style' and knowledge is basic... Do not hesitate to comment.
binary_funcs = [ "and", "nand", "or", "nor", "xor" ]
compare_funcs = [ "=", "/=", ">", ">=", "<", "<=" ]
vec_types = ['UNSIGNED', 'SIGNED']
logics = ['bit', 'std']
logic_types = {'bit' : 'bit', 'std': 'sl_x01' }
logic_undefs = {'bit' : "'0'", 'std': "'X'" }
logic = 'xx' # Current logic, either bit or std
v93=False
# Stream to write.
out=sys.stdout
def w(s):
"Write S to the output"
out.write(s)
def logic_type():
return logic_types[logic]
def logic_undef():
return logic_undefs[logic]
def disp_vec_binary(func, typ):
"Generate the body of a vector binary logic function"
res = """
function "{0}" (l, r : {1}) return {1}
is
subtype res_type is {1} (l'length - 1 downto 0);
alias la : res_type is l;
alias ra : {1} (r'length - 1 downto 0) is r;
variable res : res_type;
begin
if la'left /= ra'left then
assert false
report "NUMERIC_STD.""{0}"": arguments are not of the same length"
severity failure;
res := (others => """ + logic_undef() + """);
else
for I in res_type'range loop
res (I) := la (I) {0} ra (I);
end loop;
end if;
return res;
end "{0}";\n"""
w (res.format(func, typ))
def disp_non_logical_warning(func):
return """
assert NO_WARNING
report "NUMERIC_STD.""{0}"": non logical value detected"
severity warning;""".format(func)
def conv_bit(expr):
if logic == 'std':
return "sl_to_x01 (" + expr + ")"
else:
return expr
def extract_bit(name):
res = "{0}b := " + conv_bit ("{0}a (i)") + ";"
return res.format(name)
def init_carry(func):
if func == '+':
return """
carry := '0';"""
else:
return """
carry := '1';"""
def extract_extend_bit(name,typ):
res = """
if i > {0}a'left then
{0}b := """
if typ == 'UNSIGNED':
res += "'0';"
else:
res += "{0} ({0}'left);"
res += """
else
""" + extract_bit(name) + """
end if;"""
return res.format(name)
def disp_vec_vec_binary(func, typ):
"Generate vector binary function body"
res = """
function "{0}" (l, r : {1}) return {1}
is
constant lft : integer := MAX (l'length, r'length) - 1;
subtype res_type is {1} (lft downto 0);
alias la : {1} (l'length - 1 downto 0) is l;
alias ra : {1} (r'length - 1 downto 0) is r;
variable res : res_type;
variable lb, rb, carry : """ + logic_type () + """;
begin
if la'left < 0 or ra'left < 0 then
return null_{1};
end if;"""
res += init_carry(func)
res += """
for i in 0 to lft loop"""
res += extract_extend_bit('l', typ)
res += extract_extend_bit('r', typ)
if logic == 'std':
res += """
if lb = 'X' or rb = 'X' then""" + \
disp_non_logical_warning(func) + """
res := (others => 'X');
exit;
end if;"""
if func == '-':
res += """
rb := not rb;"""
res += """
res (i) := compute_sum (carry, rb, lb);
carry := compute_carry (carry, rb, lb);
end loop;
return res;
end "{0}";
"""
w (res.format (func, typ))
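# Note: the emitted VHDL relies on compute_sum/compute_carry (full-adder
# helpers) and the null_unsigned/null_signed constants; these are assumed to
# be declared in the hand-written part of the package, not in this template.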
def declare_int_var(name, typ):
res = """
variable {0}1, {0}2 : {1};
variable {0}d : nat1;""";
if typ == "INTEGER":
res += """
constant {0}msb : nat1 := boolean'pos({0} < 0);"""
return res.format(name, typ)
def init_int_var(name, typ):
return """
{0}1 := {0};""".format(name);
def extract_int_lsb(name, typ):
res = """
{0}2 := {0}1 / 2;"""
if typ == "INTEGER":
res += """
if {0}1 < 0 then
{0}d := 2 * {0}2 - {0}1;
{0}1 := {0}2 - {0}d;
else
{0}d := {0}1 - 2 * {0}2;
{0}1 := {0}2;
end if;"""
else:
res += """
{0}d := {0}1 - 2 * {0}2;
{0}1 := {0}2;"""
res += """
{0}b := nat1_to_01 ({0}d);"""
return res.format(name,typ)
def check_int_truncated(func, name, typ):
if typ == "INTEGER":
v = "-{0}msb".format(name)
else:
v = "0"
return """
if {1}1 /= {2} then
assert NO_WARNING
report "NUMERIC_STD.""{0}"": vector is truncated"
severity warning;
end if;""".format(func, name, v)
def create_vec_int_dict(func, left, right):
if left in vec_types:
dic = {'vtype': left,
'itype': right,
'vparam': 'l',
'iparam': 'r'}
else:
dic = {'vtype': right,
'itype': left,
'vparam': 'r',
'iparam': 'l'}
dic.update({'ltype': left,
'rtype': right,
'func': func,
'logic': logic_type()})
return dic
def disp_vec_int_binary(func, left, right):
"Generate vector binary function body"
dic = create_vec_int_dict(func, left, right)
res = """
function "{func}" (l : {ltype}; r : {rtype}) return {vtype}
is
subtype res_type is {vtype} ({vparam}'length - 1 downto 0);
alias {vparam}a : res_type is {vparam};""" + \
declare_int_var (dic["iparam"], dic["itype"]) + """
variable res : res_type;
variable lb, rb, carry : {logic};
begin
      if res'length = 0 then
return null_{vtype};
end if;"""
# Initialize carry. For subtraction, use 2-complement.
res += init_carry(func)
res += init_int_var(dic['iparam'], dic['itype']) + """
for i in res'reverse_range loop
""" + extract_bit(dic['vparam']) + "\n" + \
extract_int_lsb(dic['iparam'], dic['itype']);
if logic == 'std':
res += """
if {vparam}b = 'X' then""" + \
disp_non_logical_warning(func) + """
res := (others => 'X');
{iparam}1 := 0;
exit;
end if;"""
# 2-complement for subtraction
if func == '-':
res += """
rb := not rb;"""
res += """
res (i) := compute_sum (carry, rb, lb);
carry := compute_carry (carry, rb, lb);
end loop;""" + \
check_int_truncated(func, dic['iparam'], dic['itype']) + """
return res;
end "{func}";\n"""
w(res.format (**dic))
def disp_vec_int_gcompare(func, left, right):
"Generate comparison function"
dic = create_vec_int_dict(func, left, right)
res = """
function {func} (l : {ltype}; r : {rtype}) return compare_type
is
subtype res_type is {vtype} ({vparam}'length - 1 downto 0);
alias la : res_type is l;""" + \
declare_int_var (dic['iparam'], dic['itype']) + """
variable lb, rb : {logic};
variable res : compare_type;
begin
res := compare_eq;""";
res += init_int_var(dic['iparam'], dic['itype']) + """
for i in {vparam}a'reverse_range loop
""" + extract_bit (dic['vparam']) + \
extract_int_lsb("r", right)
if logic == 'std':
res += """
if {vparam}b = 'X' then
return compare_unknown;
end if;"""
res += """
if lb = '1' and rb = '0' then
res := compare_gt;
elsif lb = '0' and rb = '1' then
res := compare_lt;
end if;
end loop;"""
if func == "ucompare":
res += """
if r1 /= 0 then
res := compare_lt;
end if;"""
else:
res += """
if """ + conv_bit ("l (l'left)") + """ = '1' then
if r >= 0 then
res := compare_lt;
end if;
else
if r < 0 then
res := compare_gt;
end if;
end if;"""
res += """
return res;
end {func};
"""
w(res.format (**dic))
def disp_vec_int_compare(func, left, right):
"Generate comparison function"
dic = create_vec_int_dict(func, left, right)
res = """
function "{func}" (l : {ltype}; r : {rtype}) return boolean
is
subtype res_type is {vtype} ({vparam}'length - 1 downto 0);
alias {vparam}a : res_type is {vparam};""" + \
declare_int_var (dic['iparam'], dic['itype']) + """
variable res : compare_type;
begin
if {vparam}'length = 0 then
assert NO_WARNING
report "NUMERIC_STD.""{func}"": null argument, returning FALSE"
severity warning;
return false;
end if;
res := """
if left == "SIGNED" or right == "SIGNED":
res += "scompare"
else:
res += "ucompare"
if left in vec_types:
res += " (l, r);"
else:
res += " (r, l);"
if logic == 'std':
res += """
if res = compare_unknown then""" + \
disp_non_logical_warning(func) + """
return false;
end if;"""
if left in vec_types:
res += """
return res {func} compare_eq;"""
else:
res += """
return compare_eq {func} res;"""
res += """
end "{func}";
"""
w(res.format (**dic))
def disp_vec_vec_gcompare(func, typ):
"Generate comparison function"
res = """
function {func} (l, r : {typ}) return compare_type
is
constant sz : integer := MAX (l'length, r'length) - 1;
alias la : {typ} (l'length - 1 downto 0) is l;
alias ra : {typ} (r'length - 1 downto 0) is r;
variable lb, rb : {logic};
variable res : compare_type;
begin"""
if typ == 'SIGNED':
res += """
-- Consider sign bit as S * -(2**N).
lb := """ + conv_bit ("la (la'left)") + """;
rb := """ + conv_bit ("ra (ra'left)") + """;
if lb = '1' and rb = '0' then
return compare_lt;
elsif lb = '0' and rb = '1' then
return compare_gt;
else
res := compare_eq;
end if;"""
else:
res += """
res := compare_eq;"""
if typ == 'SIGNED':
res += """
for i in 0 to sz - 1 loop"""
else:
res += """
for i in 0 to sz loop"""
res += extract_extend_bit('l', typ)
res += extract_extend_bit('r', typ)
if logic == 'std':
res += """
if lb = 'X' or rb = 'X' then
return compare_unknown;
end if;"""
res += """
if lb = '1' and rb = '0' then
res := compare_gt;
elsif lb = '0' and rb = '1' then
res := compare_lt;
end if;
end loop;
return res;
end {func};\n"""
w(res.format (func=func, typ=typ, logic=logic_type()))
def disp_vec_vec_compare(func, typ):
"Generate comparison function"
res = """
function "{func}" (l, r : {typ}) return boolean
is
variable res : compare_type;
begin
if l'length = 0 or r'length = 0 then
assert NO_WARNING
report "NUMERIC_STD.""{func}"": null argument, returning FALSE"
severity warning;
return false;
end if;
res := """
if typ == "SIGNED":
res += "scompare"
else:
res += "ucompare"
res += """ (l, r);"""
if logic == 'std':
res += """
if res = compare_unknown then""" + \
disp_non_logical_warning(func) + """
return false;
end if;"""
res += """
return res {func} compare_eq;
end "{func}";\n"""
w(res.format (func=func, typ=typ))
def disp_vec_not(typ):
"Generate vector binary function body"
w("""
function "not" (l : {0}) return {0}
is
subtype res_type is {0} (l'length - 1 downto 0);
alias la : res_type is l;
variable res : res_type;
begin
for I in res_type'range loop
res (I) := not la (I);
end loop;
return res;
end "not";\n""".format(typ))
def disp_resize(typ):
res = """
function resize (ARG : {0}; NEW_SIZE: natural) return {0}
is
alias arg1 : {0} (ARG'length - 1 downto 0) is arg;
variable res : {0} (new_size - 1 downto 0) := (others => '0');
begin
if new_size = 0 then
return null_{0};
end if;
if arg1'length = 0 then
return res;
end if;
if arg1'length > new_size then
-- Reduction."""
if typ == 'SIGNED':
res += """
res (res'left) := arg1 (arg1'left);
res (res'left - 1 downto 0) := arg1 (res'left - 1 downto 0);"""
else:
res += """
res := arg1 (res'range);"""
res += """
else
-- Expansion
res (arg1'range) := arg1;"""
if typ == 'SIGNED':
res += """
res (res'left downto arg1'length) := (others => arg1 (arg1'left));"""
res += """
end if;
return res;
end resize;\n"""
w(res.format(typ))
def gen_shift(dir, inv):
if (dir == 'left') ^ inv:
res = """
res (res'left downto {opp}count) := arg1 (arg1'left {sub} count downto 0);"""
else:
res = """
res (res'left {sub} count downto 0) := arg1 (arg1'left downto {opp}count);"""
if inv:
return res.format(opp="-", sub="+")
else:
return res.format(opp="", sub="-")
def disp_shift_op(name, typ, dir):
res = """
function {0} (ARG : {1}; COUNT: INTEGER) return {1}
is
subtype res_type is {1} (ARG'length - 1 downto 0);
alias arg1 : res_type is arg;
variable res : res_type := (others => '0');
begin
if res'length = 0 then
return null_{1};
end if;
if count >= 0 and count <= arg1'left then"""
res += gen_shift(dir, False)
res += """
elsif count < 0 and count >= -arg1'left then"""
res += gen_shift(dir, True)
res += """
end if;
return res;
end {0};\n"""
w(res.format(name, typ))
def disp_shift(name, typ, dir):
res = """
function {0} (ARG : {1}; COUNT: NATURAL) return {1}
is
subtype res_type is {1} (ARG'length - 1 downto 0);
alias arg1 : res_type is arg;
variable res : res_type := (others => """
if typ == 'SIGNED' and dir == 'right':
res += "arg1 (arg1'left)"
else:
res += "'0'"
res += """);
begin
if res'length = 0 then
return null_{1};
end if;
if count <= arg1'left then"""
res += gen_shift(dir, False)
res += """
end if;
return res;
end {0};\n"""
w(res.format(name, typ))
def disp_rotate(name, typ, dir):
if 'rotate' in name:
count_type = 'natural'
op = 'rem'
else:
count_type = 'integer'
op = 'mod'
res = """
function {0} (ARG : {1}; COUNT: {2}) return {1}
is
subtype res_type is {1} (ARG'length - 1 downto 0);
alias arg1 : res_type is arg;
variable res : res_type := (others => '0');
variable cnt : natural;
begin
if res'length = 0 then
return null_{1};
end if;
cnt := count """ + op + " res'length;"
if dir == 'left':
res += """
res (res'left downto cnt) := arg1 (res'left - cnt downto 0);
res (cnt - 1 downto 0) := arg1 (res'left downto res'left - cnt + 1);"""
else:
res += """
res (res'left - cnt downto 0) := arg1 (res'left downto cnt);
res (res'left downto res'left - cnt + 1) := arg1 (cnt - 1 downto 0);"""
res += """
return res;
end {0};\n"""
w(res.format(name, typ, count_type))
def disp_vec_vec_mul(func, typ):
res = """
function "{0}" (L, R : {1}) return {1}
is
alias la : {1} (L'Length - 1 downto 0) is l;
alias ra : {1} (R'Length - 1 downto 0) is r;
variable res : {1} (L'length + R'Length -1 downto 0) := (others => '0');
variable rb, lb, vb, carry : """ + logic_type() + """;
begin
if la'length = 0 or ra'length = 0 then
return null_{1};
end if;
-- Shift and add L.
for i in natural range 0 to ra'left """
if typ == 'SIGNED':
res += "- 1 "
res += """loop
""" + extract_bit ('r') + """
if rb = '1' then
-- Compute res := res + shift_left (l, i).
carry := '0';
for j in la'reverse_range loop
lb := la (j);
vb := res (i + j);
res (i + j) := compute_sum (carry, vb, lb);
carry := compute_carry (carry, vb, lb);
end loop;"""
if typ == 'UNSIGNED':
res += """
-- Propagate carry.
for j in i + la'length to res'left loop
exit when carry = '0';
vb := res (j);
res (j) := carry xor vb;
carry := carry and vb;
end loop;"""
else:
res += """
-- Sign extend and propagate carry.
lb := la (la'left);
for j in i + l'length to res'left loop
vb := res (j);
res (j) := compute_sum (carry, vb, lb);
carry := compute_carry (carry, vb, lb);
end loop;"""
if logic == 'std':
res += """
elsif rb = 'X' then""" + \
disp_non_logical_warning (func)
res += """
end if;
end loop;"""
if typ == 'SIGNED':
res += """
if ra (ra'left) = '1' then
-- R is a negative number. It is considered as:
-- -2**n + (Rn-1 Rn-2 ... R0).
-- Compute res := res - 2**n * l.
carry := '1';
for i in la'reverse_range loop
vb := res (ra'length - 1 + i);
lb := not la (i);
res (ra'length - 1+ i) := compute_sum (carry, vb, lb);
carry := compute_carry (carry, vb, lb);
end loop;
vb := res (res'left);
lb := not la (la'left);
res (res'left) := compute_sum (carry, vb, lb);
end if;"""
res += """
return res;
end "{0}";\n"""
w(res.format(func,typ))
def disp_vec_int_mul(left, right):
res = """
function "*" (L : {0}; R : {1}) return {0}
is
constant size : natural := l'length;
begin
if size = 0 then
return null_{0};
end if;
return l * to_{0} (r, size);
end "*";\n"""
w (res.format(left,right))
def disp_int_vec_mul(left, right):
res = """
function "*" (L : {0}; R : {1}) return {1}
is
constant size : natural := r'length;
begin
if size = 0 then
return null_{1};
end if;
return r * to_{1} (l, size);
end "*";\n"""
w (res.format(left,right))
def disp_neg(func):
res = """
function "{func}" (ARG : SIGNED) return SIGNED
is
subtype arg_type is SIGNED (ARG'length - 1 downto 0);
alias arga : arg_type is arg;
variable res : arg_type;
variable carry, a : """ + logic_type() + """;
begin
if arga'length = 0 then
return null_signed;
end if;"""
if logic == 'std':
res += """
if has_0x (arga) = 'X' then""" + \
disp_non_logical_warning("-") + """
return arg_type'(others => 'X');
end if;"""
if func == 'abs':
res += """
if arga (arga'left) = '0' then
return arga;
end if;"""
res += """
carry := '1';
for i in arga'reverse_range loop
a := not arga (i);
res (i) := carry xor a;
carry := carry and a;
end loop;
return res;
end "{func}";\n"""
w(res.format(func=func))
def disp_has_0x(typ):
res = """
function has_0x (a : {0}) return {1}
is
variable res : {1} := '0';
begin
for i in a'range loop"""
if logic == 'std':
res += """
if a (i) = 'X' then
return 'X';
end if;"""
res += """
res := res or a (i);
end loop;
return res;
end has_0x;\n"""
w(res.format(typ, logic_type()))
def disp_size():
w("""
function size_unsigned (n : natural) return natural
is
-- At least one bit (even for 0).
variable res : natural := 1;
variable n1 : natural := n;
begin
while n1 > 1 loop
res := res + 1;
n1 := n1 / 2;
end loop;
return res;
end size_unsigned;\n""")
w("""
function size_signed (n : integer) return natural
is
variable res : natural := 1;
variable n1 : natural;
begin
if n >= 0 then
n1 := n;
else
      -- Use -(N + 1) = -N - 1 (no overflow).
n1 := -(n + 1);
end if;
while n1 /= 0 loop
res := res + 1;
n1 := n1 / 2;
end loop;
return res;
end size_signed;\n""")
def disp_divmod():
w("""
-- All index range are normalized (N downto 0).
-- NUM and QUOT have the same range.
-- DEM and REMAIN have the same range.
-- No 'X'.
procedure divmod (num, dem : UNSIGNED; quot, remain : out UNSIGNED)
is
variable reg : unsigned (dem'left + 1 downto 0) := (others => '0');
variable sub : unsigned (dem'range) := (others => '0');
variable carry, d : """ + logic_type () + """;
begin
for i in num'range loop
-- Shift
reg (reg'left downto 1) := reg (reg'left - 1 downto 0);
reg (0) := num (i);
-- Substract
carry := '1';
for j in dem'reverse_range loop
d := not dem (j);
sub (j) := compute_sum (carry, reg (j), d);
carry := compute_carry (carry, reg (j), d);
end loop;
carry := compute_carry (carry, reg (reg'left), '1');
-- Test
if carry = '0' then
-- Greater than
quot (i) := '0';
else
quot (i) := '1';
reg (reg'left) := '0';
reg (sub'range) := sub;
end if;
end loop;
remain := reg (dem'range);
end divmod;
""")
def disp_vec_vec_udiv(func):
res = """
function "{func}" (L, R : UNSIGNED) return UNSIGNED
is
subtype l_type is UNSIGNED (L'length - 1 downto 0);
subtype r_type is UNSIGNED (R'length - 1 downto 0);
alias la : l_type is l;
alias ra : r_type is r;
variable quot : l_type;
variable rema : r_type;
variable r0 : """ + logic_type() + """ := has_0x (r);
begin
if la'length = 0 or ra'length = 0 then
return null_unsigned;
end if;"""
if logic == 'std':
res += """
if has_0x (l) = 'X' or r0 = 'X' then""" + \
disp_non_logical_warning ('/') + """
return l_type'(others => 'X');
end if;"""
res += """
assert r0 /= '0'
report "NUMERIC_STD.""{func}"": division by 0"
severity error;
divmod (la, ra, quot, rema);"""
if func == '/':
res += """
return quot;"""
else:
res += """
return rema;"""
res += """
end "{func}";\n"""
w(res.format(func=func))
def disp_vec_int_udiv(func):
res = """
function "{func}" (L : UNSIGNED; R : NATURAL) return UNSIGNED
is
constant r_size : natural := size_unsigned (r);
begin
if l'length = 0 then
return null_unsigned;
end if;"""
if func in ['mod', 'rem']:
res += """
return resize (l {func} to_unsigned (r, r_size), l'length);"""
else:
res += """
return l {func} to_unsigned (r, r_size);"""
res += """
end "{func}";\n"""
w(res.format(func=func))
res = """
function "{func}" (L : NATURAL; R : UNSIGNED) return UNSIGNED
is
constant l_size : natural := size_unsigned (l);
begin
if r'length = 0 then
return null_unsigned;
end if;"""
if func == '/':
res += """
return resize (to_unsigned (l, l_size) {func} r, r'length);"""
else:
res += """
return to_unsigned (l, l_size) {func} r;"""
res += """
end "{func}";\n"""
w(res.format(func=func))
def disp_vec_vec_sdiv(func):
res = """
function "{func}" (L, R : SIGNED) return SIGNED
is
subtype l_type is SIGNED (L'length - 1 downto 0);
subtype r_type is SIGNED (R'length - 1 downto 0);
alias la : l_type is l;
alias ra : r_type is r;
subtype l_utype is UNSIGNED (l_type'range);
subtype r_utype is UNSIGNED (r_type'range);
variable lu : l_utype;
variable ru : r_utype;
variable quot : l_utype;
variable rema : r_utype;
variable r0 : """ + logic_type() + """ := has_0x (r);
begin
if la'length = 0 or ra'length = 0 then
return null_signed;
end if;"""
if logic == 'std':
res += """
if has_0x (l) = 'X' or r0 = 'X' then""" + \
disp_non_logical_warning (func) + """
return l_type'(others => 'X');
end if;"""
res += """
assert r0 /= '0'
report "NUMERIC_STD.""{func}"": division by 0"
severity error;"""
res += """
if la (la'left) = '1' then
lu := unsigned (-la);
else
lu := unsigned (la);
end if;
if ra (ra'left) = '1' then
ru := unsigned (-ra);
else
ru := unsigned (ra);
end if;
divmod (lu, ru, quot, rema);"""
if func == '/':
res += """
if (ra (ra'left) xor la (la'left)) = '1' then
return -signed (quot);
else
return signed (quot);
end if;"""
elif func == 'rem':
res += """
-- Result of rem has the sign of the dividend.
if la (la'left) = '1' then
return -signed (rema);
else
return signed (rema);
end if;"""
elif func == 'mod':
res += """
-- Result of mod has the sign of the divisor.
if rema = r_utype'(others => '0') then
-- If the remainder is 0, then the modulus is 0.
return signed (rema);
else
if ra (ra'left) = '1' then
if la (la'left) = '1' then
return -signed (rema);
else
return ra + signed (rema);
end if;
else
if la (la'left) = '1' then
return ra - signed (rema);
else
return signed (rema);
end if;
end if;
end if;"""
res += """
end "{func}";\n"""
w(res.format(func=func))
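# Hedged cross-check (not emitted): VHDL "rem" takes the dividend's sign and
# "mod" the divisor's, mirroring Python's math.fmod vs. the % operator:
#
#   >>> import math
#   >>> math.fmod(-7, 3), -7 % 3
#   (-1.0, 2)   # rem(-7, 3) = -1, mod(-7, 3) = 2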
def disp_vec_int_sdiv(func):
res = """
function "{func}" (L : SIGNED; R : INTEGER) return SIGNED
is
constant r_size : natural := size_signed (r);
begin
if l'length = 0 then
return null_signed;
end if;"""
if func == '/':
res += """
return l {func} to_signed (r, r_size);"""
else:
res += """
return resize (l {func} to_signed (r, r_size), l'length);"""
res += """
end "{func}";\n"""
w(res.format(func=func))
res = """
function "{func}" (L : INTEGER; R : SIGNED) return SIGNED
is
constant l_size : natural := size_signed (l);
begin
if r'length = 0 then
return null_signed;
end if;"""
if func == '/':
res += """
return resize (to_signed (l, max (l_size, r'length)) {func} r, r'length);"""
else:
res += """
return to_signed (l, l_size) {func} r;"""
res += """
end "{func}";\n"""
w(res.format(func=func))
def disp_all_log_funcs():
"Generate all function bodies for logic operators"
for t in vec_types:
disp_resize(t)
for v in vec_types:
disp_vec_not(v)
for f in binary_funcs:
for v in vec_types:
disp_vec_binary(f, v)
disp_vec_vec_gcompare("ucompare", "UNSIGNED")
disp_vec_vec_gcompare("scompare", "SIGNED")
disp_vec_int_gcompare("ucompare", "UNSIGNED", "NATURAL")
disp_vec_int_gcompare("scompare", "SIGNED", "INTEGER")
for f in compare_funcs:
disp_vec_vec_compare(f, "UNSIGNED")
disp_vec_vec_compare(f, "SIGNED")
disp_vec_int_compare(f, "UNSIGNED", "NATURAL")
disp_vec_int_compare(f, "NATURAL", "UNSIGNED")
disp_vec_int_compare(f, "SIGNED", "INTEGER")
disp_vec_int_compare(f, "INTEGER", "SIGNED")
for t in vec_types:
for d in ['left', 'right']:
            disp_shift('shift_' + d, t, d)
        for d in ['left', 'right']:
            disp_rotate('rotate_' + d, t, d)
if v93:
disp_shift_op('"sll"', t, 'left')
disp_shift_op('"srl"', t, 'right')
disp_rotate('"rol"', t, 'left')
disp_rotate('"ror"', t, 'right')
def disp_match(typ):
res = """
function std_match (l, r : {0}) return boolean
is
alias la : {0} (l'length downto 1) is l;
alias ra : {0} (r'length downto 1) is r;
begin
if la'left = 0 or ra'left = 0 then
assert NO_WARNING
report "NUMERIC_STD.STD_MATCH: null argument, returning false"
severity warning;
return false;
elsif la'left /= ra'left then
assert NO_WARNING
report "NUMERIC_STD.STD_MATCH: args length mismatch, returning false"
severity warning;
return false;
else
for i in la'range loop
if not match_table (la (i), ra (i)) then
return false;
end if;
end loop;
return true;
end if;
end std_match;\n"""
w(res.format(typ))
def disp_all_match_funcs():
    disp_match('std_ulogic_vector')
    disp_match('std_logic_vector')
    disp_match('UNSIGNED')
    disp_match('SIGNED')
def disp_all_arith_funcs():
"Generate all function bodies for logic operators"
for op in ['+', '-']:
disp_vec_vec_binary(op, "UNSIGNED")
disp_vec_vec_binary(op, "SIGNED")
disp_vec_int_binary(op, "UNSIGNED", "NATURAL")
disp_vec_int_binary(op, "NATURAL", "UNSIGNED")
disp_vec_int_binary(op, "SIGNED", "INTEGER")
disp_vec_int_binary(op, "INTEGER", "SIGNED")
disp_vec_vec_mul('*', 'UNSIGNED')
disp_vec_vec_mul('*', 'SIGNED')
disp_vec_int_mul('UNSIGNED', 'NATURAL')
disp_vec_int_mul('SIGNED', 'INTEGER')
disp_int_vec_mul('NATURAL', 'UNSIGNED')
disp_int_vec_mul('INTEGER', 'SIGNED')
disp_has_0x('UNSIGNED')
disp_divmod()
disp_size()
disp_vec_vec_udiv('/')
disp_vec_int_udiv('/')
disp_vec_vec_udiv('rem')
disp_vec_int_udiv('rem')
disp_vec_vec_udiv('mod')
disp_vec_int_udiv('mod')
disp_has_0x('SIGNED')
disp_neg("-")
disp_neg("abs")
disp_vec_vec_sdiv('/')
disp_vec_int_sdiv('/')
disp_vec_vec_sdiv('rem')
disp_vec_int_sdiv('rem')
disp_vec_vec_sdiv('mod')
disp_vec_int_sdiv('mod')
# Patterns to replace
pats = {' @LOG\n' : disp_all_log_funcs,
' @ARITH\n' : disp_all_arith_funcs,
' @MATCH\n' : disp_all_match_funcs }
spec_file='numeric_std.vhdl'
#proto_file='numeric_std-body.proto'
def gen_body(proto_file):
w('-- This -*- vhdl -*- file was generated from ' + proto_file + '\n')
for line in open(proto_file):
if line in pats:
pats[line]()
continue
w(line)
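# Hedged illustration (not part of the build): a proto line that exactly
# equals one of the pats keys, e.g. ' @ARITH\n', is swallowed and replaced by
# the output of its callback; every other line is copied through verbatim.
# So a proto fragment like
#
#   package body NUMERIC_STD is
#    @ARITH
#   end NUMERIC_STD;
#
# becomes the package body with all arithmetic operator bodies expanded.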
# Copy spec
for log in logics:
for std in ['87', '93']:
out=open('numeric_' + log + '.v' + std, 'w')
for line in open('numeric_' + log + '.proto'):
if line == ' @COMMON\n':
for lcom in open('numeric_common.proto'):
if lcom[0:2] == '--':
pass
elif std == '87' and ('"xnor"' in lcom
or '"sll"' in lcom
or '"srl"' in lcom
or '"rol"' in lcom
or '"ror"' in lcom):
w("--" + lcom[2:])
else:
w(lcom)
else:
w(line)
out.close()
# Generate bodies
v93=False
for l in logics:
logic = l
out=open('numeric_{0}-body.v87'.format(l), 'w')
gen_body('numeric_{0}-body.proto'.format(l))
out.close()
v93=True
binary_funcs.append("xnor")
for l in logics:
logic = l
out=open('numeric_{0}-body.v93'.format(l), 'w')
gen_body('numeric_{0}-body.proto'.format(l))
out.close()
|
emogenet/ghdl
|
libraries/openieee/build_numeric.py
|
Python
|
gpl-2.0
| 32,279
|
import re
from widgetastic_patternfly import Input, BootstrapSelect
from wrapanapi.hawkular import Hawkular
from cfme.common import TopologyMixin
from cfme.common.provider import DefaultEndpoint, DefaultEndpointForm
from cfme.utils.appliance import Navigatable
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.varmeth import variable
from . import MiddlewareProvider
from . import _get_providers_page, _db_select_query
from . import download, MiddlewareBase
class HawkularEndpoint(DefaultEndpoint):
@property
def view_value_mapping(self):
return {'security_protocol': self.security_protocol,
'hostname': self.hostname,
'api_port': self.api_port,
}
class HawkularEndpointForm(DefaultEndpointForm):
security_protocol = BootstrapSelect('default_security_protocol')
api_port = Input('default_api_port')
class HawkularProvider(MiddlewareBase, TopologyMixin, MiddlewareProvider):
"""
HawkularProvider class holds provider data. Used to perform actions on hawkular provider page
Args:
name: Name of the provider
        endpoints: one or several provider endpoints like DefaultEndpoint. Accepts either a dict
        of the form {endpoint.name: endpoint, ..., endpoint_n.name: endpoint_n}, a list of
        endpoints, or a single endpoint
hostname: Hostname/IP of the provider
port: http/https port of hawkular provider
credentials: see Credential inner class.
key: The CFME key of the provider in the yaml.
db_id: database row id of provider
Usage:
myprov = HawkularProvider(name='foo',
endpoints=endpoint,
hostname='localhost',
port=8080,
                                  credentials=Provider.Credential(principal='admin', secret='foobar'))
myprov.create()
myprov.num_deployment(method="ui")
"""
STATS_TO_MATCH = MiddlewareProvider.STATS_TO_MATCH +\
['num_server', 'num_domain', 'num_deployment', 'num_datasource', 'num_messaging']
property_tuples = MiddlewareProvider.property_tuples +\
[('name', 'Name'), ('hostname', 'Host Name'), ('port', 'Port'), ('provider_type', 'Type')]
type_name = "hawkular"
mgmt_class = Hawkular
db_types = ["Hawkular::MiddlewareManager"]
endpoints_form = HawkularEndpointForm
def __init__(self, name=None, endpoints=None, hostname=None, port=None,
credentials=None, key=None,
appliance=None, sec_protocol=None, **kwargs):
Navigatable.__init__(self, appliance=appliance)
self.name = name
self.hostname = hostname
self.port = port
self.provider_type = 'Hawkular'
if not credentials:
credentials = {}
self.credentials = credentials
self.key = key
self.sec_protocol = sec_protocol if sec_protocol else 'Non-SSL'
self.db_id = kwargs['db_id'] if 'db_id' in kwargs else None
self.endpoints = self._prepare_endpoints(endpoints)
@property
def view_value_mapping(self):
"""Maps values to view attrs"""
return {
'name': self.name,
'prov_type': 'Hawkular'
}
@variable(alias='db')
def num_deployment(self):
return self._num_db_generic('middleware_deployments')
@num_deployment.variant('ui')
def num_deployment_ui(self, reload_data=True):
self.load_details(refresh=reload_data)
return int(self.get_detail("Relationships", "Middleware Deployments"))
@variable(alias='db')
def num_server(self):
return self._num_db_generic('middleware_servers')
@num_server.variant('ui')
def num_server_ui(self, reload_data=True):
self.load_details(refresh=reload_data)
return int(self.get_detail("Relationships", "Middleware Servers"))
@variable(alias='db')
def num_server_group(self):
res = self.appliance.db.client.engine.execute(
"SELECT count(*) "
"FROM ext_management_systems, middleware_domains, middleware_server_groups "
"WHERE middleware_domains.ems_id=ext_management_systems.id "
"AND middleware_domains.id=middleware_server_groups.domain_id "
"AND ext_management_systems.name='{0}'".format(self.name))
return int(res.first()[0])
@variable(alias='db')
def num_datasource(self):
return self._num_db_generic('middleware_datasources')
@num_datasource.variant('ui')
def num_datasource_ui(self, reload_data=True):
self.load_details(refresh=reload_data)
return int(self.get_detail("Relationships", "Middleware Datasources"))
@variable(alias='db')
def num_domain(self):
return self._num_db_generic('middleware_domains')
@num_domain.variant('ui')
def num_domain_ui(self, reload_data=True):
self.load_details(refresh=reload_data)
return int(self.get_detail("Relationships", "Middleware Domains"))
@variable(alias='db')
def num_messaging(self):
return self._num_db_generic('middleware_messagings')
@num_messaging.variant('ui')
def num_messaging_ui(self, reload_data=True):
self.load_details(refresh=reload_data)
return int(self.get_detail("Relationships", "Middleware Messagings"))
@variable(alias='ui')
def is_refreshed(self, reload_data=True):
self.load_details(refresh=reload_data)
if re.match('Success.*Minute.*Ago', self.get_detail("Status", "Last Refresh")):
return True
else:
return False
@is_refreshed.variant('db')
def is_refreshed_db(self):
ems = self.appliance.db.client['ext_management_systems']
dates = self.appliance.db.client.session.query(ems.created_on,
ems.updated_on).filter(ems.name == self.name).first()
return dates.updated_on > dates.created_on
@variable(alias='ui')
def is_valid(self, reload_data=True):
self.load_details(refresh=reload_data)
if re.match('Valid.*Ok', self.get_detail("Status", "Authentication status")):
return True
else:
return False
@classmethod
def download(cls, extension):
view = _get_providers_page()
download(view, extension)
def load_details(self, refresh=False):
"""Navigate to Details and load `db_id` if not set"""
view = navigate_to(self, 'Details')
if not self.db_id or refresh:
tmp_provider = _db_select_query(
name=self.name, type='ManageIQ::Providers::Hawkular::MiddlewareManager').first()
self.db_id = tmp_provider.id
if refresh:
view.browser.selenium.refresh()
view.flush_widget_cache()
return view
def load_topology_page(self):
return navigate_to(self, 'TopologyFromDetails')
def recheck_auth_status(self):
view = self.load_details(refresh=True)
view.toolbar.authentication.item_select("Re-check Authentication Status")
@staticmethod
def from_config(prov_config, prov_key, appliance=None):
credentials_key = prov_config['credentials']
credentials = HawkularProvider.process_credential_yaml_key(credentials_key)
endpoints = {}
endpoints[HawkularEndpoint.name] = HawkularEndpoint(
**prov_config['endpoints'][HawkularEndpoint.name])
return HawkularProvider(
name=prov_config['name'],
endpoints=endpoints,
key=prov_key,
hostname=prov_config['hostname'],
sec_protocol=prov_config.get('sec_protocol'),
port=prov_config['port'],
credentials={'default': credentials},
appliance=appliance)
|
jkandasa/integration_tests
|
cfme/middleware/provider/hawkular.py
|
Python
|
gpl-2.0
| 7,860
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
#
# Easy AVR USB Keyboard Firmware Keymapper
# Copyright (C) 2018 David Howland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""This module contains code to load legacy user save data."""
import pickle
import re
from .build import NUM_MACROS, NULL_SYMBOL, key_mode_map, led_modes, led_assignments
from .scancodes import scancodes
from .userdata import Map
legacy_layers = ["Default", "Layer 1", "Layer 2", "Layer 3", "Layer 4",
"Layer 5", "Layer 6", "Layer 7", "Layer 8", "Layer 9"]
class LegacySaveFileException(Exception):
"""Raised when an error is encountered while loading a legacy layout file."""
pass
def load_legacy(user_data, datfile):
"""Load the legacy .dat save file from the path given by `datfile` and populate
the UserData object given by `user_data`.
"""
legacy_data = open_legacy(datfile)
convert_legacy(user_data, legacy_data)
def open_legacy(datfile):
"""Opens and decodes the pickled data in a legacy .dat save file. `datfile`
is a path to the file. The function returns a dictionary with an item for each
component of the legacy file.
"""
with open(datfile, 'rb') as fdin:
data = pickle.load(fdin)
if len(data) < 12:
raise LegacySaveFileException("The .dat file is either broken or too old.")
unique_id = data[1]
maps = data[2]
macros = data[3]
actions = data[4]
modes = data[5]
wmods = data[6]
layout_mod = data[8]
leds = data[9]
if len(data) > 11:
advancedleds = data[11]
useadvancedleds = data[12]
else:
advancedleds = [(255, 0)] * len(led_assignments)
useadvancedleds = False
if len(data) > 13:
ledlayers = data[13]
else:
ledlayers = [0, 0, 0, 0, 0]
# fixes for older versions (renamed layers)
for kmap in (maps, actions, modes, wmods):
if 'Fn' in kmap:
kmap['Layer 1'] = kmap['Fn']
del kmap['Fn']
# fixes for older versions (renamed/removed scancodes)
for layer in maps:
for row in maps[layer]:
for i, k in enumerate(row):
if k == "SCANCODE_DEBUG":
row[i] = "SCANCODE_CONFIG"
elif k == "SCANCODE_LOCKINGCAPS":
row[i] = "HID_KEYBOARD_SC_LOCKING_CAPS_LOCK"
elif k == "SCANCODE_FN":
row[i] = "SCANCODE_FN1"
elif k not in scancodes:
row[i] = NULL_SYMBOL
# fixes for older versions (renamed leds)
leds = ['Any Fn Active' if (x == 'Fn Lock') else x for x in leds]
leds = ['Fn1 Active' if (x == 'Fn Active') else x for x in leds]
# fixes for older versions (added macros)
    extension = NUM_MACROS - len(macros)
    if extension > 0:
        macros.extend([''] * extension)
return {
'unique_id': unique_id,
'layout_mod': layout_mod,
'maps': maps,
'actions': actions,
'modes': modes,
'wmods': wmods,
'macros': macros,
'leds': leds,
'advancedleds': advancedleds,
'useadvancedleds': useadvancedleds,
'ledlayers': ledlayers,
}
def convert_legacy(user_data, legacy_data):
"""Converts the data from a legacy save file into a `user_data` object. `user_data`
should be a fresh instance of UserData and `legacy_data` is the output from a
successful call to open_legacy().
"""
# can't save to legacy file
user_data.path = None
# get good defaults to start from
user_data.new(legacy_data['unique_id'], legacy_data['layout_mod'])
# transmogrify the keymap data
for li, layer in enumerate(legacy_layers):
for ri, rowdef in enumerate(user_data.config.keyboard_definition):
if isinstance(rowdef, int):
continue
for ci, keydef in enumerate(rowdef):
keydim, matrix, _ = keydef
if user_data.layout_mod:
mod_map = user_data.config.alt_layouts[user_data.layout_mod]
keydim = mod_map.get((ri, ci), keydim)
if isinstance(keydim, tuple) and isinstance(matrix, tuple):
row, col = matrix
map = Map(legacy_data['maps'][layer][ri][ci],
key_mode_map[legacy_data['modes'][layer][ri][ci]],
legacy_data['actions'][layer][ri][ci],
legacy_data['wmods'][layer][ri][ci])
user_data.keymap[li][row][col] = map
# translate the macro data
user_data.macros = [translate_macro(macro) for macro in legacy_data['macros']]
# adapt the led data
user_data.led_modes = []
for old_assignment in legacy_data['leds']:
if old_assignment == 'Backlight':
user_data.led_modes.append(led_modes.index('Backlight'))
elif old_assignment in led_assignments:
user_data.led_modes.append(led_modes.index('Indicator'))
else:
user_data.led_modes.append(led_modes.index('Disabled'))
if legacy_data['useadvancedleds']:
for i, func in enumerate(legacy_data['advancedleds']):
led_id, _ = func
if led_id < len(user_data.led_modes):
user_data.led_modes[led_id] = led_modes.index('Indicator')
user_data.led_funcs[i] = func
# copy the rest
user_data.led_layers = legacy_data['ledlayers']
def translate_macro(input):
"""Translate the escape sequences in the original macro mini-language into
the equivalent representations in the new macro mini-language.
"""
# remove the special characters
input = input.replace("\\\\,", "\\")
input = input.replace("\\n,", "\n")
input = input.replace("\\t,", "\t")
# escape any $ symbols
input = input.replace("$", "$$")
# convert keyword format
input = re.sub(r'\\([A-Z0-9_]+\()', r'$\1', input)
# convert function/mod format
input = re.sub(r'\\([A-Z0-9_]+),', r'${\1}', input)
return input
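# Hedged example (not part of the original module), assuming legacy macros
# wrote keywords as \NAME( and function/modifier keys as \NAME, as the
# substitutions above expect:
#
#   >>> translate_macro('\\CTRL(\\F1,)')
#   '$CTRL(${F1})'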
|
dhowland/EasyAVR
|
keymapper/easykeymap/legacy.py
|
Python
|
gpl-2.0
| 6,758
|
import math
import numpy
import pyaudio
import time
import ntplib
def sine(frequency, length, rate):
length = int(length * rate)
factor = float(frequency) * (math.pi * 2) / rate
return numpy.sin(numpy.arange(length) * factor)
chunks = []
chunks.append(sine(440, 1, 44100))
chunk = numpy.concatenate(chunks) * 0.25
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32, channels=1, rate=44100, output=1)
last = 0
print("[ntp-sync] getting clock")
c = ntplib.NTPClient()
response = c.request('pool.ntp.org', version=3)
print("[ntp-sync] clock offset %s" % response.offset)
while True:
curtime = int(math.floor(time.time() + response.offset))
if (curtime % 5) == 0 and curtime > last:
        print(curtime)
print("beep")
last = curtime
stream.write(chunk.astype(numpy.float32).tostring())
stream.close()
p.terminate()
|
fardog/river
|
working/ntp-sync.py
|
Python
|
gpl-2.0
| 877
|
#
# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
# Copyright (C) 2006 - 2007 Richard Purdie
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""
Use this class to fork off a thread to receive event callbacks from the bitbake
server and queue them for the UI to process. This process must be used to avoid
client/server deadlocks.
"""
import collections.abc
import logging
import pickle
import socket
import threading
from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
import bb
import bb.utils
logger = logging.getLogger(__name__)
class BBUIEventQueue:
    def __init__(self, BBServer, clientinfo=("localhost", 0)):
self.eventQueue = []
self.eventQueueLock = threading.Lock()
self.eventQueueNotify = threading.Event()
self.BBServer = BBServer
self.clientinfo = clientinfo
server = UIXMLRPCServer(self.clientinfo)
self.host, self.port = server.socket.getsockname()
server.register_function( self.system_quit, "event.quit" )
server.register_function( self.send_event, "event.sendpickle" )
server.socket.settimeout(1)
self.EventHandle = None
# the event handler registration may fail here due to cooker being in invalid state
# this is a transient situation, and we should retry a couple of times before
# giving up
for count_tries in range(5):
ret = self.BBServer.registerEventHandler(self.host, self.port)
            if isinstance(ret, collections.abc.Iterable):
self.EventHandle, error = ret
else:
self.EventHandle = ret
error = ""
if self.EventHandle != None:
break
errmsg = "Could not register UI event handler. Error: %s, host %s, "\
"port %d" % (error, self.host, self.port)
bb.warn("%s, retry" % errmsg)
import time
time.sleep(1)
else:
raise Exception(errmsg)
self.server = server
        self.t = threading.Thread(target=self.startCallbackHandler, daemon=True)
        self.t.start()
    def getEvent(self):
        with self.eventQueueLock:
            if len(self.eventQueue) == 0:
                return None
            item = self.eventQueue.pop(0)
            if len(self.eventQueue) == 0:
                self.eventQueueNotify.clear()
            return item
def waitEvent(self, delay):
self.eventQueueNotify.wait(delay)
return self.getEvent()
    def queue_event(self, event):
        with self.eventQueueLock:
            self.eventQueue.append(event)
            self.eventQueueNotify.set()
def send_event(self, event):
self.queue_event(pickle.loads(event))
def startCallbackHandler(self):
self.server.timeout = 1
bb.utils.set_process_name("UIEventQueue")
while not self.server.quit:
try:
self.server.handle_request()
except Exception as e:
import traceback
logger.error("BBUIEventQueue.startCallbackHandler: Exception while trying to handle request: %s\n%s" % (e, traceback.format_exc()))
self.server.server_close()
def system_quit( self ):
"""
Shut down the callback thread
"""
try:
self.BBServer.unregisterEventHandler(self.EventHandle)
        except Exception:
pass
self.server.quit = True
class UIXMLRPCServer (SimpleXMLRPCServer):
def __init__( self, interface ):
self.quit = False
SimpleXMLRPCServer.__init__( self,
interface,
requestHandler=SimpleXMLRPCRequestHandler,
logRequests=False, allow_none=True, use_builtin_types=True)
def get_request(self):
while not self.quit:
try:
sock, addr = self.socket.accept()
sock.settimeout(1)
return (sock, addr)
except socket.timeout:
pass
return (None, None)
def close_request(self, request):
if request is None:
return
SimpleXMLRPCServer.close_request(self, request)
def process_request(self, request, client_address):
if request is None:
return
SimpleXMLRPCServer.process_request(self, request, client_address)
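# Hedged usage sketch (not part of this module); assumes `server` is a
# connected BitBake server proxy and handle() a hypothetical UI callback:
#
#   eventq = BBUIEventQueue(server)
#   while True:
#       event = eventq.waitEvent(0.25)   # block for at most 250 ms
#       if event is not None:
#           handle(event)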
|
schleichdi2/OPENNFR-6.3-CORE
|
bitbake/lib/bb/ui/uievent.py
|
Python
|
gpl-2.0
| 4,475
|
from Sire.IO import *
from Sire.MM import *
from Sire.System import *
from Sire.Mol import *
from Sire.Maths import *
from Sire.FF import *
from Sire.Move import *
from Sire.Units import *
from Sire.Vol import *
from Sire.Qt import *
import os
coul_cutoff = 20 * angstrom
lj_cutoff = 10 * angstrom
amber = Amber()
(molecules, space) = amber.readCrdTop("test/io/waterbox.crd", "test/io/waterbox.top")
system = System()
swapwaters = MoleculeGroup("swapwaters")
waters = MoleculeGroup("waters")
molnums = molecules.molNums()
for molnum in molnums:
water = molecules[molnum].molecule()
if water.residue().number() == ResNum(2025):
center_water = water
swapwaters.add(center_water)
center_point = center_water.evaluate().center()
for molnum in molnums:
if molnum != center_water.number():
water = molecules[molnum].molecule()
if Vector.distance(center_point, water.evaluate().center()) < 7.5:
water = water.residue().edit().setProperty("PDB-residue-name", "SWP").commit()
swapwaters.add(water)
else:
waters.add(water)
system.add(swapwaters)
system.add(waters)
gridff = GridFF("gridff")
gridff.setCombiningRules("arithmetic")
print("Combining rules are %s" % gridff.combiningRules())
gridff.setBuffer(2 * angstrom)
gridff.setGridSpacing( 0.5 * angstrom )
gridff.setLJCutoff(lj_cutoff)
gridff.setCoulombCutoff(coul_cutoff)
gridff.setShiftElectrostatics(True)
#gridff.setUseAtomisticCutoff(True)
#gridff.setUseReactionField(True)
cljgridff = CLJGrid()
cljgridff.setCLJFunction( CLJShiftFunction(coul_cutoff,lj_cutoff) )
cljgridff.setFixedAtoms( CLJAtoms(waters.molecules()) )
cljatoms = CLJAtoms(swapwaters.molecules())
cljgridff.setGridDimensions( cljatoms, 0.5 * angstrom, 2 * angstrom )
print("Grid box equals %s" % cljgridff.grid())
cljboxes = CLJBoxes(cljatoms)
(cnrg, ljnrg) = cljgridff.calculate(cljboxes)
print("CLJGridFF: %s %s %s" % (cnrg+ljnrg, cnrg, ljnrg))
cljgridff.setUseGrid(False)
(cnrg, ljnrg) = cljgridff.calculate(cljboxes)
print("CLJGridFF: %s %s %s" % (cnrg+ljnrg, cnrg, ljnrg))
gridff.add(swapwaters, MGIdx(0))
gridff.add(waters, MGIdx(1))
gridff.setSpace( Cartesian() )
gridff2 = GridFF2("gridff2")
gridff2.setCombiningRules("arithmetic")
gridff2.setBuffer(2*angstrom)
gridff2.setGridSpacing( 0.5 * angstrom )
gridff2.setLJCutoff(lj_cutoff)
gridff2.setCoulombCutoff(coul_cutoff)
gridff2.setShiftElectrostatics(True)
#gridff2.setUseAtomisticCutoff(True)
#gridff2.setUseReactionField(True)
gridff2.add( swapwaters, MGIdx(0) )
gridff2.addFixedAtoms(waters.molecules())
gridff2.setSpace( Cartesian() )
testff = TestFF()
testff.add( swapwaters.molecules() )
testff.addFixedAtoms(waters.molecules())
testff.setCutoff(coul_cutoff, lj_cutoff)
cljff = InterGroupCLJFF("cljff")
cljff.setSwitchingFunction( HarmonicSwitchingFunction(coul_cutoff,coul_cutoff,lj_cutoff,lj_cutoff) )
cljff.add(swapwaters, MGIdx(0))
cljff.add(waters, MGIdx(1))
cljff.setShiftElectrostatics(True)
#cljff.setUseAtomisticCutoff(True)
#cljff.setUseReactionField(True)
cljff.setSpace( Cartesian() )
cljff2 = InterCLJFF("cljff2")
cljff2.setSwitchingFunction( HarmonicSwitchingFunction(coul_cutoff,coul_cutoff,lj_cutoff,lj_cutoff) )
cljff2.add(waters)
cljff2.setShiftElectrostatics(True)
cljff2.setSpace( Cartesian() )
print(gridff.energies())
print(gridff2.energies())
print("\nEnergies")
print(gridff.energies())
print(gridff2.energies())
t = QTime()
t.start()
nrgs = cljff.energies()
ms = t.elapsed()
print(cljff.energies())
print("Took %d ms" % ms)
testff.calculateEnergy()
t.start()
nrgs = cljff2.energies()
ms = t.elapsed()
print("\nExact compare")
print(cljff2.energies())
print("Took %d ms" % ms)
|
chryswoods/SireTests
|
unittests/SireMM/testgridff2.py
|
Python
|
gpl-2.0
| 3,699
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Dropbox upload daemon.
"""
from fnmatch import fnmatch
from operator import itemgetter
from os import listdir, path, mknod, stat
from time import strptime, sleep, time
from dropbox.client import DropboxClient, DropboxOAuth2FlowNoRedirect
from dropbox.rest import ErrorResponse
from urllib3.exceptions import MaxRetryError
from utils import settings
from utils.daemons import DaemonBase, init
from utils.database import DatabaseConnection
__author__ = "wavezone"
__copyright__ = "Copyright 2016, MRG-Infó Bt."
__credits__ = ["Groma István (wavezone)"]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Groma István"
__email__ = "wavezone@mrginfo.com"
class UploadDaemon(DaemonBase):
""" Dropbox upload daemon.
"""
first_time = False
max_size = 10 * (1024 ** 3)
access_token = settings.config.access_token
def __init__(self, directory: str):
""" Constructor.
"""
super().__init__()
self.directory = directory
if self.access_token is None or self.access_token == '':
# noinspection SpellCheckingInspection
flow = DropboxOAuth2FlowNoRedirect('m9cijknmu1po39d', 'bi8dlhif9215qg3')
authorize_url = flow.start()
print("OAuth 2 authorization process")
print("1. Go to: {}".format(authorize_url))
print("2. Click Allow (you might have to log in first).")
print("3. Copy the authorization code.")
code = input("4. Enter the authorization code here: ").strip()
self.access_token, user_id = flow.finish(code)
settings.config.access_token = self.access_token
self.first_time = True
@staticmethod
def _get(client: DropboxClient) -> list:
""" Get files from Dropbox.
"""
try:
metadata = client.metadata('/')
except (MaxRetryError, ErrorResponse):
return None
return [
{
'file': m['path'],
'modified': strptime(m['modified'], '%a, %d %b %Y %H:%M:%S %z'),
'size': m['bytes']
}
for m in metadata['contents']
if not m['is_dir']
]
def _upload(self, client: DropboxClient):
""" Upload new files from directory.
"""
now = time()
for filename in listdir(self.directory):
if fnmatch(filename, '*.upl'):
continue
local_name = '/' + filename
full_name = path.join(self.directory, filename)
upl_name = "{}.upl".format(full_name)
if not path.isfile(upl_name) and stat(full_name).st_mtime < now - 60:
with open(full_name, 'rb') as file_stream:
try:
client.put_file(local_name, file_stream)
share = client.share(local_name)
except (MaxRetryError, ErrorResponse):
continue
with DatabaseConnection() as db:
update = """
UPDATE events
SET url = '{}',
uploaded = current_timestamp
WHERE file = '{}'
""".format(share['url'], full_name)
db.dml(update)
try:
mknod(upl_name)
except FileExistsError:
pass
print("{} was uploaded to Dropbox.".format(filename))
def _rotate(self, client: DropboxClient, files: list):
""" Rotate Dropbox in order to save storage.
"""
total_size = sum(item['size'] for item in files)
files_history = sorted(files, key=itemgetter('modified'))
for file in files_history:
if total_size < self.max_size:
break
try:
client.file_delete(file['file'])
print("{} was deleted from Dropbox.".format(file['file']))
total_size -= file['size']
except (MaxRetryError, ErrorResponse):
pass
def run(self):
""" Upload logic.
"""
if self.first_time:
return
print("Uploading from {} to Dropbox.".format(self.directory), flush=True)
try:
client = DropboxClient(self.access_token)
while True:
self._upload(client)
files = self._get(client)
if files is not None:
self._rotate(client, files)
print("Going idle...", end='', flush=True)
sleep(2 * 60)
print("DONE", flush=True)
except KeyboardInterrupt:
print()
except SystemExit:
pass
finally:
print("No longer uploading from {} to Dropbox.".format(self.directory), flush=True)
if __name__ == '__main__':
my_daemon = UploadDaemon(settings.config.working_dir)
init(my_daemon)
|
MrgInfo/PiCam
|
upload.py
|
Python
|
gpl-2.0
| 5,086
|
# -- coding: utf-8 --
# ===========================================================================
# eXe
# Copyright 2012, Pedro Peña Pérez, Open Phoenix IT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
'''
@author: Pedro Peña Pérez
'''
import sys
import logging
from exe.engine.persistxml import encodeObjectToXML
from exe.engine.path import Path
from exe.engine.package import Package
from exe.export.scormexport import ScormExport
from exe.export.imsexport import IMSExport
from exe.export.websiteexport import WebsiteExport
from exe.export.singlepageexport import SinglePageExport
from exe.export.xliffexport import XliffExport
from exe.export.epub3export import Epub3Export
from exe.export.textexport import TextExport
from exe.export.epub3subexport import Epub3SubExport
LOG = logging.getLogger(__name__)
ENCODING = sys.stdout.encoding or "UTF-8"
class CmdlineExporter(object):
extensions = {'xml': '.xml',
'scorm12': '.zip',
'scorm2004': '.zip',
'agrega': '.zip',
'ims': '.zip',
'website': '',
'webzip': '.zip',
'singlepage': '',
'xliff': '.xlf',
'epub3': '.epub',
'report': '.csv',
'text': '.txt'
}
def __init__(self, config, options):
self.config = config
self.options = options
self.web_dir = Path(self.config.webDir)
self.styles_dir = None
def do_export(self, inputf, outputf):
if hasattr(self, 'export_' + self.options["export"]):
LOG.debug("Exporting to type %s, in: %s, out: %s, overwrite: %s" \
% (self.options["export"], inputf, outputf, str(self.options["overwrite"])))
if not outputf:
if self.options["export"] in ('website', 'singlepage'):
outputf = inputf.rsplit(".elp")[0]
else:
outputf = inputf + self.extensions[self.options["export"]]
outputfp = Path(outputf)
if outputfp.exists() and not self.options["overwrite"]:
error = _(u'"%s" already exists.\nPlease try again \
with a different filename') % outputf
raise Exception(error.encode(ENCODING))
else:
if outputfp.exists() and self.options["overwrite"]:
if outputfp.isdir():
for filen in outputfp.walkfiles():
filen.remove()
outputfp.rmdir()
else:
outputfp.remove()
pkg = Package.load(inputf)
LOG.debug("Package %s loaded" % (inputf))
if not pkg:
error = _(u"Invalid input package")
raise Exception(error.encode(ENCODING))
self.styles_dir = self.config.stylesDir / pkg.style
LOG.debug("Styles dir: %s" % (self.styles_dir))
pkg.exportSource = self.options['editable']
getattr(self, 'export_' + self.options["export"])(pkg, outputf)
return outputf
else:
raise Exception(_(u"Export format not implemented")\
.encode(ENCODING))
def export_xml(self, pkg, outputf):
open(outputf, "w").write(encodeObjectToXML(pkg))
def export_scorm12(self, pkg, outputf):
scormExport = ScormExport(self.config, self.styles_dir, outputf,
'scorm1.2')
pkg.scowsinglepage = self.options['single-page']
pkg.scowwebsite = self.options['website']
scormExport.export(pkg)
def export_scorm2004(self, pkg, outputf):
scormExport = ScormExport(self.config, self.styles_dir, outputf,
'scorm2004')
pkg.scowsinglepage = self.options['single-page']
pkg.scowwebsite = self.options['website']
scormExport.export(pkg)
def export_ims(self, pkg, outputf):
imsExport = IMSExport(self.config, self.styles_dir, outputf)
imsExport.export(pkg)
def export_website(self, pkg, outputf):
outputfp = Path(outputf)
outputfp.makedirs()
websiteExport = WebsiteExport(self.config, self.styles_dir, outputf)
websiteExport.export(pkg)
def export_webzip(self, pkg, outputf):
websiteExport = WebsiteExport(self.config, self.styles_dir, outputf)
websiteExport.exportZip(pkg)
def export_singlepage(self, pkg, outputf, print_flag=0):
images_dir = self.web_dir.joinpath('images')
scripts_dir = self.web_dir.joinpath('scripts')
css_dir = self.web_dir.joinpath('css')
templates_dir = self.web_dir.joinpath('templates')
singlePageExport = SinglePageExport(self.styles_dir, outputf, \
images_dir, scripts_dir, css_dir, templates_dir)
singlePageExport.export(pkg, print_flag)
def export_xliff(self, pkg, outputf):
xliff = XliffExport(self.config, outputf, \
source_copied_in_target=self.options["copy-source"], \
wrap_cdata=self.options["wrap-cdata"])
xliff.export(pkg)
def export_epub3(self, pkg, outputf):
epub3Export = Epub3Export(self.config, self.styles_dir, outputf)
epub3Export.export(pkg)
def export_subepub3(self, pkg, outputf):
epub3SubExport = Epub3SubExport(self.config, self.styles_dir, outputf)
epub3SubExport.export(pkg)
def export_report(self, pkg, outputf):
websiteExport = WebsiteExport(self.config, self.styles_dir, outputf, report=True)
websiteExport.export(pkg)
def export_text(self, pkg, outputf):
        textExport = TextExport(outputf)
textExport.export(pkg)
textExport.save(outputf)
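# Hedged usage sketch (not part of eXe itself); assumes a loaded eXe `config`
# object and the option keys referenced in do_export() above:
#
#   options = {"export": "epub3", "overwrite": True, "editable": False}
#   exporter = CmdlineExporter(config, options)
#   outputf = exporter.do_export("course.elp", None)  # -> "course.elp.epub"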
|
exelearning/iteexe
|
exe/export/cmdlineexporter.py
|
Python
|
gpl-2.0
| 6,556
|
# -*- coding: utf-8; -*-
"""
Copyright (C) 2007-2013 Guake authors
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301 USA
"""
import inspect
import time
# You can put calls to p() everywhere in this page to inspect timing
# g_start = time.time()
# def p():
# print(time.time() - g_start, __file__, inspect.currentframe().f_back.f_lineno)
import logging
import os
import signal
import subprocess
import sys
import uuid
from locale import gettext as _
from optparse import OptionParser
log = logging.getLogger(__name__)
from guake.globals import NAME
from guake.globals import bindtextdomain
# When we are in the document generation on readthedocs, we do not have paths.py generated
try:
from guake.paths import LOCALE_DIR
bindtextdomain(NAME, LOCALE_DIR)
except: # pylint: disable=bare-except
pass
def main():
"""Parses the command line parameters and decide if dbus methods
should be called or not. If there is already a guake instance
running it will be used and a True value will be returned,
otherwise, false will be returned.
"""
    # Force TERM to xterm-256color for compatibility with some old command line programs
    os.environ["TERM"] = "xterm-256color"
    # Force the X11 backend when running under Wayland
    os.environ["GDK_BACKEND"] = "x11"
# do not use version keywords here, pbr might be slow to find the version of Guake module
parser = OptionParser()
parser.add_option(
'-V',
'--version',
dest='version',
action='store_true',
default=False,
help=_('Show Guake version number and exit')
)
parser.add_option(
'-v',
'--verbose',
dest='verbose',
action='store_true',
default=False,
help=_('Enable verbose logging')
)
parser.add_option(
'-f',
'--fullscreen',
dest='fullscreen',
action='store_true',
default=False,
help=_('Put Guake in fullscreen mode')
)
parser.add_option(
'-t',
'--toggle-visibility',
dest='show_hide',
action='store_true',
default=False,
help=_('Toggles the visibility of the terminal window')
)
parser.add_option(
'--show',
dest="show",
action='store_true',
default=False,
help=_('Shows Guake main window')
)
parser.add_option(
'--hide',
dest='hide',
action='store_true',
default=False,
help=_('Hides Guake main window')
)
parser.add_option(
'-p',
'--preferences',
dest='show_preferences',
action='store_true',
default=False,
help=_('Shows Guake preference window')
)
parser.add_option(
'-a',
'--about',
dest='show_about',
action='store_true',
default=False,
help=_('Shows Guake\'s about info')
)
parser.add_option(
'-n',
'--new-tab',
dest='new_tab',
action='store',
default='',
help=_('Add a new tab (with current directory set to NEW_TAB)')
)
parser.add_option(
'-s',
'--select-tab',
dest='select_tab',
action='store',
default='',
help=_('Select a tab (SELECT_TAB is the index of the tab)')
)
parser.add_option(
'-g',
'--selected-tab',
dest='selected_tab',
action='store_true',
default=False,
help=_('Return the selected tab index.')
)
parser.add_option(
'-l',
'--selected-tablabel',
dest='selected_tablabel',
action='store_true',
default=False,
help=_('Return the selected tab label.')
)
parser.add_option(
'-e',
'--execute-command',
dest='command',
action='store',
default='',
help=_('Execute an arbitrary command in the selected tab.')
)
parser.add_option(
'-i',
'--tab-index',
dest='tab_index',
action='store',
default='0',
help=_('Specify the tab to rename. Default is 0. Can be used to select tab by UUID.')
)
parser.add_option(
'--bgcolor',
dest='bgcolor',
action='store',
default='',
help=_('Set the hexadecimal (#rrggbb) background color of '
'the selected tab.')
)
parser.add_option(
'--fgcolor',
dest='fgcolor',
action='store',
default='',
help=_('Set the hexadecimal (#rrggbb) foreground color of the '
'selected tab.')
)
parser.add_option(
'--rename-tab',
dest='rename_tab',
metavar='TITLE',
action='store',
default='',
help=_(
'Rename the specified tab by --tab-index. Reset to default if TITLE is '
'a single dash "-".'
)
)
parser.add_option(
'-r',
'--rename-current-tab',
dest='rename_current_tab',
metavar='TITLE',
action='store',
default='',
help=_('Rename the current tab. Reset to default if TITLE is a '
'single dash "-".')
)
parser.add_option(
'-q',
'--quit',
dest='quit',
action='store_true',
default=False,
help=_('Says to Guake go away =(')
)
parser.add_option(
'-u',
'--no-startup-script',
dest='execute_startup_script',
action='store_false',
default=True,
help=_('Do not execute the start up script')
)
options = parser.parse_args()[0]
if options.version:
from guake import gtk_version
from guake import guake_version
from guake import vte_version
from guake import vte_runtime_version
print('Guake Terminal: {}'.format(guake_version()))
print('VTE: {}'.format(vte_version()))
print('VTE runtime: {}'.format(vte_runtime_version()))
print('Gtk: {}'.format(gtk_version()))
sys.exit(0)
import dbus
from guake.dbusiface import DBUS_NAME
from guake.dbusiface import DBUS_PATH
from guake.dbusiface import DbusManager
from guake.guake_logging import setupLogging
instance = None
# Trying to get an already running instance of guake. If it is not
# possible, lets create a new instance. This function will return
# a boolean value depending on this decision.
try:
bus = dbus.SessionBus()
remote_object = bus.get_object(DBUS_NAME, DBUS_PATH)
already_running = True
except dbus.DBusException:
# can now configure the logging
setupLogging(options.verbose)
# COLORTERM is an environment variable set by some terminal emulators such as
# gnome-terminal.
# To avoid confusing applications running inside Guake, clean up COLORTERM at startup.
if "COLORTERM" in os.environ:
del os.environ['COLORTERM']
log.info("Guake not running, starting it")
# late loading of the Guake object, to speed up dbus comm
from guake.guake_app import Guake
instance = Guake()
remote_object = DbusManager(instance)
already_running = False
only_show_hide = True
if options.fullscreen:
remote_object.fullscreen()
if options.show:
remote_object.show_from_remote()
if options.hide:
remote_object.hide_from_remote()
if options.show_preferences:
remote_object.show_prefs()
only_show_hide = False
if options.new_tab:
remote_object.add_tab(options.new_tab)
only_show_hide = False
if options.select_tab:
selected = int(options.select_tab)
i = remote_object.select_tab(selected)
if i is None:
sys.stdout.write('invalid index: %d\n' % selected)
only_show_hide = False
if options.selected_tab:
selected = remote_object.get_selected_tab()
sys.stdout.write('%d\n' % selected)
only_show_hide = False
if options.selected_tablabel:
selectedlabel = remote_object.get_selected_tablabel()
sys.stdout.write('%s\n' % selectedlabel)
only_show_hide = False
if options.command:
remote_object.execute_command(options.command)
only_show_hide = False
if options.tab_index and options.rename_tab:
try:
remote_object.rename_tab_uuid(str(uuid.UUID(options.tab_index)), options.rename_tab)
except ValueError:
remote_object.rename_tab(int(options.tab_index), options.rename_tab)
only_show_hide = False
if options.bgcolor:
remote_object.set_bgcolor(options.bgcolor)
only_show_hide = False
if options.fgcolor:
remote_object.set_fgcolor(options.fgcolor)
only_show_hide = False
if options.rename_current_tab:
remote_object.rename_current_tab(options.rename_current_tab)
only_show_hide = False
if options.show_about:
remote_object.show_about()
only_show_hide = False
if options.quit:
try:
remote_object.quit()
return True
except dbus.DBusException:
return True
if already_running and only_show_hide:
# here we know that guake was called without any parameter and
# it is already running, so, lets toggle its visibility.
remote_object.show_hide()
if options.execute_startup_script:
if not already_running:
startup_script = instance.settings.general.get_string("startup-script")
if startup_script:
log.info("Calling startup script: %s", startup_script)
pid = subprocess.Popen([startup_script],
shell=True,
stdin=None,
stdout=None,
stderr=None,
close_fds=True)
log.info("Startup script started with pid: %s", pid)
# Please ensure this is the last line !!!!
else:
log.info("--no-startup-script argument defined, so don't execute the startup script")
if already_running:
log.info("Guake is already running")
return already_running
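# Hedged examples of invocations this parser accepts (flags defined above):
#
#   guake --show             # show the main window via D-Bus
#   guake -n "$PWD"          # open a new tab in the current directory
#   guake -r build           # rename the current tab to "build"
#   guake --quit             # ask a running instance to exit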
def exec_main():
if not main():
log.debug("Running main gtk loop")
signal.signal(signal.SIGINT, signal.SIG_DFL)
# Load gi pretty late, to speed up as much as possible the parsing of the option for DBus
# comm through command line
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
Gtk.main()
if __name__ == '__main__':
exec_main()
|
piotrdrag/guake
|
guake/main.py
|
Python
|
gpl-2.0
| 11,423
|
#!/usr/bin/python
import socket
import re
import binascii
import struct
import time
import sys
import random
from base64 import b64encode
from hashlib import sha1
from thread import *
events = "/var/www/map/eventstream"
with open(events) as f:
content = f.read().splitlines()
f.close()
websocket_answer = (
'HTTP/1.1 101 Switching Protocols',
'Upgrade: websocket',
'Connection: Upgrade',
'Sec-WebSocket-Accept: {key}\r\n\r\n',
)
GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
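# Hedged sanity check: the accept key is base64(sha1(client_key + GUID)), as
# RFC 6455 requires.  With the RFC's own sample nonce:
#
#   >>> b64encode(sha1("dGhlIHNhbXBsZSBub25jZQ==" + GUID).digest())
#   's3pPLMBiTxaQ9kYGzzhZRbK+xOo='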
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Socket Created'
try:
s.bind(('192.168.1.101', 443))
except socket.error as msg:
print 'Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
print 'Socket bind complete'
s.listen(10)
print "Listening for connections"
def clientthread(client):
while True:
for line in content:
length = len(line)
preamble = "\x81\x7e" + struct.pack(">i", length)[2:]
client.send(preamble+line)
print "Sending Attack Event Size: " + hex(length) + " Bytes\n"
random.seed()
n = random.random()
time.sleep(n)
client.close()
while 1:
client, address = s.accept()
print 'Got connection from', address
text = client.recv(1024)
print text
key = (re.search('Sec-WebSocket-Key:\s+(.*?)[\n\r]+', text)
.groups()[0]
.strip())
response_key = b64encode(sha1(key + GUID).digest())
response = '\r\n'.join(websocket_answer).format(key=response_key)
print response
client.send(response)
client.recv(1)
start_new_thread(clientthread ,(client,))
s.close()
|
belial1337/WorldAttackMap
|
wss.py
|
Python
|
gpl-2.0
| 1,664
|
# core-getapp.py
from wax import *
class MainFrame(Frame):
def Body(self):
app = core.GetApp()
print app
assert isinstance(app, Application)
app = Application(MainFrame, title="core-getapp")
app.Run()
|
MSMBA/msmba-workflow
|
msmba-workflow/srclib/wax/examples/core-getapp.py
|
Python
|
gpl-2.0
| 233
|
#!/usr/bin/python -tt
# Quality scores from fastx
# Website: http://hannonlab.cshl.edu/fastx_toolkit/
# Import OS features to run external programs
import os
import glob
v = "Version 0.1"
# Versions:
# 0.1 - Simple script to run fastx_quality_stats on all of the files
fastq_indir = "/home/chris/transcriptome/fastq/trimmed/"
fastq_outdir = "/home/chris/transcriptome/fastq/reports/quality stats"
# Sample 1
print "Analyzing Sample 1..."
os.system("fastx_quality_stats -i %s/Sample_1_L001_trimmed.fastq %s/Sample_1_L001_trimmed.txt" % (fastq_indir, fastq_outdir))
os.system("fastx_quality_stats -i %s/Sample_1_L002_trimmed.fastq %s/Sample_1_L002_trimmed.txt" % (fastq_indir, fastq_outdir))
|
calandryll/transcriptome
|
scripts/old/quality_stats.py
|
Python
|
gpl-2.0
| 685
|
#!/usr/bin/env python
from sre_parse import isdigit
import sys
__author__ = 'jpijper'
import roslib; roslib.load_manifest('smach_tutorials')
import rospy
import smach_ros
from DialogStateMachine import SMDialog
def main():
# To restrict the amount of feedback to the screen, a feedback level can be given on the command line.
# Level 0 means show only the most urgent feedback and the higher the level, the more is shown.
feedback_level = int(sys.argv[1]) if len(sys.argv) > 1 and isdigit(sys.argv[1]) else 10
rospy.init_node('sm_dialog_ask_device_on_finger')
sm_top = SMDialog('ask_device_on_finger.csv', '192.168.0.4').sm_top
## inserted for smach_viewer
# Create and start the introspection server
#sis = smach_ros.IntrospectionServer('server_name', sm_top, '/SM_ROOT')
#sis.start()
## end insert
# Execute SMACH plan
outcome = sm_top.execute()
## inserted for smach_viewer
# Wait for ctrl-c to stop the application
#rospy.spin()
#sis.stop()
## end insert
if __name__ == '__main__':
main()
|
Rctue/DialogStateMachine
|
DialogTest_2_AskDeviceOnFinger.py
|
Python
|
gpl-2.0
| 1,079
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0006_auto_20141207_2112'),
]
operations = [
migrations.AlterField(
model_name='ward',
name='councillor',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
|
tigeorgia/fixmystreet
|
apps/mainapp/migrations/0007_auto_20150202_1422.py
|
Python
|
gpl-2.0
| 487
|
#
# The Python Imaging Library.
# $Id$
#
# standard image operations
#
# History:
# 2001-10-20 fl Created
# 2001-10-23 fl Added autocontrast operator
# 2001-12-18 fl Added Kevin's fit operator
# 2004-03-14 fl Fixed potential division by zero in equalize
# 2005-05-05 fl Fixed equalize for low number of values
#
# Copyright (c) 2001-2004 by Secret Labs AB
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image
import operator
##
# (New in 1.1.3) The <b>ImageOps</b> module contains a number of
# 'ready-made' image processing operations. This module is somewhat
# experimental, and most operators only work on L and RGB images.
#
# @since 1.1.3
##
#
# helpers
def _border(border):
if type(border) is type(()):
if len(border) == 2:
left, top = right, bottom = border
elif len(border) == 4:
left, top, right, bottom = border
else:
left = top = right = bottom = border
return left, top, right, bottom
def _color(color, mode):
if Image.isStringType(color):
import ImageColor
color = ImageColor.getcolor(color, mode)
return color
def _lut(image, lut):
if image.mode == "P":
# FIXME: apply to lookup table, not image data
raise NotImplementedError("mode P support coming soon")
elif image.mode in ("L", "RGB"):
if image.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return image.point(lut)
else:
raise IOError, "not supported for this image mode"
#
# actions
##
# Maximize (normalize) image contrast. This function calculates a
# histogram of the input image, removes <i>cutoff</i> percent of the
# lightest and darkest pixels from the histogram, and remaps the image
# so that the darkest pixel becomes black (0), and the lightest
# becomes white (255).
#
# @param image The image to process.
# @param cutoff How many percent to cut off from the histogram.
# @param ignore The background pixel value (use None for no background).
# @return An image.
def autocontrast(image, cutoff=0, ignore=None):
"Maximize image contrast, based on histogram"
histogram = image.histogram()
lut = []
for layer in range(0, len(histogram), 256):
h = histogram[layer:layer+256]
if ignore is not None:
# get rid of outliers
try:
h[ignore] = 0
except TypeError:
# assume sequence
for ix in ignore:
h[ix] = 0
if cutoff:
# cut off pixels from both ends of the histogram
# get number of pixels
n = 0
for ix in range(256):
n = n + h[ix]
# remove cutoff% pixels from the low end
cut = n * cutoff / 100
for lo in range(256):
if cut > h[lo]:
cut = cut - h[lo]
h[lo] = 0
else:
h[lo] = h[lo] - cut
cut = 0
if cut <= 0:
break
# remove cutoff% samples from the hi end
cut = n * cutoff / 100
for hi in range(255, -1, -1):
if cut > h[hi]:
cut = cut - h[hi]
h[hi] = 0
else:
h[hi] = h[hi] - cut
cut = 0
if cut <= 0:
break
# find lowest/highest samples after preprocessing
for lo in range(256):
if h[lo]:
break
for hi in range(255, -1, -1):
if h[hi]:
break
if hi <= lo:
# don't bother
lut.extend(range(256))
else:
scale = 255.0 / (hi - lo)
offset = -lo * scale
for ix in range(256):
ix = int(ix * scale + offset)
if ix < 0:
ix = 0
elif ix > 255:
ix = 255
lut.append(ix)
return _lut(image, lut)
##
# Colorize grayscale image. The <i>black</i> and <i>white</i>
# arguments should be RGB tuples; this function calculates a colour
# wedge mapping all black pixels in the source image to the first
# colour, and all white pixels to the second colour.
#
# @param image The image to colourize.
# @param black The colour to use for black input pixels.
# @param white The colour to use for white input pixels.
# @return An image.
def colorize(image, black, white):
"Colorize a grayscale image"
assert image.mode == "L"
black = _color(black, "RGB")
white = _color(white, "RGB")
red = []; green = []; blue = []
for i in range(256):
red.append(black[0]+i*(white[0]-black[0])/255)
green.append(black[1]+i*(white[1]-black[1])/255)
blue.append(black[2]+i*(white[2]-black[2])/255)
image = image.convert("RGB")
return _lut(image, red + green + blue)
##
# Remove border from image. The same amount of pixels are removed
# from all four sides. This function works on all image modes.
#
# @param image The image to crop.
# @param border The number of pixels to remove.
# @return An image.
# @see Image#Image.crop
def crop(image, border=0):
"Crop border off image"
left, top, right, bottom = _border(border)
return image.crop(
(left, top, image.size[0]-right, image.size[1]-bottom)
)
##
# Deform the image.
#
# @param image The image to deform.
# @param deformer A deformer object. Any object that implements a
# <b>getmesh</b> method can be used.
# @param resample What resampling filter to use.
# @return An image.
def deform(image, deformer, resample=Image.BILINEAR):
"Deform image using the given deformer"
return image.transform(
image.size, Image.MESH, deformer.getmesh(image), resample
)
##
# Equalize the image histogram. This function applies a non-linear
# mapping to the input image, in order to create a uniform
# distribution of grayscale values in the output image.
#
# @param image The image to equalize.
# @param mask An optional mask. If given, only the pixels selected by
# the mask are included in the analysis.
# @return An image.
def equalize(image, mask=None):
"Equalize image histogram"
if image.mode == "P":
image = image.convert("RGB")
h = image.histogram(mask)
lut = []
for b in range(0, len(h), 256):
histo = filter(None, h[b:b+256])
if len(histo) <= 1:
lut.extend(range(256))
else:
step = (reduce(operator.add, histo) - histo[-1]) / 255
if not step:
lut.extend(range(256))
else:
n = step / 2
for i in range(256):
lut.append(n / step)
n = n + h[i+b]
return _lut(image, lut)
##
# Add border to the image
#
# @param image The image to expand.
# @param border Border width, in pixels.
# @param fill Pixel fill value (a colour value). Default is 0 (black).
# @return An image.
def expand(image, border=0, fill=0):
"Add border to image"
left, top, right, bottom = _border(border)
width = left + image.size[0] + right
height = top + image.size[1] + bottom
out = Image.new(image.mode, (width, height), _color(fill, image.mode))
out.paste(image, (left, top))
return out
##
# Returns a sized and cropped version of the image, cropped to the
# requested aspect ratio and size.
# <p>
# The <b>fit</b> function was contributed by Kevin Cazabon.
#
# @param size The requested output size in pixels, given as a
# (width, height) tuple.
# @param method What resampling method to use. Default is Image.NEAREST.
# @param bleed Remove a border around the outside of the image (from all
#    four edges). The value is a decimal percentage (use 0.01 for one
#    percent). The default value is 0 (no border).
# @param centering Control the cropping position. Use (0.5, 0.5) for
# center cropping (e.g. if cropping the width, take 50% off of the
# left side, and therefore 50% off the right side). (0.0, 0.0)
# will crop from the top left corner (i.e. if cropping the width,
# take all of the crop off of the right side, and if cropping the
# height, take all of it off the bottom). (1.0, 0.0) will crop
# from the bottom left corner, etc. (i.e. if cropping the width,
# take all of the crop off the left side, and if cropping the height
# take none from the top, and therefore all off the bottom).
# @return An image.
def fit(image, size, method=Image.NEAREST, bleed=0.0, centering=(0.5, 0.5)):
"""
This method returns a sized and cropped version of the image,
cropped to the aspect ratio and size that you request.
"""
# by Kevin Cazabon, Feb 17/2000
# kevin@cazabon.com
# http://www.cazabon.com
# ensure inputs are valid
if type(centering) != type([]):
centering = [centering[0], centering[1]]
if centering[0] > 1.0 or centering[0] < 0.0:
        centering[0] = 0.50
if centering[1] > 1.0 or centering[1] < 0.0:
centering[1] = 0.50
if bleed > 0.49999 or bleed < 0.0:
bleed = 0.0
# calculate the area to use for resizing and cropping, subtracting
# the 'bleed' around the edges
# number of pixels to trim off on Top and Bottom, Left and Right
bleedPixels = (
int((float(bleed) * float(image.size[0])) + 0.5),
int((float(bleed) * float(image.size[1])) + 0.5)
)
liveArea = (
bleedPixels[0], bleedPixels[1], image.size[0] - bleedPixels[0] - 1,
image.size[1] - bleedPixels[1] - 1
)
liveSize = (liveArea[2] - liveArea[0], liveArea[3] - liveArea[1])
# calculate the aspect ratio of the liveArea
liveAreaAspectRatio = float(liveSize[0])/float(liveSize[1])
# calculate the aspect ratio of the output image
aspectRatio = float(size[0]) / float(size[1])
# figure out if the sides or top/bottom will be cropped off
if liveAreaAspectRatio >= aspectRatio:
# liveArea is wider than what's needed, crop the sides
cropWidth = int((aspectRatio * float(liveSize[1])) + 0.5)
cropHeight = liveSize[1]
else:
# liveArea is taller than what's needed, crop the top and bottom
cropWidth = liveSize[0]
cropHeight = int((float(liveSize[0])/aspectRatio) + 0.5)
# make the crop
leftSide = int(liveArea[0] + (float(liveSize[0]-cropWidth) * centering[0]))
if leftSide < 0:
leftSide = 0
topSide = int(liveArea[1] + (float(liveSize[1]-cropHeight) * centering[1]))
if topSide < 0:
topSide = 0
out = image.crop(
(leftSide, topSide, leftSide + cropWidth, topSide + cropHeight)
)
# resize the image and return it
return out.resize(size, method)
##
# Flip the image vertically (top to bottom).
#
# @param image The image to flip.
# @return An image.
def flip(image):
"Flip image vertically"
return image.transpose(Image.FLIP_TOP_BOTTOM)
##
# Convert the image to grayscale.
#
# @param image The image to convert.
# @return An image.
def grayscale(image):
"Convert to grayscale"
return image.convert("L")
##
# Invert (negate) the image.
#
# @param image The image to invert.
# @return An image.
def invert(image):
"Invert image (negate)"
lut = []
for i in range(256):
lut.append(255-i)
return _lut(image, lut)
##
# Flip image horizontally (left to right).
#
# @param image The image to mirror.
# @return An image.
def mirror(image):
"Flip image horizontally"
return image.transpose(Image.FLIP_LEFT_RIGHT)
##
# Reduce the number of bits for each colour channel.
#
# @param image The image to posterize.
# @param bits The number of bits to keep for each channel (1-8).
# @return An image.
def posterize(image, bits):
"Reduce the number of bits per color channel"
lut = []
mask = ~(2**(8-bits)-1)
for i in range(256):
lut.append(i & mask)
return _lut(image, lut)
##
# Invert all pixel values above a threshold.
#
# @param image The image to solarize.
# @param threshold All pixels above this greyscale level are inverted.
# @return An image.
def solarize(image, threshold=128):
"Invert all values above threshold"
lut = []
for i in range(256):
if i < threshold:
lut.append(i)
else:
lut.append(255-i)
return _lut(image, lut)
# --------------------------------------------------------------------
# PIL USM components, from Kevin Cazabon.
def gaussian_blur(im, radius=None):
""" PIL_usm.gblur(im, [radius])"""
if radius is None:
radius = 5.0
im.load()
return im.im.gaussian_blur(radius)
gblur = gaussian_blur
def unsharp_mask(im, radius=None, percent=None, threshold=None):
""" PIL_usm.usm(im, [radius, percent, threshold])"""
if radius is None:
radius = 5.0
if percent is None:
percent = 150
if threshold is None:
threshold = 3
im.load()
return im.im.unsharp_mask(radius, percent, threshold)
usm = unsharp_mask
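# A minimal smoke test of a few of the operators above -- an illustrative
# sketch added for this document, not part of the original module. It assumes
# the file is run directly with PIL available; the horizontal-gradient image
# built here is hypothetical demo data.
if __name__ == "__main__":
    im = Image.new("L", (256, 64))
    im.putdata([x for y in range(64) for x in range(256)])
    print colorize(im, (0, 0, 128), (255, 255, 0)).mode  # "RGB"
    print invert(im).getpixel((0, 0))                    # 255: pixel (0, 0) was 0
    print expand(im, border=10).size                     # (276, 84)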
|
ppizarror/Hero-of-Antair
|
data/images/pil/ImageOps.py
|
Python
|
gpl-2.0
| 13229
|
# Copyright (C) 2006-2007, Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
from gettext import gettext as _
import sys
import gconf
import glib
import gobject
import gtk
import dbus
from sugar.graphics import style
from sugar.graphics.icon import get_icon_state
from sugar.graphics.tray import TrayIcon
from sugar.graphics.palette import Palette
from sugar.graphics.xocolor import XoColor
from jarabe.frame.frameinvoker import FrameWidgetInvoker
_ICON_NAME = 'battery'
_STATUS_CHARGING = 0
_STATUS_DISCHARGING = 1
_STATUS_FULLY_CHARGED = 2
_STATUS_NOT_PRESENT = 3
_UP_DEVICE_IFACE = 'org.freedesktop.UPower.Device'
_UP_TYPE_BATTERY = 2
_UP_STATE_UNKNOWN = 0
_UP_STATE_CHARGING = 1
_UP_STATE_DISCHARGING = 2
_UP_STATE_EMPTY = 3
_UP_STATE_FULL = 4
_UP_STATE_CHARGE_PENDING = 5
_UP_STATE_DISCHARGE_PENDING = 6
_WARN_MIN_PERCENTAGE = 15
class DeviceView(TrayIcon):
FRAME_POSITION_RELATIVE = 102
def __init__(self, battery):
client = gconf.client_get_default()
self._color = XoColor(client.get_string('/desktop/sugar/user/color'))
TrayIcon.__init__(self, icon_name=_ICON_NAME, xo_color=self._color)
self.set_palette_invoker(FrameWidgetInvoker(self))
self._model = DeviceModel(battery)
self.palette = BatteryPalette(glib.markup_escape_text(_('My Battery')))
self.palette.set_group_id('frame')
self._model.connect('updated',
self.__battery_status_changed_cb)
self._update_info()
def _update_info(self):
name = _ICON_NAME
current_level = self._model.props.level
xo_color = self._color
badge_name = None
if not self._model.props.present:
status = _STATUS_NOT_PRESENT
badge_name = None
xo_color = XoColor('%s,%s' % (style.COLOR_WHITE.get_svg(),
style.COLOR_WHITE.get_svg()))
elif self._model.props.charging:
status = _STATUS_CHARGING
name += '-charging'
xo_color = XoColor('%s,%s' % (style.COLOR_WHITE.get_svg(),
style.COLOR_WHITE.get_svg()))
elif self._model.props.discharging:
status = _STATUS_DISCHARGING
if current_level <= _WARN_MIN_PERCENTAGE:
badge_name = 'emblem-warning'
else:
status = _STATUS_FULLY_CHARGED
self.icon.props.icon_name = get_icon_state(name, current_level,
step=-5)
self.icon.props.xo_color = xo_color
self.icon.props.badge_name = badge_name
self.palette.set_info(current_level, self._model.props.time_remaining,
status)
def __battery_status_changed_cb(self, model):
self._update_info()
class BatteryPalette(Palette):
def __init__(self, primary_text):
Palette.__init__(self, primary_text)
self._level = 0
self._time = 0
self._status = _STATUS_NOT_PRESENT
self._progress_bar = gtk.ProgressBar()
self._progress_bar.set_size_request(
style.zoom(style.GRID_CELL_SIZE * 4), -1)
self._progress_bar.show()
self._status_label = gtk.Label()
self._status_label.show()
vbox = gtk.VBox()
vbox.pack_start(self._progress_bar)
vbox.pack_start(self._status_label)
vbox.show()
self._progress_widget = vbox
self.set_content(self._progress_widget)
def set_info(self, percentage, seconds, status):
self._level = percentage
self._time = seconds
self._status = status
self._progress_bar.set_fraction(percentage / 100.0)
self._update_secondary()
def _update_secondary(self):
secondary_text = ''
status_text = '%s%%' % (self._level, )
progress_widget = self._progress_widget
if self._status == _STATUS_NOT_PRESENT:
secondary_text = _('Removed')
progress_widget = None
elif self._status == _STATUS_CHARGING:
secondary_text = _('Charging')
elif self._status == _STATUS_DISCHARGING:
if self._level <= _WARN_MIN_PERCENTAGE:
secondary_text = _('Very little power remaining')
else:
minutes_remaining = self._time // 60
remaining_hourpart = minutes_remaining // 60
remaining_minpart = minutes_remaining % 60
# TRANS: do not translate %(hour)d:%(min).2d it is a variable,
# only translate the word "remaining"
secondary_text = _('%(hour)d:%(min).2d remaining') % \
{'hour': remaining_hourpart, 'min': remaining_minpart}
else:
secondary_text = _('Charged')
self.set_content(progress_widget)
self.props.secondary_text = glib.markup_escape_text(secondary_text)
self._status_label.set_text(status_text)
class DeviceModel(gobject.GObject):
__gproperties__ = {
'level': (int, None, None, 0, 100, 0, gobject.PARAM_READABLE),
'time-remaining': (int, None, None, 0, sys.maxint, 0,
gobject.PARAM_READABLE), # unit: seconds
'charging': (bool, None, None, False, gobject.PARAM_READABLE),
'discharging': (bool, None, None, False, gobject.PARAM_READABLE),
'present': (bool, None, None, False, gobject.PARAM_READABLE),
}
__gsignals__ = {
'updated': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ([])),
}
def __init__(self, battery):
gobject.GObject.__init__(self)
self._battery = battery
self._battery_props_iface = dbus.Interface(self._battery,
dbus.PROPERTIES_IFACE)
self._battery.connect_to_signal('Changed',
self.__battery_properties_changed_cb,
dbus_interface=_UP_DEVICE_IFACE)
self._fetch_properties_from_upower()
def _fetch_properties_from_upower(self):
"""Get current values from UPower."""
# pylint: disable=W0201
try:
dbus_props = self._battery_props_iface.GetAll(_UP_DEVICE_IFACE)
except dbus.DBusException:
logging.error('Cannot access battery properties')
dbus_props = {}
self._level = dbus_props.get('Percentage', 0)
self._state = dbus_props.get('State', _UP_STATE_UNKNOWN)
self._present = dbus_props.get('IsPresent', False)
self._time_to_empty = dbus_props.get('TimeToEmpty', 0)
self._time_to_full = dbus_props.get('TimeToFull', 0)
def do_get_property(self, pspec):
"""Return current value of given GObject property."""
if pspec.name == 'level':
return self._level
if pspec.name == 'charging':
return self._state == _UP_STATE_CHARGING
if pspec.name == 'discharging':
return self._state == _UP_STATE_DISCHARGING
if pspec.name == 'present':
return self._present
if pspec.name == 'time-remaining':
if self._state == _UP_STATE_CHARGING:
return self._time_to_full
if self._state == _UP_STATE_DISCHARGING:
return self._time_to_empty
return 0
def get_type(self):
return 'battery'
def __battery_properties_changed_cb(self):
old_level = self._level
old_state = self._state
old_present = self._present
old_time = self.props.time_remaining
self._fetch_properties_from_upower()
if self._level != old_level:
self.notify('level')
if self._state != old_state:
self.notify('charging')
self.notify('discharging')
if self._present != old_present:
self.notify('present')
if self.props.time_remaining != old_time:
self.notify('time-remaining')
self.emit('updated')
def setup(tray):
bus = dbus.Bus(dbus.Bus.TYPE_SYSTEM)
up_proxy = bus.get_object('org.freedesktop.UPower',
'/org/freedesktop/UPower')
upower = dbus.Interface(up_proxy, 'org.freedesktop.UPower')
for device_path in upower.EnumerateDevices():
device = bus.get_object('org.freedesktop.UPower', device_path)
device_prop_iface = dbus.Interface(device, dbus.PROPERTIES_IFACE)
device_type = device_prop_iface.Get(_UP_DEVICE_IFACE, 'Type')
if device_type == _UP_TYPE_BATTERY:
tray.add_device(DeviceView(device))
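# Illustrative sketch appended for this document (not part of the original
# module): the remaining-time arithmetic used by BatteryPalette._update_secondary,
# shown standalone so it can be checked without a Sugar session (running it
# still assumes the GTK/Sugar imports at the top of the file resolve). The
# 4000-second figure is made-up demo data.
if __name__ == '__main__':
    seconds = 4000
    minutes_remaining = seconds // 60             # 66
    remaining_hourpart = minutes_remaining // 60  # 1
    remaining_minpart = minutes_remaining % 60    # 6
    print('%(hour)d:%(min).2d remaining'
          % {'hour': remaining_hourpart, 'min': remaining_minpart})  # 1:06 remaining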
|
nemesiscodex/JukyOS-sugar
|
extensions/deviceicon/battery.py
|
Python
|
gpl-2.0
| 9334
|
import numpy as np
mecanum_matrix = np.matrix([
[+1, +1, +1, +1], # Unitless! Shooting for rad/s
[+1, -1, +1, -1], # Unitless! Shooting for rad/s
[+1, +1, -1, -1], # Unitless! Shooting for rad/s
# [+1, -1, -1, +1], # This is the error row (May not be necessary)
], dtype=np.float32) / 4.0 # All of the rows are divided by 4
v_target = np.array([0.0, 1.0, 0.0])
print np.linalg.lstsq(mecanum_matrix, v_target)[0]
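# Quick sanity check added for this document (illustrative, not in the
# original file): back-substitute the least-squares wheel speeds and confirm
# they reproduce the commanded body velocity. The 3x4 system is
# underdetermined, so lstsq returns the minimum-norm solution and the
# residual should be ~0.
wheel_speeds = np.linalg.lstsq(mecanum_matrix, v_target)[0]
print np.allclose(np.asarray(mecanum_matrix).dot(wheel_speeds), v_target)  # True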
|
ufieeehw/IEEE2015
|
ros/ieee2015_controller/src/test.py
|
Python
|
gpl-2.0
| 436
|
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')
parameter_list = [[traindat,testdat,label_traindat,1,1e-5],[traindat,testdat,label_traindat,0.9,1e-5]]
def classifier_mpdsvm_modular (fm_train_real=traindat,fm_test_real=testdat,label_train_twoclass=label_traindat,C=1,epsilon=1e-5):
from shogun.Features import RealFeatures, BinaryLabels
from shogun.Kernel import GaussianKernel
from shogun.Classifier import MPDSVM
feats_train=RealFeatures(fm_train_real)
feats_test=RealFeatures(fm_test_real)
width=2.1
kernel=GaussianKernel(feats_train, feats_train, width)
labels=BinaryLabels(label_train_twoclass)
svm=MPDSVM(C, kernel, labels)
svm.set_epsilon(epsilon)
svm.train()
kernel.init(feats_train, feats_test)
	predictions = svm.apply()
return predictions, svm, predictions.get_labels()
if __name__=='__main__':
print('MPDSVM')
classifier_mpdsvm_modular(*parameter_list[0])
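	# also exercise the second parameter set (C=0.9) defined above -- an
	# illustrative addition mirroring the call on the first set
	classifier_mpdsvm_modular(*parameter_list[1])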
|
ratschlab/ASP
|
examples/undocumented/python_modular/classifier_mpdsvm_modular.py
|
Python
|
gpl-2.0
| 1097
|
# -*- coding: utf-8 -*-
"""
Automatically generated reports can be edited by a health professional
while handling the controls (questionnaires) filled in by a patient. The
report can be exported in docX and PDF format.
"""
|
acesonl/remotecare
|
remotecare/apps/report/__init__.py
|
Python
|
gpl-3.0
| 216
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
class Noticia(models.Model):
Publicado = 'Publicado'
Borrador = 'Borrador'
Titulo = models.CharField(max_length=30)
Subtitulo = models.CharField(max_length=50)
Imagen = models.FileField(blank=True, upload_to='media/fotos/noticias')
SubtituloImag = models.CharField(max_length=30)
Cuerpo = models.TextField(max_length=500)
    Timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    Actualizado = models.DateTimeField(auto_now_add=False, auto_now=True)
    CHOICES = [(Publicado, 'Publicado'), (Borrador, 'Borrador')]
Estado = models.CharField(max_length=9,choices=CHOICES, default=Borrador)
IncluirVideo = models.BooleanField()
CodVideo = models.CharField(max_length=200)
Tags = models.CharField(max_length=30)
usuario = models.ForeignKey(User)
def __str__(self):
return self.Titulo + ' - ' + self.Subtitulo
class Evento(models.Model):
Titulo = models.CharField(max_length=30)
Subtitulo = models.CharField(max_length=50)
Imagen = models.FileField(blank=True, upload_to='media/fotos/noticias')
SubtituloImag = models.CharField(max_length=30)
Cuerpo = models.CharField(max_length=500)
    Timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    Actualizado = models.DateTimeField(auto_now_add=False, auto_now=True)
    Lugar = models.CharField(max_length=50)
    Fecha = models.DateTimeField(auto_now_add=False)
Organizadores = models.CharField(max_length=30)
Ponente = models.CharField(max_length=30)
Tags = models.CharField(max_length=30)
def __str__(self):
return self.Titulo + ' - ' + self.Subtitulo
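# Illustrative usage sketch added for this document (not part of the original
# app); the 'editor' user and all field values below are made up:
#
#   >>> from django.contrib.auth.models import User
#   >>> autor = User.objects.get(username='editor')
#   >>> n = Noticia.objects.create(
#   ...     Titulo='Lanzamiento', Subtitulo='Nueva version del sitio',
#   ...     SubtituloImag='captura', Cuerpo='Texto de la noticia...',
#   ...     Estado=Noticia.Publicado, IncluirVideo=False, CodVideo='',
#   ...     Tags='noticias', usuario=autor)
#   >>> Noticia.objects.filter(Estado=Noticia.Publicado).count()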
|
magvugr/AT
|
AppAdiccionTic/models.py
|
Python
|
gpl-3.0
| 1704
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
--------------------------------------------------------------------------------------------------
prc_aproximacao
approach procedure according to the established aerodrome and runway
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
revision 0.2 2016/oct mlabru
pep8 style conventions
revision 0.1 2015/nov mlabru
initial version (Linux/Python)
--------------------------------------------------------------------------------------------------
"""
__version__ = "$revision: 0.2$"
__author__ = "Milton Abrunhosa"
__date__ = "2016/10"
# < imports >--------------------------------------------------------------------------------------
# python library
import logging
# control
# import control.control_debug as dbg
# model
import model.newton.defs_newton as ldefs
import model.newton.cine.abort_prc as abnd
import model.newton.cine.obtem_brk as obrk
import model.newton.cine.prc_dir_ponto as dp
import model.newton.cine.trata_associado as tass
import model.newton.cine.sentido_curva as scrv
# -------------------------------------------------------------------------------------------------
def __obtem_apx_per(f_atv, f_apx):
    """
    gets the missed approach procedure
    @param f_atv: pointer to aircraft
    @param f_apx: pointer to approach
    @return True if the missed approach was found, otherwise False (nonexistent)
    """
    # check input
    assert f_atv
    assert f_apx
    # missed approach ok ?
    if (f_apx.ptr_apx_prc_ape is not None) and (f_apx.ptr_apx_prc_ape.v_prc_ok):
        # init the aircraft's procedure field with the missed approach
        f_atv.ptr_trf_prc = f_apx.ptr_apx_prc_ape
        # the established aerodrome and runway exist. return search success
        return True
    # return search failure
    return False
# ------------------------------------------------------------------------------------------------
def __obtem_ils(f_atv, f_apx):
    """
    gets the ILS procedure
    @param f_atv: pointer to aircraft
    @param f_apx: pointer to approach
    @return True if the ILS was found, otherwise False (nonexistent)
    """
    # check input
    assert f_atv
    assert f_apx
    # ILS ok ?
    if (f_apx.ptr_apx_prc_ils is not None) and (f_apx.ptr_apx_prc_ils.v_prc_ok):
        # init the aircraft's procedure field with the ILS
        f_atv.ptr_trf_prc = f_apx.ptr_apx_prc_ils
        # the established aerodrome and runway exist. return search success
        return True
    # return search failure
    return False
# ------------------------------------------------------------------------------------------------
def __obtem_pouso(f_atv, f_apx):
    """
    gets the landing procedure
    @param f_atv: pointer to aircraft
    @param f_apx: pointer to approach
    @return True if the landing was set up, otherwise False (nonexistent)
    """
    # check input
    assert f_atv
    assert f_apx
    # landing runway ok ?
    if (f_apx.ptr_apx_pis is not None) and (f_apx.ptr_apx_pis.v_pst_ok):
        # minimum angle for landing
        # i_pst_rumo (mlabru)
        lf_ang = abs(f_atv.f_trf_pro_atu - f_apx.ptr_apx_pis.f_pst_true)
        # close enough to the runway heading for a straight-in landing ?
        if lf_ang <= 15.:
            # start the new phase in the approach
            f_atv.en_atv_fase = ldefs.E_FASE_APXALINHAR
            # set the heading to be reached (runway heading)
            # i_pst_rumo (mlabru)
            f_atv.f_atv_pro_dem = f_apx.ptr_apx_pis.f_pst_true
            # start the turn on the shorter side
            scrv.sentido_curva(f_atv)
            # aerodrome pointer
            f_atv.ptr_atv_aer = f_apx.ptr_apx_aer
            # runway pointer
            f_atv.ptr_atv_pst = f_apx.ptr_apx_pis
            # put in landing procedure
            f_atv.en_trf_fnc_ope = ldefs.E_POUSO
            # back to the initial phase of the approach procedure OR the initial phase of the landing
            f_atv.en_atv_fase = ldefs.E_FASE_ZERO
            # return search success
            return True
    # return search failure
    return False
# -------------------------------------------------------------------------------------------------
def prc_aproximacao(f_atv, f_cine_data, f_stk_context):
    """
    performs the approach procedure
    @param f_atv: pointer to aircraft
    @param f_cine_data: kinematics data
    @param f_stk_context: pointer to stack
    """
    # check input
    assert f_atv
    # active flight ?
    if (not f_atv.v_atv_ok) or (ldefs.E_ATIVA != f_atv.en_trf_est_atv):
        # logger
        l_log = logging.getLogger("prc_aproximacao")
        l_log.setLevel(logging.ERROR)
        l_log.error(u"<E01: aeronave não ativa.")
        # abort procedure
        abnd.abort_prc(f_atv)
        # bail out...
        return
    # performance ok ?
    if (f_atv.ptr_trf_prf is None) or (not f_atv.ptr_trf_prf.v_prf_ok):
        # logger
        l_log = logging.getLogger("prc_aproximacao")
        l_log.setLevel(logging.ERROR)
        l_log.error(u"<E02: performance não existe.")
        # abort procedure
        abnd.abort_prc(f_atv)
        # bail out...
        return
    # pointer to approach
    l_apx = f_atv.ptr_trf_prc
    # approach ok ?
    if (l_apx is None) or (not l_apx.v_prc_ok):
        # logger
        l_log = logging.getLogger("prc_aproximacao")
        l_log.setLevel(logging.ERROR)
        l_log.error(u"<E03: aproximação inexistente. aeronave:[{}/{}].".format(f_atv.i_trf_id, f_atv.s_trf_ind))
        # abort procedure
        abnd.abort_prc(f_atv)
        # return
        return
    # local variables
    l_brk = None
    # data preparation phase for the procedure ?
    if ldefs.E_FASE_ZERO == f_atv.en_atv_fase:
        # init the breakpoint index
        f_cine_data.i_brk_ndx = 0
        # start with data from the first breakpoint
        l_brk = f_atv.ptr_atv_brk = l_apx.lst_apx_brk[0]
        # breakpoint ok ?
        if (l_brk is None) or (not l_brk.v_brk_ok):
            # logger
            l_log = logging.getLogger("prc_aproximacao")
            l_log.setLevel(logging.ERROR)
            l_log.error(u"<E04: fase zero. apx/breakpoint inexistente. aeronave:[{}/{}].".format(f_atv.i_trf_id, f_atv.s_trf_ind))
            # abort procedure
            abnd.abort_prc(f_atv)
            # return
            return
        # get breakpoint data
        obrk.obtem_brk(f_atv, l_brk, f_cine_data)
    # phase of steering to the procedure's breakpoints ?
    elif ldefs.E_FASE_DIRPONTO == f_atv.en_atv_fase:
        # intercepted the breakpoint ?
        if dp.prc_dir_ponto(f_atv, f_cine_data.f_coord_x_brk, f_cine_data.f_coord_y_brk, f_cine_data):
            # if there is no associated procedure, hold; otherwise execute the procedure
            f_atv.en_atv_fase = ldefs.E_FASE_ESPERA if f_atv.ptr_atv_brk is not None else ldefs.E_FASE_ASSOCIADO
    # heading and altitude phase ?
    elif ldefs.E_FASE_RUMOALT == f_atv.en_atv_fase:
        # reached the demanded heading and altitude ?
        if (f_atv.f_trf_pro_atu == f_atv.f_atv_pro_dem) and (f_atv.f_trf_alt_atu == f_atv.f_atv_alt_dem):
            # if there is no associated procedure, hold; otherwise execute the procedure
            f_atv.en_atv_fase = ldefs.E_FASE_ESPERA if f_atv.ptr_atv_brk is not None else ldefs.E_FASE_ASSOCIADO
    # holding phase ? (keeps the aircraft in orbit until it reaches the breakpoint altitude)
    elif ldefs.E_FASE_ESPERA == f_atv.en_atv_fase:
        # breakpoint data
        l_brk = f_atv.ptr_atv_brk
        assert l_brk
        # has NOT reached the breakpoint altitude ?
        if f_atv.f_trf_alt_atu != l_brk.f_brk_alt:
            # get breakpoint data (holding with demanded altitude)
            obrk.obtem_brk(f_atv, l_brk, f_cine_data)
            # push the current context because of the change in operational function
            f_stk_context.append((f_atv.en_trf_fnc_ope, ldefs.E_FASE_ASSOCIADO, f_atv.ptr_trf_prc, f_atv.ptr_atv_brk, f_cine_data.i_brk_ndx))
            # save the current operational function
            f_atv.en_trf_fnc_ope_ant = ldefs.E_APROXIMACAO
            # set the new operational function and the new phase for not having reached the breakpoint altitude
            f_atv.en_trf_fnc_ope = ldefs.E_ESPERA
            f_atv.en_atv_fase = ldefs.E_FASE_ZERO
            f_atv.ptr_trf_prc = l_apx.ptr_apx_prc_esp
        # otherwise, reached the breakpoint altitude...
        else:
            # set the new demanded speed and flag the new phase
            f_atv.f_atv_vel_dem = f_atv.ptr_trf_prf.f_prf_vel_apx
            f_atv.en_atv_fase = ldefs.E_FASE_ASSOCIADO
    # associated phase ? (detects whether other procedures were chained)
    elif ldefs.E_FASE_ASSOCIADO == f_atv.en_atv_fase:
        # breakpoint data
        l_brk = f_atv.ptr_atv_brk
        assert l_brk
        # flag the new phase
        f_atv.en_atv_fase = ldefs.E_FASE_BREAKPOINT
        # is there a procedure (APX, APE, TRJ, ESP...) associated with the breakpoint ?
        if tass.trata_associado(f_atv, l_brk, f_cine_data.i_brk_ndx, f_stk_context):
            # is it the last breakpoint of the current approach ?
            if f_atv.ptr_atv_brk == l_apx.lst_apx_brk[-1]:
                f_cine_data.i_brk_ndx -= 1
    # already passed through all the breakpoints ?
    elif ldefs.E_FASE_BREAKPOINT == f_atv.en_atv_fase:
        # is it the last breakpoint of the current approach ?
        if f_atv.ptr_atv_brk == l_apx.lst_apx_brk[-1]:
            # ILS available ?
            if l_apx.ptr_apx_prc_ils is not None:
                # ils ok ?
                if __obtem_ils(f_atv, l_apx):
                    # put in ILS procedure
                    f_atv.en_trf_fnc_ope = ldefs.E_ILS
                    f_atv.en_atv_fase = ldefs.E_FASE_ZERO
                # otherwise, ils not ok...
                else:
                    # put in manual
                    f_atv.en_trf_fnc_ope = ldefs.E_MANUAL
            # can make a missed approach in case the aircraft is not in condition to land ?
            elif l_apx.ptr_apx_prc_ape is not None:
                # breakpoint data
                l_brk = f_atv.ptr_atv_brk
                assert l_brk
                # in condition to land ?
                if (abs(f_atv.f_trf_alt_atu - l_brk.f_brk_alt) <= 0.01) and (abs(f_atv.f_trf_vel_atu - f_atv.ptr_trf_prf.f_prf_vel_apx) <= 0.01):
                    # landing ok ?
                    if not __obtem_pouso(f_atv, l_apx):
                        # put in manual
                        f_atv.en_trf_fnc_ope = ldefs.E_MANUAL
                # otherwise, NOT in condition to land...
                else:
                    # missed approach ok ?
                    if __obtem_apx_per(f_atv, l_apx):
                        # prepare for the missed approach procedure
                        f_atv.en_trf_fnc_ope = ldefs.E_APXPERDIDA
                        f_atv.en_atv_fase = ldefs.E_FASE_ZERO
                    # otherwise, missed approach not ok...
                    else:
                        # put in manual
                        f_atv.en_trf_fnc_ope = ldefs.E_MANUAL
            # otherwise, can make neither a missed approach nor an ILS: forced landing...
            else:
                # landing ok ?
                if not __obtem_pouso(f_atv, l_apx):
                    # put in manual
                    f_atv.en_trf_fnc_ope = ldefs.E_MANUAL
        # otherwise, not the last breakpoint
        else:
            # next breakpoint
            f_cine_data.i_brk_ndx += 1
            # point to the next breakpoint
            l_brk = f_atv.ptr_atv_brk = l_apx.lst_apx_brk[f_cine_data.i_brk_ndx]
            # breakpoint ok ?
            if (l_brk is None) or (not l_brk.v_brk_ok):
                # logger
                l_log = logging.getLogger("prc_aproximacao")
                l_log.setLevel(logging.ERROR)
                l_log.error(u"<E05: fase breakpoint. apx/breakpoint inexistente. aeronave:[{}/{}].".format(f_atv.i_trf_id, f_atv.s_trf_ind))
                # abort procedure
                abnd.abort_prc(f_atv)
                # nonexistent apx/breakpoint. bail out...
                return
            # get breakpoint data
            obrk.obtem_brk(f_atv, l_brk, f_cine_data)
    # otherwise,...
    else:
        # logger
        l_log = logging.getLogger("prc_aproximacao")
        l_log.setLevel(logging.ERROR)
        l_log.error(u"<E06: fase da aproximação não identificada. fase:[{}].".format(ldefs.DCT_FASE[f_atv.en_atv_fase]))
# < the end >--------------------------------------------------------------------------------------
|
mlabru/ptracks
|
model/newton/cine/prc_aproximacao.py
|
Python
|
gpl-3.0
| 13517
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# <https://code.activestate.com/recipes/580709-lines-of-code-loc/>
# Basic Lines-Of-Code counter in Python source files, reporting the
# number of blank, comment and source code lines and total number of
# lines in all Python files scanned.
# Usage example:
# % python locs.py -rec ~/Projects
# 8691 *.py files: 365038 blank (14.0%), 212100 comment (8.1%),
# 2030198 source (77.9%), 2607336 total lines
# (2.739 secs, 951872 lines/sec)
# % python3 locs.py -rec ~/Projects
# 8691 *.py files: 365037 blank (14.0%), 212100 comment (8.1%),
# 2030198 source (77.9%), 2607335 total lines
# (2.599 secs, 1003158 lines/sec)
# % python3 locs.py -h
# usage: locs.py [-help] [-recurse] [-verbose] <file_or_dir_name> ...
# Tested with 64-bit Python 2.7.10 and 3.5.1 on MacOS 10.11.6 only.
from glob import iglob
from os.path import basename, exists, isdir, join
from time import time
__all__ = ('Loc',)
__version__ = '16.10.25'
class Loc(object):
'''Lines-Of-Code accumulator.
'''
    blank = 0
comment = 0
files = 0
source = 0
ext = '.py'
_time0 = 0
_recurse = False # process dirs
_verbose = False # print details
def __init__(self, recurse=False, verbose=False):
if recurse:
self._recurse = recurse
if verbose:
self._verbose = verbose
self._time0 = time()
def __str__(self):
s = time() - self._time0
n = self.source + self.comment + self.blank
p = int(n / s) if n > s > 0 else '-'
t = ['%s *%s files:' % (self.files, self.ext),
self._bcst(self.blank, self.comment, self.source),
'(%.3f secs, %s lines/sec)' % (s, p)]
return ' '.join(t)
def _bcst(self, blank, comment, source):
t, n = [], blank + comment + source
for a, v in (('blank', blank),
('comment', comment),
('source', source)):
p = ' (%.1f%%)' % ((v * 100.0) / n,) if n > 0 else ''
t.append('%s %s%s' % (v, a, p))
t.append('%s total lines' % (n,))
return ', '.join(t)
def adir(self, name):
'''Process a directory.
'''
if self._recurse:
if self._verbose:
print(' dir %s: %s' % (name, '...'))
b, c, s = self.blank, self.comment, self.source
self.aglob(join(name, '*'))
b = self.blank - b
c = self.comment - c
s = self.source - s
t = name, self._bcst(b, c, s)
print(' dir %s: %s' % t)
else:
self.aglob(join(name, '*'))
def afile(self, name):
'''Process a file.
'''
if name.endswith(self.ext) and exists(name):
b = c = s = 0
with open(name, 'rb') as f:
for t in f.readlines():
t = t.lstrip()
if not t:
b += 1
elif t.startswith(b'#'): # Python 3+
c += 1
else:
s += 1
self.blank += b
self.comment += c
self.source += s
self.files += 1
if self._verbose:
t = self.files, name, self._bcst(b, c, s)
print('file %s %s: %s' % t)
def aglob(self, wild):
'''Process a possible wildcard.
'''
for t in iglob(wild):
if isdir(t):
self.adir(t)
else:
self.afile(t)
if __name__ == '__main__':
import sys
argv0 = basename(sys.argv[0])
loc = Loc()
try:
for arg in sys.argv[1:]:
if not arg.startswith('-'):
loc.aglob(arg)
elif '-help'.startswith(arg):
print('usage: %s [-help] [-recurse] [-verbose] <file_or_dir_name> ...' % (argv0,))
sys.exit(0)
elif '-recurse'.startswith(arg):
loc._recurse = True
elif '-verbose'.startswith(arg):
loc._verbose = True
elif arg != '--':
print('%s: invalid option: %r' % (argv0, arg))
sys.exit(1)
except KeyboardInterrupt:
print('')
print('%s' % (loc,))
|
KamilKwiatkowski123/Repozytorium
|
recipe-580709-1.py
|
Python
|
gpl-3.0
| 4433
|
'''apport package hook for udisks
(c) 2009 Canonical Ltd.
Author: Martin Pitt <martin.pitt@ubuntu.com>
'''
import os
import os.path
import apport.hookutils
import dbus
UDISKS = 'org.freedesktop.UDisks'
def add_info(report):
apport.hookutils.attach_hardware(report)
user_rules = []
for f in os.listdir('/etc/udev/rules.d'):
if not f.startswith('70-persistent-') and f != 'README':
user_rules.append(f)
if user_rules:
report['CustomUdevRuleFiles'] = ' '.join(user_rules)
report['UDisksDump'] = apport.hookutils.command_output(['udisks', '--dump'])
report['Mounts'] = apport.hookutils.command_output(['mount'])
# grab SMART blobs
dkd = dbus.Interface(dbus.SystemBus().get_object(UDISKS,
'/org/freedesktop/UDisks'), UDISKS)
for d in dkd.EnumerateDevices():
dev_props = dbus.Interface(dbus.SystemBus().get_object(UDISKS, d),
dbus.PROPERTIES_IFACE)
blob = dev_props.Get(UDISKS, 'DriveAtaSmartBlob')
if len(blob) > 0:
report['AtaSmartBlob_' + os.path.basename(d)] = ''.join(map(chr, blob))
if __name__ == '__main__':
r = {}
add_info(r)
for k, v in r.iteritems():
print '%s: "%s"' % (k, v)
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/apport/package-hooks/udisks.py
|
Python
|
gpl-3.0
| 1236
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created: Wed May 25 13:43:28 2016
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(800, 752)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/podbicon.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.statusBarLabel = QtGui.QLabel(self.centralwidget)
self.statusBarLabel.setGeometry(QtCore.QRect(0, 690, 801, 20))
self.statusBarLabel.setFrameShape(QtGui.QFrame.StyledPanel)
self.statusBarLabel.setFrameShadow(QtGui.QFrame.Sunken)
self.statusBarLabel.setText(_fromUtf8(""))
self.statusBarLabel.setObjectName(_fromUtf8("statusBarLabel"))
self.frame = QtGui.QFrame(self.centralwidget)
self.frame.setGeometry(QtCore.QRect(0, 0, 801, 31))
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.clearToolButton = QtGui.QToolButton(self.frame)
self.clearToolButton.setGeometry(QtCore.QRect(90, 0, 32, 32))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/clear.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.clearToolButton.setIcon(icon1)
self.clearToolButton.setIconSize(QtCore.QSize(32, 32))
self.clearToolButton.setObjectName(_fromUtf8("clearToolButton"))
self.saveToolButton = QtGui.QToolButton(self.frame)
self.saveToolButton.setGeometry(QtCore.QRect(60, 0, 32, 32))
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/save.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.saveToolButton.setIcon(icon2)
self.saveToolButton.setIconSize(QtCore.QSize(32, 32))
self.saveToolButton.setObjectName(_fromUtf8("saveToolButton"))
self.openToolButton = QtGui.QToolButton(self.frame)
self.openToolButton.setGeometry(QtCore.QRect(30, 0, 32, 32))
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/open.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.openToolButton.setIcon(icon3)
self.openToolButton.setIconSize(QtCore.QSize(32, 32))
self.openToolButton.setObjectName(_fromUtf8("openToolButton"))
self.newToolButton = QtGui.QToolButton(self.frame)
self.newToolButton.setGeometry(QtCore.QRect(0, 0, 32, 32))
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/new.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.newToolButton.setIcon(icon4)
self.newToolButton.setIconSize(QtCore.QSize(32, 32))
self.newToolButton.setObjectName(_fromUtf8("newToolButton"))
self.printToolButton = QtGui.QToolButton(self.frame)
self.printToolButton.setGeometry(QtCore.QRect(770, 0, 32, 32))
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(_fromUtf8(":/print.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.printToolButton.setIcon(icon5)
self.printToolButton.setIconSize(QtCore.QSize(32, 32))
self.printToolButton.setObjectName(_fromUtf8("printToolButton"))
self.exportToolButton = QtGui.QToolButton(self.frame)
self.exportToolButton.setGeometry(QtCore.QRect(740, 0, 32, 32))
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(_fromUtf8(":/exportpdf.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.exportToolButton.setIcon(icon6)
self.exportToolButton.setIconSize(QtCore.QSize(32, 32))
self.exportToolButton.setObjectName(_fromUtf8("exportToolButton"))
self.orderDetailsGroupBox = QtGui.QGroupBox(self.centralwidget)
self.orderDetailsGroupBox.setGeometry(QtCore.QRect(0, 40, 801, 71))
self.orderDetailsGroupBox.setObjectName(_fromUtf8("orderDetailsGroupBox"))
self.layoutWidget = QtGui.QWidget(self.orderDetailsGroupBox)
self.layoutWidget.setGeometry(QtCore.QRect(10, 20, 781, 48))
self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
self.gridLayout = QtGui.QGridLayout(self.layoutWidget)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_2 = QtGui.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
self.orderNumberLabel = QtGui.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.orderNumberLabel.setFont(font)
self.orderNumberLabel.setText(_fromUtf8(""))
self.orderNumberLabel.setObjectName(_fromUtf8("orderNumberLabel"))
self.gridLayout.addWidget(self.orderNumberLabel, 0, 1, 1, 1)
self.label_3 = QtGui.QLabel(self.layoutWidget)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 0, 2, 1, 1)
self.orderDateEdit = QtGui.QDateEdit(self.layoutWidget)
self.orderDateEdit.setObjectName(_fromUtf8("orderDateEdit"))
self.gridLayout.addWidget(self.orderDateEdit, 0, 3, 1, 1)
self.label_5 = QtGui.QLabel(self.layoutWidget)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout.addWidget(self.label_5, 0, 4, 1, 1)
self.paymentTermsComboBox = QtGui.QComboBox(self.layoutWidget)
self.paymentTermsComboBox.setObjectName(_fromUtf8("paymentTermsComboBox"))
self.gridLayout.addWidget(self.paymentTermsComboBox, 0, 5, 1, 1)
self.label_18 = QtGui.QLabel(self.layoutWidget)
self.label_18.setObjectName(_fromUtf8("label_18"))
self.gridLayout.addWidget(self.label_18, 1, 0, 1, 1)
self.projectComboBox = QtGui.QComboBox(self.layoutWidget)
self.projectComboBox.setObjectName(_fromUtf8("projectComboBox"))
self.gridLayout.addWidget(self.projectComboBox, 1, 1, 1, 1)
self.label_4 = QtGui.QLabel(self.layoutWidget)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout.addWidget(self.label_4, 1, 2, 1, 1)
self.orderStatusComboBox = QtGui.QComboBox(self.layoutWidget)
self.orderStatusComboBox.setObjectName(_fromUtf8("orderStatusComboBox"))
self.gridLayout.addWidget(self.orderStatusComboBox, 1, 3, 1, 1)
self.taxRateLabel = QtGui.QLabel(self.layoutWidget)
self.taxRateLabel.setObjectName(_fromUtf8("taxRateLabel"))
self.gridLayout.addWidget(self.taxRateLabel, 1, 4, 1, 1)
self.taxRateValueLabel = QtGui.QLabel(self.layoutWidget)
self.taxRateValueLabel.setText(_fromUtf8(""))
self.taxRateValueLabel.setObjectName(_fromUtf8("taxRateValueLabel"))
self.gridLayout.addWidget(self.taxRateValueLabel, 1, 5, 1, 1)
self.supplierGroupBox = QtGui.QGroupBox(self.centralwidget)
self.supplierGroupBox.setGeometry(QtCore.QRect(0, 120, 801, 80))
self.supplierGroupBox.setObjectName(_fromUtf8("supplierGroupBox"))
self.layoutWidget1 = QtGui.QWidget(self.supplierGroupBox)
self.layoutWidget1.setGeometry(QtCore.QRect(280, 12, 512, 62))
self.layoutWidget1.setObjectName(_fromUtf8("layoutWidget1"))
self.gridLayout_2 = QtGui.QGridLayout(self.layoutWidget1)
self.gridLayout_2.setMargin(0)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label_11 = QtGui.QLabel(self.layoutWidget1)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayout_2.addWidget(self.label_11, 0, 0, 1, 1)
self.label_8 = QtGui.QLabel(self.layoutWidget1)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout_2.addWidget(self.label_8, 0, 2, 1, 1)
self.supplierPhoneLabel = QtGui.QLabel(self.layoutWidget1)
self.supplierPhoneLabel.setText(_fromUtf8(""))
self.supplierPhoneLabel.setObjectName(_fromUtf8("supplierPhoneLabel"))
self.gridLayout_2.addWidget(self.supplierPhoneLabel, 0, 3, 1, 1)
self.label_9 = QtGui.QLabel(self.layoutWidget1)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout_2.addWidget(self.label_9, 1, 2, 1, 1)
self.supplierFaxLabel = QtGui.QLabel(self.layoutWidget1)
self.supplierFaxLabel.setText(_fromUtf8(""))
self.supplierFaxLabel.setObjectName(_fromUtf8("supplierFaxLabel"))
self.gridLayout_2.addWidget(self.supplierFaxLabel, 1, 3, 1, 1)
self.label_7 = QtGui.QLabel(self.layoutWidget1)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout_2.addWidget(self.label_7, 2, 0, 1, 1)
self.supplierContactLabel = QtGui.QLabel(self.layoutWidget1)
self.supplierContactLabel.setText(_fromUtf8(""))
self.supplierContactLabel.setObjectName(_fromUtf8("supplierContactLabel"))
self.gridLayout_2.addWidget(self.supplierContactLabel, 2, 1, 1, 1)
self.label_10 = QtGui.QLabel(self.layoutWidget1)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.gridLayout_2.addWidget(self.label_10, 2, 2, 1, 1)
self.supplierEmailLabel = QtGui.QLabel(self.layoutWidget1)
self.supplierEmailLabel.setText(_fromUtf8(""))
self.supplierEmailLabel.setObjectName(_fromUtf8("supplierEmailLabel"))
self.gridLayout_2.addWidget(self.supplierEmailLabel, 2, 3, 1, 1)
self.supplierAddressLabel = QtGui.QLabel(self.layoutWidget1)
self.supplierAddressLabel.setText(_fromUtf8(""))
self.supplierAddressLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.supplierAddressLabel.setWordWrap(True)
self.supplierAddressLabel.setObjectName(_fromUtf8("supplierAddressLabel"))
self.gridLayout_2.addWidget(self.supplierAddressLabel, 0, 1, 2, 1)
self.gridLayout_2.setColumnMinimumWidth(0, 80)
self.gridLayout_2.setColumnMinimumWidth(1, 166)
self.gridLayout_2.setColumnMinimumWidth(2, 80)
self.gridLayout_2.setColumnMinimumWidth(3, 166)
self.gridLayout_2.setRowMinimumHeight(0, 16)
self.gridLayout_2.setRowMinimumHeight(1, 16)
self.gridLayout_2.setRowMinimumHeight(2, 16)
self.supplierComboBox = QtGui.QComboBox(self.supplierGroupBox)
self.supplierComboBox.setGeometry(QtCore.QRect(11, 18, 256, 20))
self.supplierComboBox.setObjectName(_fromUtf8("supplierComboBox"))
self.productsGroupBox = QtGui.QGroupBox(self.centralwidget)
self.productsGroupBox.setGeometry(QtCore.QRect(0, 210, 801, 331))
self.productsGroupBox.setObjectName(_fromUtf8("productsGroupBox"))
self.productsTableView = QtGui.QTableView(self.productsGroupBox)
self.productsTableView.setGeometry(QtCore.QRect(10, 20, 781, 241))
self.productsTableView.setObjectName(_fromUtf8("productsTableView"))
self.layoutWidget2 = QtGui.QWidget(self.productsGroupBox)
self.layoutWidget2.setGeometry(QtCore.QRect(590, 270, 201, 53))
self.layoutWidget2.setObjectName(_fromUtf8("layoutWidget2"))
self.gridLayout_3 = QtGui.QGridLayout(self.layoutWidget2)
self.gridLayout_3.setMargin(0)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.totalExcludingTaxLabel = QtGui.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.totalExcludingTaxLabel.setFont(font)
self.totalExcludingTaxLabel.setObjectName(_fromUtf8("totalExcludingTaxLabel"))
self.gridLayout_3.addWidget(self.totalExcludingTaxLabel, 0, 0, 1, 1)
self.totalExcludingTaxResultLabel = QtGui.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.totalExcludingTaxResultLabel.setFont(font)
self.totalExcludingTaxResultLabel.setText(_fromUtf8(""))
self.totalExcludingTaxResultLabel.setObjectName(_fromUtf8("totalExcludingTaxResultLabel"))
self.gridLayout_3.addWidget(self.totalExcludingTaxResultLabel, 0, 1, 1, 1)
self.totalTaxLabel = QtGui.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.totalTaxLabel.setFont(font)
self.totalTaxLabel.setObjectName(_fromUtf8("totalTaxLabel"))
self.gridLayout_3.addWidget(self.totalTaxLabel, 1, 0, 1, 1)
self.totalTaxResultLabel = QtGui.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.totalTaxResultLabel.setFont(font)
self.totalTaxResultLabel.setText(_fromUtf8(""))
self.totalTaxResultLabel.setObjectName(_fromUtf8("totalTaxResultLabel"))
self.gridLayout_3.addWidget(self.totalTaxResultLabel, 1, 1, 1, 1)
self.totalLabel = QtGui.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.totalLabel.setFont(font)
self.totalLabel.setObjectName(_fromUtf8("totalLabel"))
self.gridLayout_3.addWidget(self.totalLabel, 2, 0, 1, 1)
self.totalResultLabel = QtGui.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.totalResultLabel.setFont(font)
self.totalResultLabel.setText(_fromUtf8(""))
self.totalResultLabel.setObjectName(_fromUtf8("totalResultLabel"))
self.gridLayout_3.addWidget(self.totalResultLabel, 2, 1, 1, 1)
self.deliveryGroupBox = QtGui.QGroupBox(self.centralwidget)
self.deliveryGroupBox.setGeometry(QtCore.QRect(0, 550, 801, 131))
self.deliveryGroupBox.setObjectName(_fromUtf8("deliveryGroupBox"))
self.layoutWidget3 = QtGui.QWidget(self.deliveryGroupBox)
self.layoutWidget3.setGeometry(QtCore.QRect(10, 20, 781, 99))
self.layoutWidget3.setObjectName(_fromUtf8("layoutWidget3"))
self.gridLayout_4 = QtGui.QGridLayout(self.layoutWidget3)
self.gridLayout_4.setMargin(0)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.label_16 = QtGui.QLabel(self.layoutWidget3)
self.label_16.setObjectName(_fromUtf8("label_16"))
self.gridLayout_4.addWidget(self.label_16, 0, 3, 1, 1)
self.label_14 = QtGui.QLabel(self.layoutWidget3)
self.label_14.setObjectName(_fromUtf8("label_14"))
self.gridLayout_4.addWidget(self.label_14, 0, 1, 1, 1)
self.gpsCoordinatesLineEdit = QtGui.QLineEdit(self.layoutWidget3)
self.gpsCoordinatesLineEdit.setObjectName(_fromUtf8("gpsCoordinatesLineEdit"))
self.gridLayout_4.addWidget(self.gpsCoordinatesLineEdit, 3, 2, 1, 1)
self.notesPlainTextEdit = QtGui.QPlainTextEdit(self.layoutWidget3)
self.notesPlainTextEdit.setPlainText(_fromUtf8(""))
self.notesPlainTextEdit.setObjectName(_fromUtf8("notesPlainTextEdit"))
self.gridLayout_4.addWidget(self.notesPlainTextEdit, 0, 4, 4, 1)
self.deliveryAddressPlainTextEdit = QtGui.QPlainTextEdit(self.layoutWidget3)
self.deliveryAddressPlainTextEdit.setObjectName(_fromUtf8("deliveryAddressPlainTextEdit"))
self.gridLayout_4.addWidget(self.deliveryAddressPlainTextEdit, 0, 2, 3, 1)
self.label_17 = QtGui.QLabel(self.layoutWidget3)
self.label_17.setObjectName(_fromUtf8("label_17"))
self.gridLayout_4.addWidget(self.label_17, 3, 1, 1, 1)
self.label_15 = QtGui.QLabel(self.layoutWidget3)
self.label_15.setObjectName(_fromUtf8("label_15"))
self.gridLayout_4.addWidget(self.label_15, 0, 0, 1, 1)
self.deliveryDateEdit = QtGui.QDateEdit(self.layoutWidget3)
self.deliveryDateEdit.setObjectName(_fromUtf8("deliveryDateEdit"))
self.gridLayout_4.addWidget(self.deliveryDateEdit, 1, 0, 1, 1)
self.gridLayout_4.setColumnMinimumWidth(0, 125)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
self.menuView = QtGui.QMenu(self.menubar)
self.menuView.setObjectName(_fromUtf8("menuView"))
self.menuHelp = QtGui.QMenu(self.menubar)
self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
self.menuEdit = QtGui.QMenu(self.menubar)
self.menuEdit.setObjectName(_fromUtf8("menuEdit"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.actionNewPurchaseOrder = QtGui.QAction(MainWindow)
self.actionNewPurchaseOrder.setObjectName(_fromUtf8("actionNewPurchaseOrder"))
self.actionView_Purchase_Order = QtGui.QAction(MainWindow)
self.actionView_Purchase_Order.setObjectName(_fromUtf8("actionView_Purchase_Order"))
self.actionExit = QtGui.QAction(MainWindow)
self.actionExit.setObjectName(_fromUtf8("actionExit"))
self.actionExit_2 = QtGui.QAction(MainWindow)
self.actionExit_2.setObjectName(_fromUtf8("actionExit_2"))
self.actionPurchase_Order = QtGui.QAction(MainWindow)
self.actionPurchase_Order.setObjectName(_fromUtf8("actionPurchase_Order"))
self.actionViewReports = QtGui.QAction(MainWindow)
self.actionViewReports.setObjectName(_fromUtf8("actionViewReports"))
self.actionAbout = QtGui.QAction(MainWindow)
self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
self.actionOpenPurchaseOrder = QtGui.QAction(MainWindow)
self.actionOpenPurchaseOrder.setObjectName(_fromUtf8("actionOpenPurchaseOrder"))
self.actionCopyPurchaseOrder = QtGui.QAction(MainWindow)
self.actionCopyPurchaseOrder.setObjectName(_fromUtf8("actionCopyPurchaseOrder"))
self.actionClearPurchaseOrder = QtGui.QAction(MainWindow)
self.actionClearPurchaseOrder.setObjectName(_fromUtf8("actionClearPurchaseOrder"))
self.actionPrintPurchaseOrder = QtGui.QAction(MainWindow)
self.actionPrintPurchaseOrder.setObjectName(_fromUtf8("actionPrintPurchaseOrder"))
self.actionEditProjects = QtGui.QAction(MainWindow)
self.actionEditProjects.setObjectName(_fromUtf8("actionEditProjects"))
self.actionEditSuppliers = QtGui.QAction(MainWindow)
self.actionEditSuppliers.setObjectName(_fromUtf8("actionEditSuppliers"))
self.actionEditProducts = QtGui.QAction(MainWindow)
self.actionEditProducts.setObjectName(_fromUtf8("actionEditProducts"))
self.actionSavePurchaseOrder = QtGui.QAction(MainWindow)
self.actionSavePurchaseOrder.setObjectName(_fromUtf8("actionSavePurchaseOrder"))
self.actionExportPurchaseOrder = QtGui.QAction(MainWindow)
self.actionExportPurchaseOrder.setObjectName(_fromUtf8("actionExportPurchaseOrder"))
self.actionEditConfiguration = QtGui.QAction(MainWindow)
self.actionEditConfiguration.setObjectName(_fromUtf8("actionEditConfiguration"))
self.menuFile.addAction(self.actionNewPurchaseOrder)
self.menuFile.addAction(self.actionOpenPurchaseOrder)
self.menuFile.addAction(self.actionSavePurchaseOrder)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExportPurchaseOrder)
self.menuFile.addAction(self.actionPrintPurchaseOrder)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit_2)
self.menuView.addAction(self.actionViewReports)
self.menuHelp.addAction(self.actionAbout)
self.menuEdit.addAction(self.actionClearPurchaseOrder)
self.menuEdit.addSeparator()
self.menuEdit.addAction(self.actionEditProjects)
self.menuEdit.addAction(self.actionEditSuppliers)
self.menuEdit.addAction(self.actionEditProducts)
self.menuEdit.addSeparator()
self.menuEdit.addAction(self.actionEditConfiguration)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuEdit.menuAction())
self.menubar.addAction(self.menuView.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.label_3.setBuddy(self.orderDateEdit)
self.label_5.setBuddy(self.paymentTermsComboBox)
self.label_18.setBuddy(self.orderStatusComboBox)
self.label_4.setBuddy(self.orderStatusComboBox)
self.label_16.setBuddy(self.notesPlainTextEdit)
self.label_14.setBuddy(self.deliveryAddressPlainTextEdit)
self.label_17.setBuddy(self.gpsCoordinatesLineEdit)
self.label_15.setBuddy(self.deliveryDateEdit)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.newToolButton, self.projectComboBox)
MainWindow.setTabOrder(self.projectComboBox, self.orderDateEdit)
MainWindow.setTabOrder(self.orderDateEdit, self.orderStatusComboBox)
MainWindow.setTabOrder(self.orderStatusComboBox, self.paymentTermsComboBox)
MainWindow.setTabOrder(self.paymentTermsComboBox, self.supplierComboBox)
MainWindow.setTabOrder(self.supplierComboBox, self.productsTableView)
MainWindow.setTabOrder(self.productsTableView, self.deliveryDateEdit)
MainWindow.setTabOrder(self.deliveryDateEdit, self.deliveryAddressPlainTextEdit)
MainWindow.setTabOrder(self.deliveryAddressPlainTextEdit, self.gpsCoordinatesLineEdit)
MainWindow.setTabOrder(self.gpsCoordinatesLineEdit, self.notesPlainTextEdit)
MainWindow.setTabOrder(self.notesPlainTextEdit, self.saveToolButton)
MainWindow.setTabOrder(self.saveToolButton, self.printToolButton)
MainWindow.setTabOrder(self.printToolButton, self.openToolButton)
MainWindow.setTabOrder(self.openToolButton, self.clearToolButton)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.clearToolButton.setToolTip(_translate("MainWindow", "Clear data", None))
self.clearToolButton.setText(_translate("MainWindow", "...", None))
self.saveToolButton.setToolTip(_translate("MainWindow", "Save purchase order", None))
self.saveToolButton.setText(_translate("MainWindow", "...", None))
self.openToolButton.setToolTip(_translate("MainWindow", "Open an existing purchase order", None))
self.openToolButton.setText(_translate("MainWindow", "...", None))
self.newToolButton.setToolTip(_translate("MainWindow", "Create a new purchase order", None))
self.newToolButton.setText(_translate("MainWindow", "...", None))
self.printToolButton.setToolTip(_translate("MainWindow", "Print purchase order", None))
self.printToolButton.setText(_translate("MainWindow", "...", None))
self.exportToolButton.setToolTip(_translate("MainWindow", "Export purchase order to PDF file", None))
self.exportToolButton.setText(_translate("MainWindow", "...", None))
self.orderDetailsGroupBox.setTitle(_translate("MainWindow", "Order Details", None))
self.label_2.setText(_translate("MainWindow", "Order Number:", None))
self.label_3.setText(_translate("MainWindow", "Order Date:", None))
self.label_5.setText(_translate("MainWindow", "Payment Terms:", None))
self.label_18.setText(_translate("MainWindow", "Project:", None))
self.label_4.setText(_translate("MainWindow", "Order Status:", None))
self.taxRateLabel.setText(_translate("MainWindow", "Tax Rate:", None))
self.supplierGroupBox.setTitle(_translate("MainWindow", "Supplier", None))
self.label_11.setText(_translate("MainWindow", "Address:", None))
self.label_8.setText(_translate("MainWindow", "Phone Number:", None))
self.label_9.setText(_translate("MainWindow", "Fax Number:", None))
self.label_7.setText(_translate("MainWindow", "Contact Person:", None))
self.label_10.setText(_translate("MainWindow", "Email:", None))
self.productsGroupBox.setTitle(_translate("MainWindow", "Products", None))
self.totalExcludingTaxLabel.setText(_translate("MainWindow", "Total Excluding Tax:", None))
self.totalTaxLabel.setText(_translate("MainWindow", "Total Tax:", None))
self.totalLabel.setText(_translate("MainWindow", "Total:", None))
self.deliveryGroupBox.setTitle(_translate("MainWindow", "Delivery", None))
self.label_16.setText(_translate("MainWindow", "Notes:", None))
self.label_14.setText(_translate("MainWindow", "Delivery Address:", None))
self.label_17.setText(_translate("MainWindow", "GPS Coordinates:", None))
self.label_15.setText(_translate("MainWindow", "Delivery Date:", None))
self.menuFile.setTitle(_translate("MainWindow", "&File", None))
self.menuView.setTitle(_translate("MainWindow", "&View", None))
self.menuHelp.setTitle(_translate("MainWindow", "&Help", None))
self.menuEdit.setTitle(_translate("MainWindow", "&Edit", None))
self.actionNewPurchaseOrder.setText(_translate("MainWindow", "Create &New Purchase Order", None))
self.actionView_Purchase_Order.setText(_translate("MainWindow", "View Purchase Order...", None))
self.actionExit.setText(_translate("MainWindow", "Exit", None))
self.actionExit_2.setText(_translate("MainWindow", "E&xit", None))
self.actionPurchase_Order.setText(_translate("MainWindow", "Purchase Order...", None))
self.actionViewReports.setText(_translate("MainWindow", "View &Reports...", None))
self.actionAbout.setText(_translate("MainWindow", "&About", None))
self.actionOpenPurchaseOrder.setText(_translate("MainWindow", "&Open Purchase Order...", None))
self.actionCopyPurchaseOrder.setText(_translate("MainWindow", "&Copy Purchase Order", None))
self.actionClearPurchaseOrder.setText(_translate("MainWindow", "C&lear Purchase Order", None))
self.actionPrintPurchaseOrder.setText(_translate("MainWindow", "&Print Purchase Order...", None))
self.actionEditProjects.setText(_translate("MainWindow", "Edit Projects...", None))
self.actionEditSuppliers.setText(_translate("MainWindow", "Edit Suppliers...", None))
self.actionEditProducts.setText(_translate("MainWindow", "Edit Products...", None))
self.actionSavePurchaseOrder.setText(_translate("MainWindow", "Save Purchase Order", None))
self.actionExportPurchaseOrder.setText(_translate("MainWindow", "Export Purchase Order...", None))
self.actionEditConfiguration.setText(_translate("MainWindow", "Configuration Wizard...", None))
import resources_rc
|
psvnl/podb
|
ui_mainwindow.py
|
Python
|
gpl-3.0
| 28,313
|
# Copyright (C) 2011 Mark Burnett
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from base_classes import EndCondition as _EndCondition
class MinLength(_EndCondition):
    'End simulation once any filament is shorter than value subunits.'
    __slots__ = ['value']
def __init__(self, value=None, label=None):
self.value = int(value)
_EndCondition.__init__(self, label=label)
def reset(self):
pass
def __call__(self, time, filaments, concentrations):
for f in filaments:
if len(f) < self.value:
return True
return False
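# A minimal usage sketch (assumption: any sized sequence can stand in for a
# filament here; the real simulation passes Filament objects from this package).
if __name__ == '__main__':
    condition = MinLength(value=3)
    # First filament has length 2 < 3, so the condition reports the run is done.
    print(condition(time=0.0, filaments=[[1, 2], [1, 2, 3, 4]], concentrations=None))   # True
    print(condition(time=0.0, filaments=[[1, 2, 3], [1, 2, 3, 4]], concentrations=None))  # False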
|
mark-burnett/filament-dynamics
|
actin_dynamics/primitives/end_conditions/length.py
|
Python
|
gpl-3.0
| 1,203
|
#!/usr/bin/env python
# coding: utf-8
# Binary with Spots
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.2 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.2,<2.3"')
# As always, let's do imports and initialize a logger and a new bundle. See [Building a System](../tutorials/building_a_system.ipynb) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
# Model without Spots
# --------------------------
# In[3]:
b.add_dataset('lc', times=phoebe.linspace(0,1,101))
# In[4]:
b.run_compute(irrad_method='none', model='no_spot')
# Adding Spots
# ---------------------
# Let's add a spot to the primary component in our binary.
#
# The 'colat' parameter defines the colatitude on the star measured from its North (spin) Pole. The 'long' parameter measures the longitude of the spot - with longitude = 0 being defined as pointing towards the other star at t0. See the [spots tutorial](../tutorials/spots.ipynb) for more details.
# In[5]:
b.add_feature('spot', component='primary', feature='spot01', relteff=0.9, radius=30, colat=45, long=90)
# In[6]:
b.run_compute(irrad_method='none', model='with_spot')
# Comparing Light Curves
# ------------------------------
# In[7]:
afig, mplfig = b.plot(show=True, legend=True)
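# With both models kept in the bundle, the call above overlays the 'no_spot' and
# 'with_spot' light curves in one figure; legend=True labels each curve.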
# In[ ]:
|
phoebe-project/phoebe2-docs
|
2.2/examples/binary_spots.py
|
Python
|
gpl-3.0
| 1,686
|
import json
import time
import git
import discord
import os
import aiohttp
from cogs.utils.dataIO import dataIO
from urllib.parse import quote as uriquote
try:
from lxml import etree
except ImportError:
from bs4 import BeautifulSoup
from urllib.parse import parse_qs, quote_plus
#from cogs.utils import common
# @common.deprecation_warn()
def load_config():
with open('settings/config.json', 'r') as f:
return json.load(f)
# @common.deprecation_warn()
def load_optional_config():
with open('settings/optional_config.json', 'r') as f:
return json.load(f)
# @common.deprecation_warn()
def load_moderation():
with open('settings/moderation.json', 'r') as f:
return json.load(f)
# @common.deprecation_warn()
def load_notify_config():
with open('settings/notify.json', 'r') as f:
return json.load(f)
# @common.deprecation_warn()
def load_log_config():
with open('settings/log.json', 'r') as f:
return json.load(f)
def has_passed(oldtime):
if time.time() - 20.0 < oldtime:
return False
return time.time()
def set_status(bot):
if bot.default_status == 'idle':
return discord.Status.idle
elif bot.default_status == 'dnd':
return discord.Status.dnd
else:
return discord.Status.invisible
def user_post(key_users, user):
if time.time() - float(key_users[user][0]) < float(key_users[user][1]):
return False, [time.time(), key_users[user][1]]
else:
log = dataIO.load_json("settings/log.json")
now = time.time()
log["keyusers"][user] = [now, key_users[user][1]]
dataIO.save_json("settings/log.json", log)
return True, [now, key_users[user][1]]
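# user_post implements a per-user cooldown: inside the window it returns False
# without persisting, otherwise it stamps the new post time into settings/log.json.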
def gc_clear(gc_time):
if time.time() - 3600.0 < gc_time:
return False
return time.time()
def game_time_check(oldtime, interval):
if time.time() - float(interval) < oldtime:
return False
return time.time()
def avatar_time_check(oldtime, interval):
if time.time() - float(interval) < oldtime:
return False
return time.time()
def update_bot(message):
g = git.cmd.Git(working_dir=os.getcwd())
branch = g.execute(["git", "rev-parse", "--abbrev-ref", "HEAD"])
g.execute(["git", "fetch", "origin", branch])
update = g.execute(["git", "remote", "show", "origin"])
if ('up to date' in update or 'fast-forward' in update) and message:
return False
else:
if message is False:
version = 4
else:
version = g.execute(["git", "rev-list", "--right-only", "--count", "{0}...origin/{0}".format(branch)])
version = description = str(int(version))
if int(version) > 4:
version = "4"
commits = g.execute(["git", "rev-list", "--max-count={0}".format(version), "origin/{0}".format(branch)])
commits = commits.split('\n')
em = discord.Embed(color=0x24292E, title='Latest changes for the selfbot:', description='{0} release(s) behind.'.format(description))
for i in range(int(version)):
            i = i - 1  # Shift the index back one so the git log calls below pair commits[i] with commits[i + 1]
title = g.execute(["git", "log", "--format=%ar", "-n", "1", commits[i]])
field = g.execute(["git", "log", "--pretty=oneline", "--abbrev-commit", "--shortstat", commits[i], "^{0}".format(commits[i + 1])])
field = field[8:].strip()
link = 'https://github.com/appu1232/Discord-Selfbot/commit/%s' % commits[i]
em.add_field(name=title, value='{0}\n[Code changes]({1})'.format(field, link), inline=False)
em.set_thumbnail(url='https://image.flaticon.com/icons/png/512/25/25231.png')
em.set_footer(text='Full project: https://github.com/appu1232/Discord-Selfbot')
return em
def cmd_prefix_len():
config = load_config()
return len(config['cmd_prefix'])
def embed_perms(message):
try:
check = message.author.permissions_in(message.channel).embed_links
    except AttributeError:  # e.g. DMs, where guild permissions are unavailable
check = True
return check
def get_user(message, user):
try:
member = message.mentions[0]
    except IndexError:  # no user mentioned in the message
member = message.guild.get_member_named(user)
if not member:
try:
member = message.guild.get_member(int(user))
except ValueError:
pass
if not member:
return None
return member
def find_channel(channel_list, text):
if text.isdigit():
found_channel = discord.utils.get(channel_list, id=int(text))
elif text.startswith("<#") and text.endswith(">"):
        found_channel = discord.utils.get(channel_list,
                                          id=int(text.replace("<", "").replace(">", "").replace("#", "")))
else:
found_channel = discord.utils.get(channel_list, name=text)
return found_channel
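# Illustrative lookups (hypothetical id/names): find_channel(channels, "123456789012345678")
# matches by id, "<#123456789012345678>" by stripped mention, and "general" by name.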
async def get_google_entries(query):
url = 'https://www.google.com/search?q={}'.format(uriquote(query))
params = {
'safe': 'off',
'lr': 'lang_en',
        'hl': 'en'
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64)'
}
entries = []
async with aiohttp.ClientSession() as session:
async with session.get(url, params=params, headers=headers) as resp:
if resp.status != 200:
config = load_optional_config()
async with session.get("https://www.googleapis.com/customsearch/v1?q=" + quote_plus(query) + "&start=" + '1' + "&key=" + config['google_api_key'] + "&cx=" + config['custom_search_engine']) as resp:
result = json.loads(await resp.text())
return None, result['items'][0]['link']
try:
root = etree.fromstring(await resp.text(), etree.HTMLParser())
search_nodes = root.findall(".//div[@class='g']")
for node in search_nodes:
url_node = node.find('.//h3/a')
if url_node is None:
continue
url = url_node.attrib['href']
if not url.startswith('/url?'):
continue
url = parse_qs(url[5:])['q'][0]
entries.append(url)
except NameError:
root = BeautifulSoup(await resp.text(), 'html.parser')
for result in root.find_all("div", class_='g'):
url_node = result.find('h3')
if url_node:
for link in url_node.find_all('a', href=True):
url = link['href']
if not url.startswith('/url?'):
continue
url = parse_qs(url[5:])['q'][0]
entries.append(url)
return entries, root
def attach_perms(message):
return message.author.permissions_in(message.channel).attach_files
def parse_prefix(bot, text):
prefix = bot.cmd_prefix
if type(prefix) is list:
prefix = prefix[0]
return text.replace("[c]", prefix).replace("[b]", bot.bot_prefix)
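# A minimal sketch of the prefix substitution (assumption: any object exposing
# cmd_prefix and bot_prefix will do, mirroring the bot these helpers expect).
if __name__ == '__main__':
    class _FakeBot(object):
        cmd_prefix = ['!']
        bot_prefix = '>'
    print(parse_prefix(_FakeBot(), "Try [c]help or [b]ping"))  # -> "Try !help or >ping"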
|
Bluscream/Discord-Selfbot
|
cogs/utils/checks.py
|
Python
|
gpl-3.0
| 7,337
|
from tasty.types import conversions
from tasty.types import *
from tasty.types.driver import TestDriver
__params__ = {'la': 32, 'lb': 32, 'da': 10}
driver = TestDriver()
def protocol(client, server, params):
server.ga = Garbled(val=Unsigned(bitlen=764, dim=[1], signed=False, passive=True, empty=True), signed=False, bitlen=764, dim=[1])
server.gb = Garbled(val=Unsigned(bitlen=764, dim=[1], signed=False, passive=True, empty=True), signed=False, bitlen=764, dim=[1])
conversions.Garbled_Garbled_send(server.ga, client.ga, 764, [1], False)
conversions.Garbled_Garbled_send(server.gb, client.gb, 764, [1], False)
client.gc = client.ga * client.gb
client.c = Unsigned(val=client.gc, passive=True, signed=False, bitlen=1528, dim=[1])
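# Reading of this generated setup: the server constructs two 764-bit garbled
# operands, transfers them to the client, the client multiplies them inside the
# garbled circuit, and the 1528-bit product is converted back to a plain
# Unsigned on the client side.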
|
tastyproject/tasty
|
tasty/tests/functional/protocols/mul/garbled_server_server_client/protocol_setup_server.py
|
Python
|
gpl-3.0
| 756
|
# -*- coding: utf-8 -*-
#
# This file is part of hopr: https://github.com/hopr/hopr.
#
# Hopr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hopr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hopr. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import unittest as ut
import tempfile
from mock import MagicMock, sentinel, call
from hopr.tool.run import * # Run, parse_args, run
# TODO: Suppress log output during tests.
class Test1Misc(ut.TestCase):
def test_timeout(self):
dt = 0.01
e = 0.001
timeout = Timeout(dt)
t1 = time()
while(True):
a = timeout()
t2 = time()
if t2 - t1 < dt-e:
self.assertEqual(a, False)
else:
break
while(t2 - t1 <= dt + e):
t2 = time()
self.assertEqual(timeout(), True)
class TestParseArgs(ut.TestCase):
def setUp(self):
self.args = {'no_grab': False,
'timeout': 5,
'log_level': 'info',
'print_keymap': False,
'log_file': '',
'config_dir': '',
}
def test2_parse_args(self):
x = parse_args('--no-grab -t 10 --log-level warning'.split())
self.args.update({'no_grab': True,
'timeout': 10,
'log_level': 'warning',
})
self.assertEqual(self.args, vars(x))
def test2_no_timeout(self):
x = parse_args('-x'.split())
self.args.update({'timeout': 0})
self.assertEqual(self.args, vars(x))
def test1_parse_args_defaults(self):
x = parse_args(''.split())
self.assertEqual({'no_grab': False,
'timeout': 5,
'log_level': 'info',
'log_file': '',
'config_dir': '',
'print_keymap': False,
}, vars(x))
    def test1_parse_args_log_file(self):
x = parse_args('--log-file log.txt'.split())
self.assertEqual({'no_grab': False,
'timeout': 5,
'log_level': 'info',
'log_file': 'log.txt',
'config_dir': '',
'print_keymap': False,
}, vars(x))
class TestRun(ut.TestCase):
def setUp(self):
params = dict(event_parser=MagicMock(name='parser'),
event_wrapper=MagicMock(name='event_wrapper'),
find_keyboards=MagicMock(name='find_keyboards'),
read_events=MagicMock(name='read_events'),
grab_keyboards=MagicMock(name='grab_keyboards'))
for k,v in list(params.items()):
setattr(self, k, v)
self.run = partial(run, **params)
def test1_no_events(self):
self.run(timeout=5,
no_grab=True)
def test2_keyboards_are_optionally_grabbed(self):
kbds = [sentinel.kbd1, sentinel.kbd2]
self.find_keyboards.return_value = kbds
self.run(no_grab=True)
self.grab_keyboards.assert_not_called()
self.run(no_grab=False)
self.grab_keyboards.assert_called_once_with(kbds)
def test2_keyboards_events_are_read(self):
kbds = [sentinel.kbd1, sentinel.kbd2]
self.find_keyboards.return_value = kbds
self.run()
self.read_events.assert_called_once_with(kbds)
def test2_events_are_wrapped_before_parsing(self):
events = [sentinel.event]
self.read_events.return_value = events
self.event_wrapper.return_value = sentinel.wrapped_event
self.run()
self.event_wrapper.assert_called_once_with(sentinel.event)
self.event_parser.assert_called_once_with(sentinel.wrapped_event)
def test2_events_are_sent_to_parser(self):
events = [sentinel.event1, sentinel.event2]
self.read_events.return_value = events
self.event_wrapper.side_effect = lambda x : x
self.run()
self.event_parser.assert_has_calls([call(e) for e in events])
def test3_timeout(self):
self.run(timeout=-1)
class TestRunFunction(ut.TestCase):
def test(self):
backend = MagicMock(name='backend')
make_eventparser = MagicMock(name='make_eventparser')
args = '--log-level=error'.split()
run_parse_args(backend=backend,
make_eventparser=make_eventparser,
args=args)
def test_log_file(self):
f = tempfile.NamedTemporaryFile('r')
backend = MagicMock(name='backend')
make_eventparser = MagicMock(name='make_eventparser')
args = ['--log-level', 'debug', '--log-file', f.name]
run_parse_args(backend=backend,
make_eventparser=make_eventparser,
args=args)
logging.getLogger().debug('Test Message')
text = f.read()
self.assertTrue(text.strip().endswith('Test Message'))
if __name__ == "__main__":
# import logging
# logging.getLogger().setLevel('ERROR')
ut.main(failfast=True, exit=False)
|
hopr/hopr
|
hopr/tool/test_run.py
|
Python
|
gpl-3.0
| 5,871
|
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
import time
from ControlUsuarios.forms import *
from ControlUsuarios.models import UserProfile
# Create your views here.
from django.http import HttpResponse
from django.http import JsonResponse
from django.shortcuts import render
from django.shortcuts import redirect
from django import forms
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.views.generic.base import View
from datetime import datetime
from bson import Binary, Code
from bson.json_util import dumps
from bson.json_util import loads
from clase import *
gestorClase=ClaseDriver()
@csrf_exempt
def index(request):
if request.method == 'GET':
session_num=gestorClase.database.sesion.find({}).count()
session_tag="default"
if session_num>0:
session_tag=gestorClase.database.sesion.find({})
lista=[]
for i in session_tag:
print i
lista.append(i)
print lista[0]["clave_sesion"]
return render(request, 'ControlUsuarios/session_android.html',{"qr":lista[0]["clave_sesion"],"fecha":lista[0]["fecha_sesion"]})
#return render(request, 'registration/login.html',{})
@csrf_exempt
def sesion(request):
clase=gestorClase.database.clase.find()
if request.method == 'POST':
print "entrando por post"
form = SessionForm(request.POST)
if form.is_valid():
session_tag=form.data['session_tag']
print session_tag
gestorClase.createSesion(session_tag)
return render(request, 'ControlUsuarios/sessions.html',{'form': form,"qr":session_tag,"clase":clase} )
else:
session_num=gestorClase.database.sesion.find({}).count()
session_tag="default"
if session_num>0:
session_tag=gestorClase.database.sesion.find({})
lista=[]
for i in session_tag:
print i
lista.append(i)
print lista[0]["clave_sesion"]
form=SessionForm()
return render(request, 'ControlUsuarios/sessions.html',{'form': form,"qr":lista[0]["clave_sesion"],"clase":clase} )
class Preferencias(View):
def get(self, request):
print "Entrando por el get"
form=FormEntrada()
return render(request, 'ControlUsuarios/preferencias.html', {'form': form})
def post(self, request):
print "Entrando por el post"
reader_clase=None
form = FormEntrada(request.POST, request.FILES)
if form.is_valid():
fichero1=request.FILES.get('file_clase',None)
if fichero1 is not None:
fieldnames = ("NOMBRE","DNI")
reader_clase = csv.DictReader(request.FILES['file_clase'], fieldnames)
gestorClase.createClaseFromReader(reader_clase)
return redirect('/Preferencias',{'form':form})
else:
print "formulario invalido"
#form = FormEntrada()
return render(request, 'noinventory/Preferencias.html', {'form': form})
@csrf_exempt
def borrarTodo(request):
if request.method == 'GET':
gestorClase.database.clase.remove()
cl={"Alumnos": [{"NOMBRE": "Hugo Barzano Cruz","DNI": "77138361"}, {"NOMBRE": "Mariano Palomo Villafranca","DNI": "66666666z"}]}
for i in cl["Alumnos"]:
i["assitencia"]="False"
print i
gestorClase.database.clase.insert(i)
aux3=[]
respuesta={}
lista_alumnos=gestorClase.database.clase.find({})
for a in lista_alumnos:
print a["NOMBRE"]
aux4={"NOMBRE":a["NOMBRE"],"DNI":a["DNI"],"assitencia":a["assitencia"]}
aux3.append(aux4)
respuesta={"alumnos":aux3}
return JsonResponse(respuesta,safe=False)
else:
gestorClase.database.clase.remove()
gestorClase.database.sesion.remove()
default={"NOMBRE":"Nombre","DNI":"Dni","assitencia":"asistencia"}
aux7=[]
aux7.append(default)
respuesta={"alumnos":aux7}
return JsonResponse(respuesta,safe=False)
@csrf_exempt
def inicializarClase(request):
if request.method == 'GET':
gestorClase.database.clase.remove()
cl={"Alumnos": [{"NOMBRE": "Hugo Barzano Cruz","DNI": "77138361"}, {"NOMBRE": "Mariano Palomo Villafranca","DNI": "66666666z"}]}
for i in cl["Alumnos"]:
i["assitencia"]="False"
print i
gestorClase.database.clase.insert(i)
aux3=[]
respuesta={}
lista_alumnos=gestorClase.database.clase.find({})
for a in lista_alumnos:
print a["NOMBRE"]
aux4={"NOMBRE":a["NOMBRE"],"DNI":a["DNI"],"assitencia":a["assitencia"]}
aux3.append(aux4)
respuesta={"alumnos":aux3}
return JsonResponse(respuesta,safe=False)
else:
gestorClase.database.clase.remove()
cl={"Alumnos": [{"NOMBRE": "Hugo Barzano Cruz","DNI": "77138361"}, {"NOMBRE": "Mariano Palomo Villafranca","DNI": "66666666z"}]}
for i in cl["Alumnos"]:
i["assitencia"]="False"
print i
gestorClase.database.clase.insert(i)
aux3=[]
respuesta={}
lista_alumnos=gestorClase.database.clase.find({})
for a in lista_alumnos:
print a["NOMBRE"]
aux4={"NOMBRE":a["NOMBRE"],"DNI":a["DNI"],"assitencia":a["assitencia"]}
aux3.append(aux4)
print respuesta
respuesta={"alumnos":aux3}
#return JsonResponse(respuesta,safe=False)
return JsonResponse(respuesta,safe=False)
@csrf_exempt
def setClaveAndroid(request):
if request.method == 'POST':
mydic=dict(request.POST)
print mydic["clave"][0]
if mydic["clave"][0] == "":
gestorClase.createSesion("default")
else:
gestorClase.createSesion(mydic["clave"][0])
return HttpResponse("Ok")
@csrf_exempt
def alumnosJson(request):
if request.method == 'GET':
default={"NOMBRE":"Nombre","DNI":"Dni","assitencia":"asistencia"}
aux7=[]
aux7.append(default)
respuesta={"alumnos":aux7}
aux=[]
aux3=[]
numero_alumnos=gestorClase.database.clase.find({}).count()
if numero_alumnos>0:
lista_alumnos=gestorClase.database.clase.find({})
for a in lista_alumnos:
print a["NOMBRE"]
aux4={"NOMBRE":a["NOMBRE"],"DNI":a["DNI"],"assitencia":a["assitencia"]}
aux3.append(aux4)
print respuesta
respuesta={"alumnos":aux3}
return JsonResponse(respuesta,safe=False)
else:
return JsonResponse(respuesta,safe=False)
else:
default={"NOMBRE":"Nombre","DNI":"Dni","assitencia":"asistencia"}
aux7=[]
aux7.append(default)
respuesta={"alumnos":aux7}
aux=[]
aux3=[]
print "entrado por post"
numero_alumnos=gestorClase.database.clase.find({}).count()
if numero_alumnos>0:
lista_alumnos=gestorClase.database.clase.find({})
for a in lista_alumnos:
print a["NOMBRE"]
aux4={"NOMBRE":a["NOMBRE"],"DNI":a["DNI"],"assitencia":a["assitencia"]}
aux3.append(aux4)
print respuesta
respuesta={"alumnos":aux3}
return JsonResponse(respuesta,safe=False)
else:
return JsonResponse(respuesta,safe=False)
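# alumnosJson always answers with the same JSON shape,
#   {"alumnos": [{"NOMBRE": ..., "DNI": ..., "assitencia": ...}, ...]}
# falling back to a single placeholder row when the class collection is empty.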
@csrf_exempt
def CheckFromQr(request):
if request.method == 'POST':
mydic=dict(request.POST)
print mydic
dni=mydic["dni"][0]
aux=mydic["scaner"][0]
alumno=None
alumno=gestorClase.database.clase.find({"DNI":str(dni)})
print alumno[0]
sesion=gestorClase.database.sesion.find({"fecha_sesion":datetime.now().strftime('%Y-%m-%d')})
print "superado alumno y fecha"
if alumno != None:
if sesion[0]["clave_sesion"]==aux:
gestorClase.database.clase.update({"_id" :alumno[0]["_id"] },{"$set" : {"assitencia" : "True"}})
            else:
                print "Incorrect attendance key"
        else:
            print "User is not part of the class"
        print "Dni: " + dni
        print "Session key: " + aux
return HttpResponse("OK")
else:
print "recibido get"
return HttpResponse("gettttttt")
@csrf_exempt
def CheckFromNfc(request):
if request.method == 'POST':
mydic=dict(request.POST)
print mydic
dni=mydic["dni"][0]
aux=mydic["nfc"][0]
alumno=None
alumno=gestorClase.database.clase.find({"DNI":str(dni)})
print alumno[0]
sesion=gestorClase.database.sesion.find({"fecha_sesion":datetime.now().strftime('%Y-%m-%d')})
print "superado alumno y fecha"
if alumno != None:
if sesion[0]["clave_sesion"]==aux:
gestorClase.database.clase.update({"_id" :alumno[0]["_id"] },{"$set" : {"assitencia" : "True"}})
            else:
                print "Incorrect attendance key"
        else:
            print "User is not part of the class"
        print "Dni: " + dni
        print "Session key: " + aux
return HttpResponse("OK")
else:
print "recibido get"
# print request.GET['contenido_scaner']
return HttpResponse("gettttttt")
######################### USER REGISTRATION ############################################
@csrf_exempt
def androidLogin(request):
if request.method=='POST':
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(username=username, password=password)
if user:
# Is the account active? It could have been disabled.
if user.is_active:
u=User.objects.get(username=user.username)
user_profile = UserProfile.objects.get(user=user)
login(request, user)
#data="nombre_usuario :"+username
return HttpResponse(user_profile.__dni__())
else:
return HttpResponse("Your account is disabled.")
else:
print "Invalid login details: {0}, {1}".format(username, password)
return HttpResponse("Invalid login details supplied.")
else:
print "entrando por get"
return HttpResponse()
@csrf_exempt
def androidRegister(request):
if request.method=='POST':
user_form = UserForm(data=request.POST)
profile_form = UserProfileForm(data=request.POST)
if user_form.is_valid():
if profile_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
profile = profile_form.save(commit=False)
profile.user = user
profile.save()
return HttpResponse("success")
else:
return HttpResponse("Invalid User or Dni")
else:
return HttpResponse("Username exist or Invalid Email")
else:
print "entrando por get"
return HttpResponse()
def register(request):
# A boolean value for telling the template whether the registration was successful.
# Set to False initially. Code changes value to True when registration succeeds.
registered = False
# If it's a HTTP POST, we're interested in processing form data.
if request.method == 'POST':
# Attempt to grab information from the raw form information.
# Note that we make use of both UserForm and UserProfileForm.
user_form = UserForm(data=request.POST)
profile_form = UserProfileForm(data=request.POST)
# If the two forms are valid...
if user_form.is_valid() and profile_form.is_valid():
# Save the user's form data to the database.
user = user_form.save()
# Now we hash the password with the set_password method.
# Once hashed, we can update the user object.
user.set_password(user.password)
user.save()
# Now sort out the UserProfile instance.
# Since we need to set the user attribute ourselves, we set commit=False.
# This delays saving the model until we're ready to avoid integrity problems.
profile = profile_form.save(commit=False)
profile.user = user
# Now we save the UserProfile model instance.
profile.save()
# Update our variable to tell the template registration was successful.
registered = True
#else:
#return HttpResponseRedirect('/')
# Invalid form or forms - mistakes or something else?
# Print problems to the terminal.
# They'll also be shown to the user.
else:
print user_form.errors, profile_form.errors
#return redirect('registration/register.html',{'user_form': user_form, 'profile_form': profile_form, 'registered': registered} )
#print user_form.errors, profile_form.errors
# Not a HTTP POST, so we render our form using two ModelForm instances.
# These forms will be blank, ready for user input.
else:
user_form = UserForm()
profile_form = UserProfileForm()
# Render the template depending on the context.
return render(request, 'registration/register.html',{'user_form': user_form, 'profile_form': profile_form, 'registered': registered} )
@csrf_exempt
def user_login(request):
# If the request is a HTTP POST, try to pull out the relevant information.
if request.method == 'POST':
# Gather the username and password provided by the user.
# This information is obtained from the login form.
# We use request.POST.get('<variable>') as opposed to request.POST['<variable>'],
# because the request.POST.get('<variable>') returns None, if the value does not exist,
# while the request.POST['<variable>'] will raise key error exception
username = request.POST.get('username')
password = request.POST.get('password')
# Use Django's machinery to attempt to see if the username/password
# combination is valid - a User object is returned if it is.
user = authenticate(username=username, password=password)
# If we have a User object, the details are correct.
# If None (Python's way of representing the absence of a value), no user
# with matching credentials was found.
if user:
# Is the account active? It could have been disabled.
if user.is_active:
u=User.objects.get(username=user.username)
request.session['username'] = u.username
user_profile = UserProfile.objects.get(user=user)
#print user_profile.__organizacion__()
request.session['dni'] = user_profile.__dni__()
login(request, user)
return HttpResponseRedirect('/sesion/')
else:
# An inactive account was used - no logging in!
return HttpResponse("Your account is disabled.")
else:
# Bad login details were provided. So we can't log the user in.
print "Invalid login details: {0}, {1}".format(username, password)
return HttpResponse("Invalid login details supplied.")
# The request is not a HTTP POST, so display the login form.
# This scenario would most likely be a HTTP GET.
else:
# No context variables to pass to the template system, hence the
# blank dictionary object...
return render(request, 'registration/login.html', {})
@login_required
def user_logout(request):
# Since we know the user is logged in, we can now just log them out.
del request.session['username']
del request.session['dni']
logout(request)
# Take the user back to the homepage.
return HttpResponseRedirect('/')
|
hugobarzano/DispositivosMovilesBackEnd
|
ControlUsuarios/views.py
|
Python
|
gpl-3.0
| 16,446
|
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
import numpy as np
def predictClothesGeneral(temp):
dataFile = open("data.txt")
data = dataFile.read()
data = data.split("\n")
X = []
Y = []
Y2 = []
for i in range(0,len(data) - 1):
X.append([float(data[i].split(":")[1])])
Y.append(int(data[i].split(":")[3]))
Y2.append(int(data[i].split(":")[4]))
clf = RandomForestClassifier(n_estimators=25)
clf2 = RandomForestClassifier(n_estimators=25)
clf.fit(X,Y)
clf2.fit(X,Y2)
pants = clf.predict([[temp]])
tops = clf2.predict([[temp]])
s = "I recommend you wear a pair of "
if pants == 1:
s = s + "jeans"
else:
s = s + "khaki shorts"
s = s + " and a "
if tops == 1:
s = s + "shirt, its a nice day out!"
elif tops == 2:
s = s + "sweat shirt."
else:
s = s + "jacket, it will be chilly today."
return s
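# The parser above assumes colon-separated rows in data.txt, one record per
# line, with the temperature in field 1, the pants label (1 = jeans, else
# khaki shorts) in field 3 and the top label (1 = shirt, 2 = sweat shirt,
# else jacket) in field 4. A hypothetical row: "2016-01-01:72.0:sunny:0:1"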
def predictFromFileGeneral(fileName):
fi = open(fileName)
data = fi.read().split("\n")
for i in range(0,len(data) - 1):
data2 = data[i].split(":")
print "At " + data2[1].split(",")[0] + " degrees... " + predictClothesGeneral(float(data2[1].split(",")[0]))
def addToKnownList(shirt, temp):
dataFile = open("userAdded.txt", 'a')
dataFile.write(str(shirt + ":" + str(temp)) + '\n')
def predictClothesData(temp):
dataFile = open("userAdded.txt")
data = dataFile.read()
data = data.split("\n")
X = []
Y = []
for i in range(0,len(data) - 1):
X.append([float(data[i].split(":")[1])])
Y.append(data[i].split(":")[0])
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X,Y)
predict = clf.predict([[temp]])
return predict
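# Rows come from addToKnownList above, so each userAdded.txt line looks like
# "<clothing label>:<temperature>", e.g. "sweater:55.0".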
def predictFromFileData(fileName):
fi = open(fileName)
data = fi.read().split("\n")
for i in range(0,len(data) - 1):
data2 = data[i].split(":")
print "At " + data2[1].split(",")[0] + " degrees... I would recommend a " + predictClothesData(float(data2[1].split(",")[0]))[0]
|
epaglier/Project-JARVIS
|
jarvis-features/Weather AI/weatherai.py
|
Python
|
gpl-3.0
| 2,382
|
""" The benchmarks below are useful for testing performance when making changes to the maze algorithms. """
from datetime import datetime
from sysconfig import get_python_version
from timeit import Timer
from mazelib import __version__ as version
# CONFIG
SIZES = [5, 10, 25, 50, 100]
ITERATIONS = [100, 50, 20, 5, 1]
GENERATORS = ['AldousBroder', 'BacktrackingGenerator', 'BinaryTree', 'HuntAndKill', 'Prims', 'Sidewinder',
'TrivialMaze', 'Wilsons']
SOLVERS = ['Collision', 'Tremaux']
def main():
times = run_benchmarks()
print_benchmarks(times)
def run_benchmarks():
""" Run the benchmarks.
An annoying screen-print will occur so that you know your progress, as these tests might take a while.
Returns:
        list: 2D list of the time each generator/solver combination took
"""
times = [[0.0] * len(SIZES) for _ in range(len(GENERATORS) * len(SOLVERS))]
row = -1
for generator in GENERATORS:
for solver in SOLVERS:
row += 1
print('Run #%d: %s & %s' % (row, generator, solver))
for col, size in enumerate(SIZES):
print(col)
setup = """from mazelib import Maze
from mazelib.solve.%(solv)s import %(solv)s
from mazelib.generate.%(gen)s import %(gen)s
""" % {'solv': solver, 'gen': generator}
logic = """m = Maze()
m.generator = %(gen)s(%(size)d, %(size)d)
m.solver = %(solv)s()
m.generate()
m.generate_entrances()
m.solve()
""" % {'solv': solver, 'gen': generator, 'size': size}
t = Timer(logic, setup=setup)
time = t.timeit(ITERATIONS[col])
                times[row][col] = time
return times
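# For example, with generator 'Prims', solver 'Collision' and size 5 the
# templates above render to (illustrative):
#   setup: from mazelib import Maze
#          from mazelib.solve.Collision import Collision
#          from mazelib.generate.Prims import Prims
#   logic: m = Maze(); m.generator = Prims(5, 5); m.solver = Collision()
#          m.generate(); m.generate_entrances(); m.solve()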
def print_benchmarks(times):
""" Pretty print for the benchmark results, with a detailed CSV at the end.
Args:
times (list): timing results for the benchmark runs
    Returns: None
"""
print('\nmazelib benchmarking')
print(datetime.now().strftime('%Y-%m-%d %H:%M'))
print('Python version: {0}'.format(get_python_version()))
print('mazelib version: {0}'.format(version))
print('\nTotal Time (seconds): %.5f\n' % sum([sum(times_row) for times_row in times]))
print('\nmaze size,' + ','.join([str(s) for s in SIZES]))
row = -1
for generator in GENERATORS:
for solver in SOLVERS:
row += 1
method = generator + '-' + solver + ','
print(method + ','.join(['%.5f' % time for time in times[row]]))
if __name__ == '__main__':
main()
|
theJollySin/mazelib
|
benchmarks.py
|
Python
|
gpl-3.0
| 2,537
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field last_read_by on 'Message'
m2m_table_name = db.shorten_name(u'digiapproval_message_last_read_by')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('message', models.ForeignKey(orm[u'digiapproval.message'], null=False)),
('user', models.ForeignKey(orm[u'auth.user'], null=False))
))
db.create_unique(m2m_table_name, ['message_id', 'user_id'])
def backwards(self, orm):
# Removing M2M table for field last_read_by on 'Message'
db.delete_table(db.shorten_name(u'digiapproval_message_last_read_by'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'digiapproval.customeraccount': {
'Meta': {'object_name': 'CustomerAccount'},
'account_type': ('django.db.models.fields.CharField', [], {'default': "'CUSTOMER'", 'max_length': '16'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sub_accounts'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['digiapproval.CustomerAccount']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'digiapproval.message': {
'Meta': {'object_name': 'Message'},
'_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_read_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'last_read'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'message': ('django.db.models.fields.TextField', [], {}),
'posted': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['digiapproval.Workflow']"})
},
u'digiapproval.task': {
'Meta': {'object_name': 'Task'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task': ('jsonfield.fields.JSONField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': "'36'"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['digiapproval.Workflow']"})
},
u'digiapproval.userfile': {
'Meta': {'object_name': 'UserFile'},
'_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'virus_status': ('django.db.models.fields.CharField', [], {'default': "'UNSCANNED'", 'max_length': '16'})
},
u'digiapproval.workflow': {
'Meta': {'object_name': 'Workflow'},
'approver': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workflow_approver'", 'to': u"orm['auth.User']"}),
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workflow_customer'", 'to': u"orm['digiapproval.CustomerAccount']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'spec': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['digiapproval.WorkflowSpec']"}),
'state': ('django.db.models.fields.CharField', [], {'default': "'STARTED'", 'max_length': '10'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'70558195da6a4488b22d6e8749f86580'", 'max_length': '36'}),
'workflow': ('digiapproval_project.apps.digiapproval.fields.WorkflowField', [], {})
},
u'digiapproval.workflowspec': {
'Meta': {'object_name': 'WorkflowSpec'},
'approvers': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'workflowspecs_approvers'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.Group']"}),
'delegators': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'workflowspecs_delegators'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.Group']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'64'"}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workflowspecs_owner'", 'to': u"orm['auth.Group']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'spec': ('digiapproval_project.apps.digiapproval.fields.WorkflowSpecField', [], {}),
'toplevel': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
}
}
complete_apps = ['digiapproval']
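# Applied like any South migration (assuming a standard South setup), e.g.:
#   python manage.py migrate digiapproval 0009_add_last_read_mm_auto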
|
tsujamin/digi-approval
|
src/digiapproval_project/digiapproval_project/apps/digiapproval/migrations/0009_add_last_read_mm_auto.py
|
Python
|
gpl-3.0
| 8,804
|