| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int32 2-1.05M |
"""
Autotest command parser
@copyright: Don Zickus <dzickus@redhat.com> 2011
"""
import os, re, sys, logging
from autotest.client import os_dep, utils
from autotest.client.shared import global_config, logging_config, logging_manager
from autotest.client.shared import packages
GLOBAL_CONFIG = global_config.global_config
LOCALDIRTEST = "tests"
GLOBALDIRTEST = GLOBAL_CONFIG.get_config_value('COMMON',
'test_dir',
default="")
try:
autodir = os.path.abspath(os.environ['AUTODIR'])
except KeyError:
autodir = GLOBAL_CONFIG.get_config_value('COMMON',
'autotest_top_path')
tmpdir = os.path.join(autodir, 'tmp')
output_dir = GLOBAL_CONFIG.get_config_value('COMMON',
'test_output_dir',
default=tmpdir)
FETCHDIRTEST = os.path.join(output_dir, 'site_tests')
if not os.path.isdir(FETCHDIRTEST):
os.makedirs(FETCHDIRTEST)
DEBUG = False
class CmdParserLoggingConfig(logging_config.LoggingConfig):
"""
Used with the sole purpose of providing convenient logging setup
for the KVM test auxiliary programs.
"""
def configure_logging(self, results_dir=None, verbose=False):
super(CmdParserLoggingConfig, self).configure_logging(use_console=True,
verbose=False)
logging_manager.configure_logging(CmdParserLoggingConfig())
class CommandParser(object):
"""
A client-side command wrapper for the autotest client.
"""
COMMAND_LIST = ['help', 'list', 'run', 'fetch']
@classmethod
def _print_control_list(cls, pipe, path):
"""
Print the list of control files available.
@param pipe: Pipe opened to an output stream (may be a pager)
@param path: Path we'll walk through
"""
if not os.path.isdir(path):
pipe.write("Test directory not available\n")
return
pipe.write(" %-50s %s\n" % ("[Control]", "[Description]"))
# The strategy here is to walk the root directory
# looking for "*control*" files in some directory
# and printing them out
for root, _, files in sorted(os.walk(path)):
for name in files:
if re.search("control", name):
# strip full path
basename = re.sub(path + "/", "", root)
text = "%s/%s" % (basename, name)
desc = "None"
if name == "control":
# Imply /control by listing only directory name
text = "%s" % basename
for line in open(root + "/" + name).readlines():
if re.match("NAME", line):
# We have a description line
desc = re.split("=\s*", line,
maxsplit=1)[1].rstrip()
try:
desc = desc[1:-1]
except IndexError:
pass
break
pipe.write(' %-50s %s\n' % (text, desc))
def fetch(self, args):
"""
fetch a remote control file or packages
"""
if not len(args):
self.help()
url = args.pop(0)
if not utils.is_url(url):
logging.info("Not a remote url, nothing to fetch (%s)", url)
self.help()
if len(args):
name = args.pop(0)
else:
name = ""
logging.info("Fetching file %s:%s", url, name)
pkg_dir = os.path.join(output_dir, 'packages')
install_dir = os.path.join(FETCHDIRTEST, name)
pkgmgr = packages.PackageManager(output_dir,
run_function_dargs={'timeout':3600})
pkgmgr.install_pkg(name, 'test', pkg_dir, install_dir,
repo_url=url)
raise SystemExit(0)
@classmethod
def help(cls):
"""
List the commands and their usage strings.
@param args is not used here.
"""
logging.info("Commands:")
logging.info("fetch <url> [<file>]\tFetch a remote file/package and install it")
logging.info("\tgit://...:[<branch>] [<file/directory>]")
logging.info("\thttp://... [<file>]")
logging.info("help\t\t\tOutput a list of supported commands")
logging.info("list\t\t\tOutput a list of available tests")
logging.info("run <test> [<args>]\tFind given <test> in path and run with args")
raise SystemExit(0)
@classmethod
def list_tests(cls):
"""
List the available tests for users to choose from
"""
# One favorite feature from git :-)
try:
less_cmd = os_dep.command('less')
pipe = os.popen('%s -FRSX' % less_cmd, 'w')
except ValueError:
pipe = sys.stdout
pipe.write("List of tests available\n")
pipe.write("Unless otherwise specified, outputs imply /control files\n")
pipe.write("\n")
# Walk local ./tests directory
dirtest = os.path.join(os.path.abspath(os.path.curdir), LOCALDIRTEST)
# Don't repeat autodirtest results
if dirtest != os.environ['AUTODIRTEST']:
pipe.write("Local tests (%s)\n" % dirtest)
cls._print_control_list(pipe, dirtest)
pipe.write("\n")
# Walk fetchdirtest directory
if FETCHDIRTEST and os.path.isdir(FETCHDIRTEST):
dirtest = FETCHDIRTEST
pipe.write("Remotely fetched tests (%s)\n" % dirtest)
cls._print_control_list(pipe, dirtest)
pipe.write("\n")
# Walk globaldirtests directory
if GLOBALDIRTEST and os.path.isdir(GLOBALDIRTEST):
dirtest = GLOBALDIRTEST
pipe.write("Globally imported tests (%s)\n" % dirtest)
cls._print_control_list(pipe, dirtest)
pipe.write("\n")
# Walk autodirtest directory
dirtest = os.environ['AUTODIRTEST']
pipe.write("Autotest prepackaged tests (%s)\n" % dirtest)
cls._print_control_list(pipe, dirtest)
pipe.close()
raise SystemExit(0)
def parse_args(self, args):
"""
Process a client side command.
@param args: Command line args.
"""
if len(args) and args[0] in self.COMMAND_LIST:
cmd = args.pop(0)
else:
# Do things the traditional way
return args
# List is a python reserved word
if cmd == 'list':
cmd = 'list_tests'
try:
try:
args = getattr(self, cmd)(args)
except TypeError:
args = getattr(self, cmd)()
except SystemExit, return_code:
sys.exit(return_code.code)
except Exception, error_detail:
if DEBUG:
raise
sys.stderr.write("Command failed: %s -> %s\n" % (cmd, error_detail))
self.help()
sys.exit(1)
# Args are cleaned up, return to process the traditional way
return args
def run(self, args):
"""
Wrap args with a path and send it back to autotest.
"""
if not len(args):
self.help()
test = args.pop(0)
# Autotest works on control files
if not re.search("control", test):
test = test + "/control"
localdir = os.path.join(os.path.abspath(os.path.curdir), LOCALDIRTEST)
fetchdir = FETCHDIRTEST
globaldir = GLOBALDIRTEST
autodir = os.environ['AUTODIRTEST']
for dirtest in [localdir, fetchdir, globaldir, autodir]:
d = os.path.join(dirtest, test)
if os.path.isfile(d):
args.insert(0, d)
return args
logging.error("Can not find test %s", test)
raise SystemExit(1)
| ColinIanKing/autotest | client/cmdparser.py | Python | gpl-2.0 | 8,174 |
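A minimal usage sketch for the CommandParser defined above; the AUTODIRTEST path and the test name are placeholders, not values from the repository, and the call only succeeds if the corresponding control file exists.

```python
# Hypothetical usage of CommandParser; AUTODIRTEST and 'sleeptest' are placeholders.
import os

os.environ.setdefault('AUTODIRTEST', '/usr/local/autotest/client/tests')

parser = CommandParser()
# 'run <test>' rewrites the args into the path of the matching control file,
# e.g. ['/usr/local/autotest/client/tests/sleeptest/control'], if it exists;
# otherwise the parser logs an error and exits.
remaining = parser.parse_args(['run', 'sleeptest'])
print(remaining)
```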
#!/bin/python
# coding: utf-8
import tornado.ioloop
import tornado.web
import mosquitto
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write('<html><body>'
'<form action="/" method="post">'
'<input type="text" name="message">'
'<input type="submit" value="ใฏใชใ">'
'</form></body></html>')
def post(self):
self.write('<html><body>'
'<form action="/" method="post">'
'<input type="text" name="message">'
'<input type="submit" value="ใฏใชใ">'
'</form></body></html>')
payload = str(self.get_argument("message"))
self.write(u"ใใในใฃใ่จ่ใฏ " + payload)
client = mosquitto.Mosquitto("test-client")
client.connect("hostname")
client.publish("topic", payload, 1)
client.disconnect()
if __name__ == "__main__":
client = mosquitto.Mosquitto("test-client")
client.connect("hostname")
application = tornado.web.Application([
(r"/", MainHandler),
])
application.listen(80)
tornado.ioloop.IOLoop.instance().start()
| aquaviter/speak_raspberry_pi | speak.py | Python | gpl-2.0 | 1,264 |
import os
from fabric.api import task, local, lcd, env
@task
def provision(args=''):
"""
Provision the box using ansible, optionally pass in some args,
e.g:
``fab use:vagrant provision:'--tags env'``
"""
with lcd(os.path.join(env.root, 'provisioning')):
local('ansible-playbook {} {}'.format(env.playbook, args))
@task
def vm(command='list-commands'):
"""
Use this projects virtual machine with arbitrary commands
e.g:
``fab vm:up``
"""
vagrant_directory = os.path.join(
env.root, 'provisioning', os.path.dirname(env.playbook)
)
with lcd(vagrant_directory):
local('vagrant {}'.format(command))
| farridav/fabric-magento | provisioning.py | Python | gpl-2.0 | 689 |
# Django
from django import forms
# local Django
from volunteer.models import Volunteer
from shift.models import Report
class ReportForm(forms.Form):
event_name = forms.RegexField(
regex=r'^[(A-Z)|(a-z)|(0-9)|(\s)|(\.)|(,)|(\-)|(!)]+$',
max_length=75,
required=False)
job_name = forms.RegexField(
regex=r'^[(A-Z)|(a-z)|(\s)]+$', max_length=75, required=False)
start_date = forms.DateField(required=False)
end_date = forms.DateField(required=False)
class Meta:
model = Report
class SearchVolunteerForm(forms.Form):
first_name = forms.RegexField(
regex=r'^[(A-Z)|(a-z)|(\s)|(\-)]+$', max_length=30, required=False)
last_name = forms.RegexField(
regex=r'^[(A-Z)|(a-z)|(\s)|(\-)]+$', max_length=30, required=False)
city = forms.RegexField(
regex=r'^[(A-Z)|(a-z)|(\s)|(\-)]+$', max_length=75, required=False)
state = forms.RegexField(
regex=r'^[(A-Z)|(a-z)|(\s)|(\-)]+$', max_length=75, required=False)
country = forms.RegexField(
regex=r'^[(A-Z)|(a-z)|(\s)|(\-)]+$', max_length=75, required=False)
organization = forms.RegexField(
regex=r'^[(A-Z)|(a-z)|(\s)|(\-)]+$', max_length=75, required=False)
event = forms.CharField(required=False)
job = forms.CharField(required=False)
class VolunteerForm(forms.ModelForm):
unlisted_organization = forms.RegexField(
regex=r'^[(A-Z)|(a-z)|(0-9)|(\s)|(\-)|(:)]+$',
max_length=100,
required=False
)
class Meta:
model = Volunteer
fields = [
'first_name', 'last_name', 'address',
'phone_number', 'email', 'websites',
'description', 'resume', 'resume_file',
'reminder_days'
]
| systers/vms | vms/volunteer/forms.py | Python | gpl-2.0 | 1,765 |
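A small, hypothetical illustration of how the regex-validated search fields above behave; it assumes Django settings are already configured for this project, and the input values are made up.

```python
# Hypothetical illustration of SearchVolunteerForm's regex validation;
# assumes Django settings are already configured.
form = SearchVolunteerForm(data={'first_name': 'Ada', 'city': 'San-Francisco'})
print(form.is_valid())   # True: letters, whitespace and '-' are accepted
form = SearchVolunteerForm(data={'first_name': 'Ada_1'})
print(form.is_valid())   # False: digits and '_' fall outside the character class
```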
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# import required modules
import time
import subprocess
import RPi.GPIO as GPIO
# set GPIO pin with connected button
GPIOPin = 14
GPIOPinLED = 4
GPIOPinRelay1 = 22
GPIOPinRelay2 = 27
GPIO.setwarnings(False)
#GPIO.setmode(GPIO.BCM)
#GPIO.setup(GPIOPinLED, GPIO.OUT)
#GPIO.setup(GPIOPinRelay1, GPIO.OUT)
#GPIO.setup(GPIOPinRelay2, GPIO.OUT)
# main function
def main():
value = 0
while True:
# increment value if button is pressed
if not GPIO.input(GPIOPin):
GPIO.output(GPIOPinRelay1, False)
GPIO.output(GPIOPinRelay2, False)
GPIO.output(GPIOPinLED, False)
if value > 0:
subprocess.call(["mpc", "stop"])
value = 0
# restart selected if value is less than 3
elif GPIO.input(GPIOPin):
GPIO.output(GPIOPinRelay1, True)
GPIO.output(GPIOPinRelay2, True)
GPIO.output(GPIOPinLED, True)
if value < 1:
subprocess.call(["mpc", "play"])
value = 1
# wait 500ms
time.sleep(0.5)
return 0
if __name__ == '__main__':
# use GPIO pin numbering convention
GPIO.setmode(GPIO.BCM)
# set up GPIO pin for input
GPIO.setup(GPIOPin, GPIO.IN)
GPIO.setup(GPIOPinLED, GPIO.OUT)
GPIO.setup(GPIOPinRelay1, GPIO.OUT)
GPIO.setup(GPIOPinRelay2, GPIO.OUT)
# call main function
main()
| sbiermann/VE301 | scripts/radioswitch.py | Python | gpl-2.0 | 1,333 |
# -*- coding: utf-8 -*-
#
# bodhi documentation build configuration file, created by
# sphinx-quickstart on Sat Aug 10 09:29:50 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['cornice.ext.sphinxext', 'sphinx.ext.autodoc',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bodhi'
copyright = u'2007-{}, Red Hat, Inc.'.format(datetime.datetime.utcnow().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.12'
# The full version, including alpha/beta/rc tags.
release = '2.12.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo': 'logo.svg',
'logo_name': True,
'github_user': 'fedora-infra',
'github_repo': 'bodhi',
'page_width': '1040px',
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bodhidoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'bodhi.tex', u'bodhi Documentation',
u'Luke Macken', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('man_pages/bodhi', 'bodhi', u'manage Fedora updates', ['Randy Barlow', 'Luke Macken'], 1),
('man_pages/bodhi-approve-testing', 'bodhi-approve-testing', u'approve updates',
['Randy Barlow'], 1),
('man_pages/bodhi-check-policies', 'bodhi-check-policies', u'check policies',
['Matt Jia'], 1),
('man_pages/bodhi-clean-old-mashes', 'bodhi-clean-old-mashes', u'clean old mashes',
['Randy Barlow'], 1),
('man_pages/bodhi-push', 'bodhi-push', u'push Fedora updates', ['Randy Barlow'], 1),
('man_pages/initialize_bodhi_db', 'initialize_bodhi_db', u'initialize bodhi\'s database',
['Randy Barlow'], 1),
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bodhi', u'bodhi Documentation',
u'Luke Macken', 'bodhi', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| tyll/bodhi | docs/conf.py | Python | gpl-2.0 | 9,195 |
# coding: utf-8
from qgis.gui import QgsFontMarkerSymbolLayerV2Widget
from qgis.utils import iface
layer = iface.activeLayer()
font_marker_symbol_layer_v2_widget = QgsFontMarkerSymbolLayerV2Widget(layer)
font_marker_symbol_layer_v2_widget.show()
| webgeodatavore/pyqgis-samples | gui/qgis-sample-QgsFontMarkerSymbolLayerV2Widget.py | Python | gpl-2.0 | 248 |
#!/usr/bin/env python
from __future__ import unicode_literals
| michellemorales/OpenMM | kaldi/egs/vystadial_cz/online_demo/__init__.py | Python | gpl-2.0 | 62 |
import os
from flask import Markup
import datetime
import markdown
import bleach
from bleach_whitelist import markdown_tags, markdown_attrs, all_styles
import yaml
class Page(object):
def __init__(self, name, md_path, history_path, history_file):
self.md_path = md_path
print(history_path)
self.history_path = os.path.join(history_path,
name + "/")
self.history_file = history_file
self.name = name
self.init_data()
def init_data(self):
if not os.path.exists(self.history_path):
os.mkdir(self.history_path)
self.revision_num = 1
self.revision_files = []
if os.path.exists(self.history_file):
with open(self.history_file, 'r') as f:
content = yaml.load(f)
self.revision_num = content['revision_num']
self.revision_files = content['revision_files']
def load_content(self):
with open(self.md_path, 'r') as f:
self.md = f.read()
self.content = self.read_md(self.md)
def read_md(self, md):
content = markdown.markdown(md)
clean = bleach.clean(content,
markdown_tags, markdown_attrs, all_styles)
return Markup(clean)
def write_md(self):
with open(self.md_path, 'w') as f:
f.write(self.md)
# get next file and write old content
name = datetime.datetime.strftime(datetime.datetime.now(),
'%Y%m%d%H%M%S%f')
name = "r{0}-{1}.md".format(self.revision_num - 1, name)
stamp_path = os.path.join(self.history_path, name)
# create stamp file
with open(stamp_path, "w+") as f:
f.write(self.previous_md)
print("Saved {}".format(stamp_path))
self.revision_files.append(stamp_path)
# update history file
yaml_struct = {"revision_num": self.revision_num,
"revision_files": self.revision_files}
yaml_dump = yaml.dump(yaml_struct)
with open(self.history_file, "w") as f:
f.write(yaml_dump)
print("Updated {}".format(self.history_path))
def update_md(self, md):
self.previous_md = self.md
self.previous_content = self.content # use if needed to show diff
self.revision_num += 1
self.md = md
self.content = self.read_md(self.md)
def get_content(self):
return self.content
def get_md(self):
return self.md
def get_name(self):
return self.name
def get_revision(self):
return self.revision_num
| edran/betterinformatics | betterinformatics/page.py | Python | gpl-2.0 | 2,699 |
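A minimal usage sketch for the Page class above; the markdown file and history directory are placeholders, not paths from the repository, and must already exist for the sketch to run.

```python
# Hypothetical usage of the Page wiki-page wrapper defined above;
# all paths are placeholders and must exist beforehand.
page = Page(name="home",
            md_path="/tmp/wiki/home.md",
            history_path="/tmp/wiki/history",
            history_file="/tmp/wiki/history/home.yaml")
page.load_content()            # read and sanitize the current markdown
page.update_md("# Hello\n")    # stage new markdown and bump the revision
page.write_md()                # persist it and archive the previous revision
print(page.get_revision())
```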
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file is part of OpenMalaria.
Copyright (C) 2005-2014 Swiss Tropical Institute
OpenMalaria is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import os.path
import sys
from optparse import OptionParser
def readSwArmIds(fileName):
"""Read scenarios.csv file of all arm ids for each sweep per scenario."""
handle=open(fileName)
# get headers, skipping file name
headers=handle.readline().strip().split(',')[1:]
ret=dict()
for line in handle:
csi=line.strip().split(',')
ret[csi[0]] = csi[1:]
return headers,ret
def main(args):
parser = OptionParser(usage="Usage: %prog [options] SCENARIOS.CSV RESULTS_DIR OUTPUT_FILE",
description="""Given a .csv file associating scenario file names
with DB scenario IDs (sce_id), SCENARIOS.CSV, and a directory of results,
RESULTS_DIR, where results have the name of the scenario file but
with .xml substituted for .txt, a tab-separated file of combined
outputs, OUTPUT_FILE, is written, where each line contains sce_id,
survey, group and measure identifiers, and a value.""",version="%prog 0.1")
(options, others) = parser.parse_args(args=args[1:])
if len(others)!=3 or not os.path.isdir(others[1]):
parser.print_usage()
return 1
sweeps,swArmIds = readSwArmIds(others[0])
sce_id_ind = next((i for i in range(len(sweeps)) if sweeps[i] == 'sce_id'), None)
if sce_id_ind is None:
raise Exception("unable to find column for sce_id!")
# sce_id_ind is zero-indexed and misses first column, so add 2:
print(("Column for sce_id:",sce_id_ind+2))
outFile=open(others[2],'w')
resultDir=others[1]
for f,k in list(swArmIds.items()):
sce_id=k[sce_id_ind]
lineStart=sce_id+'\t'
resPath=os.path.join(resultDir,f+'.txt')
if os.path.isfile(resPath):
res=open(resPath)
for line in res:
outFile.write(lineStart)
outFile.write(line)
res.close()
else:
pass # missing result
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| SwissTPH/openmalaria | util/assimilateForDBImport.py | Python | gpl-2.0 | 2,871 |
from utils import *
from scipy import ndimage
def get_dI(imgI, warp_params=np.eye(3)):
# convolve with dx, dy filters
# ? why is it not warp and then grad. BUT grad and then warp
dx_filter = np.array([[-1,-1,-1], [0,0,0], [1,1,1]]).T
imgI_dx = ndimage.correlate(imgI, dx_filter, mode='constant', cval=0.0)
imgI_dy = ndimage.correlate(imgI, dx_filter.T, mode='constant', cval=0.0)
imgI_dx_warped = transform.warp(imgI_dx, warp_params)
imgI_dy_warped = transform.warp(imgI_dy, warp_params)
stacked_dI = np.dstack((imgI_dx_warped, imgI_dy_warped))
return stacked_dI
def get_dw_dp(imgI, p):
h, w = imgI.shape
target_coordinates = np.mgrid[0:h, 0:w][::-1] # [::-1] is needed for making x, y directions horizontal and vertical
# print('target coords: ', target_coordinates[:, :5, :5])
target_coordinates = target_coordinates.reshape(2, -1).T
# print(target_coordinates[:5,:])
source_coordinates = transform.matrix_transform(target_coordinates, np.linalg.pinv(p))
# print(source_coordinates[:5,:])
dw_dp = np.zeros((h*w, 2, 6))
dw_dp[:, 0, 0] = source_coordinates[:, 0] # x
dw_dp[:, 0, 1] = source_coordinates[:, 1] # y
dw_dp[:, 0, 2] = 1
dw_dp[:, 1, 3] = source_coordinates[:, 0] # x
dw_dp[:, 1, 4] = source_coordinates[:, 1] # y
dw_dp[:, 1, 5] = 1
#input()
return dw_dp
def multiply(imgI_dx_dy_w, dw_dp):
# (h, w, 2), (h*w, 2, 6)
N, _, _ = dw_dp.shape
imgI_dx_dy_w = imgI_dx_dy_w.reshape(-1, 1, 2) # (h*w, 1, 2)
mul_result = np.matmul(imgI_dx_dy_w, dw_dp) # (h*w, 1, 2) x (h*w, 2, 6) => (h*w, 1, 6)
return mul_result.reshape(N, -1)
def get_A(imgI, p):
# 3. warp gradient
imgI_dx_dy_w = get_dI(imgI, warp_params=p) # (h, w, 2)
# 4. get jacobian dw/dp
dw_dp = get_dw_dp(imgI, p) # (h*w, 2, 6)
# 5. get dI * dw_dp
dI_mul_dwdp = multiply(imgI_dx_dy_w, dw_dp) # (h*w, 6)
return dI_mul_dwdp
def lucas_kanade_additive(imgT, imgI, eps=1e-8, p=None):
print('lucas_kanade_additive: imgT shape:{}, imgI shape:{}'.format(imgT.shape, imgI.shape))
if p is None:
p = np.eye(3)
p = p[:2, :].reshape(-1, 1)
for iteration in range(10000):
# 1. get warped image
h, w = imgI.shape
current_p = np.concatenate((p.reshape(2,3), np.array([[0,0,1]])), axis=0)
imgI_warped = transform.warp(imgI, current_p)
# show the images
if iteration % 1000 == 0:
show_images(imgT, imgI_warped)
# 2. error image
error_img = imgT - imgI_warped
error_img = error_img.reshape(-1, 1)
# consider errors only for inliers
new_coords = transform.matrix_transform(get_traditional_xy_coords(imgI.shape), current_p)
error_img[new_coords[:,0] < 0] = 0
error_img[new_coords[:,1] < 0] = 0
error_img[new_coords[:,0] > w] = 0
error_img[new_coords[:,1] > h] = 0
if iteration % 100 == 0:
print('step: {}, error: {}'.format(iteration, (error_img**2).sum()))
print('current p', current_p)
# steps 3, 4, 5
A = get_A(imgI, current_p)
# 6. calculate dp (similar to optical flow formulation with least square fitting)
H = np.linalg.pinv(np.matmul(A.T, A))
after_H = np.matmul(A.T, error_img)
dp = np.matmul(H, after_H)
p = p + dp
# check for the minimum error threshold
dp_magnitude = (dp**2).sum()
if dp_magnitude <= eps:
break
return np.concatenate((p.reshape(2,3), np.array([[0,0,1]])), axis=0), imgI_warped
| InnovArul/codesmart | computer_vision/lucas-kanade/lucas_kanade_additive.py | Python | gpl-2.0 | 3,640 |
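For reference, the iteration in lucas_kanade_additive above computes the standard forward-additive Lucas-Kanade step: A stacks the per-pixel products of the warped image gradient with the warp Jacobian, H is the pseudo-inverse of A^T A, and the parameter increment is applied additively.

```latex
% Forward-additive Lucas-Kanade update matching steps 1-6 above.
\Delta p = \Big(\sum_x \big[\nabla I \tfrac{\partial W}{\partial p}\big]^{T}
                       \big[\nabla I \tfrac{\partial W}{\partial p}\big]\Big)^{-1}
           \sum_x \big[\nabla I \tfrac{\partial W}{\partial p}\big]^{T}
                  \big[T(x) - I(W(x;p))\big],
\qquad p \leftarrow p + \Delta p
```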
from __future__ import print_function
import functools
import io
import logging
import os
import os.path
import sys
from pytest import mark
from libearth.compat import IRON_PYTHON, PY3
from libearth.compat.etree import fromstringlist
from libearth.parser.autodiscovery import get_format
from libearth.schema import write
test_suite_dir = os.path.join(os.path.dirname(__file__), 'parsing')
test_files = frozenset(f
for f in os.listdir(test_suite_dir)
if not f.startswith('.') and f.endswith('.xml'))
test_pairs = {}
missing_inputs = set()
for in_file in test_files:
if not in_file.endswith('.out.xml'):
out_file = in_file.rstrip('.xml') + '.out.xml'
if out_file in test_files:
test_pairs[in_file] = out_file
else:
missing_inputs.add(in_file)
@mark.parametrize(('input_', 'expected'), test_pairs.items())
def test_parse(input_, expected):
with open(os.path.join(test_suite_dir, input_), 'rb') as f:
xml = f.read()
if IRON_PYTHON:
xml = bytes(xml)
parse = get_format(xml)
assert callable(parse)
uri_filename = input_.rstrip('.xml') + '.uri.txt'
try:
with open(os.path.join(test_suite_dir, uri_filename)) as f:
base_uri = f.read().strip()
except (IOError, OSError):
base_uri = 'http://example.com/'
parsed_feed, _ = parse(xml, feed_url=base_uri)
parsed_tree = fromstringlist(
write(parsed_feed, canonical_order=True, hints=False)
)
if IRON_PYTHON:
open_ = functools.partial(io.open, encoding='utf-8')
elif PY3 and sys.platform == 'win32':
open_ = functools.partial(open, encoding='utf-8')
else:
open_ = open
with open_(os.path.join(test_suite_dir, expected)) as f:
expected_tree = fromstringlist(f.read() if IRON_PYTHON else f)
compare_tree(expected_tree, parsed_tree)
def compare_tree(expected, parsed, path=''):
assert expected.tag == parsed.tag, (
'expected: {0}{1}\nparsed: {0}{2}'.format(path, expected.tag,
parsed.tag)
)
path += '/' + expected.tag
for name, value in expected.attrib.items():
expected_value = parsed.attrib.get(name)
assert expected_value == value, (
'{0}@{1}\n expected: {2!r}\n parsed: {3!r}'.format(
path, name, expected_value, value
)
)
for name, value in parsed.attrib.items():
assert name in expected.attrib, (
'{0}@{1}\n expected: None\n parsed: {2!r}'.format(
path, name, value
)
)
expected_text = expected.text
parsed_text = parsed.text
if IRON_PYTHON:
# IronPython XML parser ignores starting and trailing whitespaces.
expected_text = expected_text and expected_text.strip()
parsed_text = parsed_text and parsed_text.strip()
assert expected_text == parsed_text, (
'{0}/text()\n expected: {1!r}\n parsed: {2!r}'.format(
path, expected_text, parsed_text
)
)
expected_children = expected.getchildren()
parsed_children = parsed.getchildren()
for e, p in zip(expected_children, parsed_children):
compare_tree(e, p, path)
expected_len = len(expected_children)
parsed_len = len(parsed_children)
if expected_len > parsed_len:
longer = 'expected'
children = expected_children
else:
longer = 'parsed'
children = parsed_children
diff_len = abs(expected_len - parsed_len)
delta = '\n '.join(e.tag for e in children[-diff_len:])
assert expected_len == parsed_len, (
'{0}\n expected: {1} children\n parsed: {2}'
'\n {3} {4} more elements\n {5}'.format(
path, expected_len, parsed_len, longer, diff_len, delta
)
)
if __name__ == '__main__':
if not missing_inputs:
print('All XML files have their paired .out.xml file.')
raise SystemExit()
try:
input = raw_input
except NameError:
pass
print('There are', len(missing_inputs),
'XML files that have no paired .out.xml files:')
for missing_input in sorted(missing_inputs):
print('\t' + missing_input)
confirm = input('Do you want to create scaffold .out.xml files? ')
if not confirm.strip().lower() in ('y', 'yes'):
raise SystemExit()
logging.basicConfig(level=logging.DEBUG)
formats = {}
for filename in missing_inputs:
print(filename)
with open(os.path.join(test_suite_dir, filename)) as f:
xml = f.read()
try:
parse = get_format(xml)
except Exception:
print('Failed to detect the format of', filename, file=sys.stderr)
raise
uri_filename = filename.rstrip('.xml') + '.uri.txt'
try:
with open(os.path.join(test_suite_dir, uri_filename)) as f:
base_uri = f.read().strip()
except (IOError, OSError):
base_uri = 'http://example.com/'
try:
feed, _ = parse(xml, feed_url=base_uri)
except Exception:
print('Failed to parse', filename, file=sys.stderr)
raise
out_filename = filename.rstrip('.xml') + '.out.xml'
try:
expected = ''.join(write(feed, canonical_order=True, hints=False))
with open(os.path.join(test_suite_dir, out_filename), 'w') as f:
f.write(expected)
except Exception:
print('Failed to write', out_filename, file=sys.stderr)
raise
| Kjwon15/libearth | tests/parsing_test.py | Python | gpl-2.0 | 5,661 |
"""
WSGI config for diabetto project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "diabetto.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| umax/diabetto2 | diabetto/wsgi.py | Python | gpl-2.0 | 391 |
#!/usr/bin/env python
# vim: set expandtab:
###
# Copyright (c) 2013, Jim Richardson <weaselkeeper@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
""" Koji scan grabs all the packages for a given tag in koji, and creates a
list of path locations for repomonger to use. Needs some info from koji server
on where stuff is stored, but for now, just dumps a list of NVR package names.
License: GPL V2 See LICENSE file
Author: Jim Richardson
email: weaselkeeper@gmail.com
Date: 21 Feb 2014
"""
import os
import sys
from ConfigParser import SafeConfigParser
import logging
import koji
# Setup some basic default stuff
CONFIGFILE = '/etc/repomonger/repomonger.conf'
PROJECTNAME = 'kojiscan'
# Setup logging
logging.basicConfig(level=logging.WARN,
format='%(asctime)s %(levelname)s - %(message)s',
datefmt='%y.%m.%d %H:%M:%S')
# Setup logging to console.
console = logging.StreamHandler(sys.stderr)
console.setLevel(logging.WARN)
logging.getLogger(PROJECTNAME).addHandler(console)
log = logging.getLogger(PROJECTNAME)
def koji_conn(server):
""" create connection to koji server """
log.debug('Starting in koji_conn')
conn = koji.ClientSession(server, {})
log.debug('leaving koji_conn')
return conn
def koji_packagelist(kojiclient, tag):
""" for now, just get a list of the packages for a given tag """
log.debug('in koji_packagelist looking for %s', tag)
packages = []
log.debug('Opening client session to ')
pkglist = kojiclient.getLatestRPMS(tag)
for pkg in pkglist[1]:
packages.append(pkg['name'])
log.debug(pkg['name'])
log.debug('leaving koji_packagelist')
return packages
def koji_rpmlist(conn, tag, basepath, pkg):
""" get a list of rpms for package, and make full pathname to return """
log.debug('in koji_rpmlist')
files = []
details = conn.getLatestRPMS(tag, package=pkg)
for rpm_pkg in details[0]:
filename = rpm_pkg['name'] + '-' + rpm_pkg['version'] + '-' +\
rpm_pkg['release'] + '.' + rpm_pkg['arch'] + '.rpm'
path = basepath, pkg, rpm_pkg['version'], rpm_pkg['release'], \
rpm_pkg['arch']
pathname = '/'.join(path)
fullpath = pathname + '/' + filename
files.append(fullpath)
log.debug(fullpath)
log.debug('leaving koji_rpmlist')
return files
def run(_args):
""" Beginning the run """
log.debug('entering run()')
_filelist = []
if _args.config:
CONFIG = _args.config
else:
CONFIG = CONFIGFILE
if _args.basepath:
basepath = _args.basepath
else:
basepath = '/mnt/koji/packages'
parsed_config = parse_config(CONFIG)
if _args.kojitag:
tag = _args.kojitag
else:
tag = parsed_config.get('koji', 'tag')
if _args.kojiserver:
server = _args.kojiserver
else:
server = parsed_config.get('koji', 'serverurl')
conn = koji_conn(server)
kojipkgs = koji_packagelist(conn, tag)
for pkg in kojipkgs:
pkgrpms = koji_rpmlist(conn, tag, basepath, pkg)
for pkg in pkgrpms:
_filelist.append(pkg)
if _args.debug:
for _file in _filelist:
log.debug(_file)
log.debug('Exiting run()')
return _filelist
def get_args():
""" Parse the command line options """
import argparse
parser = argparse.ArgumentParser(
description='Scan data from koji')
parser.add_argument('-d', '--debug', dest='debug',
action='store_true', help='Enable debugging.',
default=None)
parser.add_argument('-c', '--config',
action='store', default=None,
help='Specify a path to an alternate config file')
parser.add_argument('-D', '--dst', action='store',
dest="destdir", help='Topdir of cloned repo')
parser.add_argument('-k', '--koji', action='store',
dest="kojiserver", help='koji server to get info from')
parser.add_argument('-t', '--tag', action='store',
dest="kojitag", help='koji tag to get info for')
parser.add_argument('-b', '--basepath', action='store',
help='basepath of koji packages')
_args = parser.parse_args()
_args.usage = PROJECTNAME + ".py [options]"
return _args
def parse_config(_CONFIGFILE):
""" Now parse the config file. Get any and all info from config file.
Push items into args, but args override config settings"""
parser = SafeConfigParser()
if os.path.isfile(_CONFIGFILE):
config = _CONFIGFILE
else:
log.warn('no config file at %s', _CONFIGFILE)
sys.exit(1)
parser.read(config)
return parser
if __name__ == "__main__":
# Here we start if called directly (the usual case.), currently, the only
# case.
args = get_args()
if args.debug:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.WARN)
if args.config:
CONFIGFILE = args.config
filelist = run(args)
| weaselkeeper/repomonger | src/kojiscan.py | Python | gpl-2.0 | 6,620 |
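A hypothetical sketch of calling the koji helpers above directly, outside of run(); the hub URL, tag, and package base path are placeholders, not values from the repository.

```python
# Hypothetical direct use of koji_conn/koji_packagelist/koji_rpmlist;
# the hub URL, tag and base path below are placeholders.
conn = koji_conn('https://koji.example.org/kojihub')
for pkg in koji_packagelist(conn, 'dist-foo-updates'):
    for rpm_path in koji_rpmlist(conn, 'dist-foo-updates', '/mnt/koji/packages', pkg):
        print(rpm_path)
```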
#===============================================================================
# LICENSE XOT-Framework - CC BY-NC-ND
#===============================================================================
# This work is licenced under the Creative Commons
# Attribution-Non-Commercial-No Derivative Works 3.0 Unported License. To view a
# copy of this licence, visit http://creativecommons.org/licenses/by-nc-nd/3.0/
# or send a letter to Creative Commons, 171 Second Street, Suite 300,
# San Francisco, California 94105, USA.
#===============================================================================
import StringIO
import httplib
class CachedHttpResponse(StringIO.StringIO):
""" An class similar to an urllib2.Response object for cached responses."""
def __init__(self, url, headerValue, bodyValue, code = 200, doProcessing=True):
""" initialises a new CachedHttpResponse instance
Arguments:
url : String - The URL from which the data comes
headerValue : String - The header data that should be cached
bodyValue : String - The body value that should be cached
Keyword Arguments:
code : Integer - The HTTP return code
doProcessing : Boolean - [optional] If set to True, cache values are extracted.
Defaults to True. Use for creating a simple httpresponse
in case a complex one failed.
"""
StringIO.StringIO.__init__(self, bodyValue)
self.url = url
self.headerValue = headerValue
self.bodyValue = bodyValue
# cached responses are always OK
self.code = code
self.msg = "OK"
# now we set the header value as StringIO
self.headers = httplib.HTTPMessage(StringIO.StringIO(headerValue))
if doProcessing:
self.cacheParameters = self.__ExtractCachHeader(self.headers)
def info(self):
""" Returns headers """
return self.headers
def geturl(self):
""" Returns original URL """
return self.url
def SetCachFlag(self, flag, value = True):
""" Sets additional flags to the Headers
Arguments:
flag : String - Name of the header attribute
Keyword Arguments:
value : Object - The value to store. Eventually it will be stored as
an String.
"""
#headerBuffer = "%s%s: True\r\n" % (self.headerValue, flag)
#print headerBuffer
self.headers[flag] = str(value)
self.headers = httplib.HTTPMessage(StringIO.StringIO(str(self.headers)))
return
def __str__(self):
""" Returns a text representation of the response """
return "CachedHttpResponse with status %s (%s) for %s\nCache-Parameters: %s" % (self.code, self.msg, self.url, self.cacheParameters)
def __ExtractCachHeader(self, headers):
""" Extracts the "Cache-Control" header field and returns it's values
as a dictionary.
Arguments
headers : HTTPHeaders - The headers of a HTTP request/response
Returns a dictionary with the Cache-Control parameters. If a parameter
does not have a value, the value True is used as in general the
availability of a parameter means it is valid.
"""
cacheParams = dict()
if headers.has_key("cache-control"):
headerLine = headers['cache-control']
for entry in headerLine.strip().split(","):
#self.__Log("Found Cache Key: '%s'", entry.strip())
if entry.find("=") > 0:
(key, value) = entry.split("=")
try:
cacheParams[key.strip().lower()] = int(value.strip())
except ValueError:
cacheParams[key.strip().lower()] = True
else:
cacheParams[entry.strip().lower()] = True
if headers.has_key("etag"):
#self.__Log("Found Cache Key: '%s'", entry.strip())
cacheParams['etag'] = headers['etag']
return cacheParams
| SMALLplayer/smallplayer-image-creator | storage/.xbmc/addons/net.rieter.xot.smallplayer/resources/libs/cache/cachedhttpresponse.py | Python | gpl-2.0 | 4,532 |
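A hypothetical demo of the CachedHttpResponse class above (Python 2, matching the module's StringIO/httplib imports); the header and body strings are made up.

```python
# Hypothetical demo; header and body values are made up.
header_text = ('Cache-Control: max-age=300, no-cache\r\n'
               'ETag: "abc123"\r\n'
               '\r\n')
resp = CachedHttpResponse("http://example.com/", header_text, "<html></html>")
print(resp.read())           # the cached body
print(resp.cacheParameters)  # e.g. {'max-age': 300, 'no-cache': True, 'etag': '"abc123"'}
```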
# -*- coding: utf-8 -*-
# UFO-launcher - A multi-platform virtual machine launcher for the UFO OS
#
# Copyright (c) 2008-2010 Agorabox, Inc.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import logging
import fcntl
import struct
import glob
import sys
import os, os.path as path
from conf import conf
import shutil
import gui
import tempfile
import utils
import re
from osbackend import OSBackend
conf.MOBILE = not conf.USESERVICE
conf.VBOX_INSTALLED = path.exists("/Applications/VirtualBox.app") and \
not path.islink("/Applications/VirtualBox.app")
logging.debug("Using Mobile mode : " + str(conf.MOBILE.__nonzero__()))
logging.debug("Is VirtualBox installed : " + str(conf.VBOX_INSTALLED))
class MacBackend(OSBackend):
VBOXMANAGE_EXECUTABLE = "VBoxManage"
VIRTUALBOX_EXECUTABLE = "VirtualBox"
RELATIVE_VMDK_POLICY = True
KEXTS = "kexts"
def __init__(self):
OSBackend.__init__(self, "macosx")
self.OS_VERSION = os.uname()[2]
if self.OS_VERSION < "9":
self.KEXTS = path.join(self.KEXTS, "Tiger")
self.disks = []
self.tmpdir = ""
def get_default_audio_driver(self):
return self.vbox.constants.AudioDriverType_CoreAudio
def check_process(self):
logging.debug("Checking UFO process")
# We used to use a pipe, but Satan knows why, it returns a shadow
# of the running program. So we run two separate commands
psout = self.call(["ps", "ax", "-o", "pid,command"], output = True)[1]
processes = [ x[0].strip() for x in re.findall(r"(.*/UFO(\n| .*))", psout) ]
logging.debug("ufo process : " + str(processes))
if len(processes) > 1 :
pids = [ i.strip().split(" ")[0] for i in processes ]
i = len(pids) - 1
while i >= 0:
ppid = self.call(["ps", "-p", pids[i], "-o", "ppid"], output=True)[1].split("\n")[-1].strip()
logging.debug("Process %s is a child of %s" % (pids[i], ppid))
if ppid in pids:
del pids[i]
del processes[i]
i -= 1
if len(pids) > 1:
logging.debug("U.F.O launched twice. Exiting")
if self.error_already_running("\nProcessus ".join(processes)):
for pid in pids:
self.call([ "kill", "-9", pid ])
sys.exit(0)
logging.debug("Checking VBoxXPCOMIPCD process")
if self.call([ ["ps", "ax", "-o", "pid,command"],
["grep", "VBoxXPCOMIPCD"],
["grep", "-v", "grep" ] ], output = True)[1]:
logging.debug("VBoxXPCOMIPCD is still running. Exiting")
self.error_already_running('', 'VirtualBox')
sys.exit(0)
def prepare_update(self):
return self.prepare_self_copy()
def prepare_self_copy(self):
self_copied_path = tempfile.mkdtemp(prefix="ufo-self-copied")
os.mkdir(path.join(self_copied_path, ".data"))
src = path.join(conf.DATA_DIR, "..")
logging.debug("Copying " + src + " to " + self_copied_path)
self.call([ [ "tar", "-cf", "-", "-C", src, "Mac-Intel"],
[ "tar", "xf", "-", "-C", self_copied_path ] ], output = True)[1]
self.call([ [ "tar", "-cf", "-", "-C", conf.DATA_DIR, "images" ],
[ "tar", "xf", "-", "-C", path.join(self_copied_path, ".data") ] ], output = True)[1]
self.call([ [ "tar", "-cf", "-", "-C", conf.DATA_DIR, "locale" ],
[ "tar", "xf", "-", "-C", path.join(self_copied_path, ".data") ] ], output = True)[1]
self.call([ [ "tar", "-cf", "-", "-C", conf.DATA_DIR, "settings" ],
[ "tar", "xf", "-", "-C", path.join(self_copied_path, ".data") ] ], output = True)[1]
return path.join(self_copied_path, "Mac-Intel", conf.MACEXE)
def get_model(self, dev):
medianame = utils.grep(self.call(["/usr/sbin/diskutil", "info", dev], output=True)[1], "Media Name:")
if medianame:
return medianame[medianame.find(':') + 1:]
def find_device_by_uuid(self, dev_uuid):
return ""
def find_device_by_volume(self, dev_volume):
output = utils.grep(self.call([ "diskutil", "list" ], output=True)[1], " " + dev_volume + " ").split()
if output:
return "/dev/" + output[-1][:-2]
return ""
def find_device_by_model(self, dev_model):
for device in glob.glob("/dev/disk[0-9]"):
model = self.get_model(device)
logging.debug("device: %s, %s" % (device, model))
if model == dev_model:
return device[:-2]
return ""
def get_free_ram(self):
maxmem = 384
mem = utils.grep(self.call([ "top", "-l", "1" ], output=True)[1], "PhysMem:").split()
for ind, val in enumerate(mem):
if ind > 0 and val in [ "inactive", "free", "free." ]:
val = mem[ind - 1]
ival = float(val[:-1])
if val.endswith('G'):
ival *= 1024
maxmem = max(int(ival), maxmem)
return maxmem
def get_free_space(self, path):
stats = os.statvfs(path)
return stats.f_bavail * stats.f_bsize
def get_dvd_device(self):
pass
def get_host_shares(self):
return [{ 'sharename' : "machosthome",
'sharepath' : path.expanduser('~'),
'displayed' : _("My Mac documents") }]
def get_usb_devices(self):
disks = []
try:
for device in glob.glob("/dev/disk[0-9]s[0-9]"):
infos = self.call([ "diskutil", "info", device ], output=True, log=False)[1]
if utils.grep(infos, "Protocol:").split()[1] == "USB" and \
len(utils.grep(infos, "Volume Name:").split()) > 2 and \
len(utils.grep(infos, "Mount Point:").split()) > 2:
disks.append((utils.grep(infos, "Mount Point:").split()[2],
":".join(utils.grep(infos, "Volume Name:").split(":")[1:]).lstrip(),
utils.grep(infos, "Device Node:").split()[2][:-2]))
except: return []
return disks
def get_usb_sticks(self):
disks = []
try:
for device in glob.glob("/dev/disk[0-9]"):
infos = self.call([ "diskutil", "info", device ], output=True, log=False)[1]
if utils.grep(infos, "Protocol:").split()[1] == "USB":
disks.append([ device, " ".join(utils.grep(infos, "Media Name:").split()[2:]) ])
except: return []
return disks
def restore_fstab(self):
if path.exists('/etc/fstab'):
os.unlink('/etc/fstab')
if path.exists('/etc/fstab.bak'):
shutil.copyfile("/etc/fstab.bak", "/etc/fstab")
def get_device_parts(self, dev):
parts = glob.glob(dev + 's[0-9]')
device_parts = {}
for part in parts:
part_number = int(part[len(part)-1:])
part_info = [ part, self.get_device_size(dev[:5] + "r" + dev[5:], part_number) ]
device_parts.update({ part_number : part_info })
return device_parts
def get_disk_geometry(self, device):
import re
output = self.call(["fdisk", device], output=True)[1]
regexp = re.compile(r"Disk: /dev/disk[0-9]\tgeometry: (\d+)/(\d+)/(\d+) \[(\d+) sectors\]")
cylinders, heads, sectors, sectors_nb = map(int, regexp.search(output).groups())
return cylinders, heads, sectors
def get_device_size(self, dev, partition = 0):
if partition > 0:
dev = dev + "s" + str(partition)
fd = os.open(dev, os.O_RDONLY)
DKIOCGETBLOCKSIZE = 1074029592
DKIOCGETBLOCKCOUNT = 1074291737
blocksize = struct.unpack("l", fcntl.ioctl(fd, DKIOCGETBLOCKSIZE, struct.pack("l", 0)))[0]
blockcount = struct.unpack("L", fcntl.ioctl(fd, DKIOCGETBLOCKCOUNT, struct.pack("L", 0)))[0]
os.close(fd)
return blockcount
def find_network_device(self):
if conf.NETTYPE == conf.NET_HOST and conf.HOSTNET != "":
return conf.NET_HOST, conf.HOSTNET
return conf.NET_NAT, ""
# find each mountable partitions of the device
# and add an entry in fstab to disable automount
# params : dev_string
# return : 0 if device is ready
def prepare_device(self, disk):
# TODO:
# Use chflags cmd insteadof fstab workaround
# when conf.PARTS == "all".
# Also use chflags to avoid system mounts
# other volume than UFO, if they are mountable.
if conf.PARTS == "all":
if conf.MOBILE:
if path.exists("/etc/fstab"):
shutil.copyfile("/etc/fstab", "/etc/fstab.bak")
for partition in glob.glob(disk + "s*"):
volname = grep(self.call([ "diskutil", "info", partition ], output=True)[1], "Volume Name:").split()
if not volname or len(volname) < 3: continue
volname = volname[2]
fstype = grep(self.call([ "diskutil", "info", partition ], output=True)[1], "File System:").split()
if fstype:
fstype = fstype[2]
fstype = { "MS-DOS" : "msdos", "Ext2" : "ext2", "Ext3" : "ext3" }.get(fstype, fstype)
logging.debug('echo "LABEL=%s none %s rw,noauto" >> /etc/fstab' % (volname, fstype))
if conf.MOBILE:
append_to_end("/etc/fstab", "LABEL=%s none %s rw,noauto\n" % (volname, fstype))
retcode = self.call([ "diskutil", "unmount", partition ])
if not retcode:
logging.debug("Unable to umount %s, exiting script" % (conf.DEV,))
gui.dialog_info(title="Erreur",
msg=_('Unable to unmount the volume ') + str(volname),
error=True)
return retcode
return 0
return 0
def execv(self, cmd, root=False):
if root:
tries = 0
while tries < 3:
logging.debug("Asking user password")
remember = False
password = gui.dialog_password(remember=False)
if password == None:
ret = -1
break
self.call([ "sudo", "-k" ])
ret = self.call([ [ "echo", str(password)],
[ "sudo", "-S", "touch", sys.executable ] ], log=False)[0]
if ret == 0:
if remember:
output = self.call( [ "sudo", "-l" ], output=True)[1]
if not "NOPASSWD: /Volumes/UFO/Mac-Intel/UFO.app/Contents/MacOS/UFO" in output:
sudoline = os.environ["USER"] + " ALL=(ALL) NOPASSWD: /Volumes/UFO/Mac-Intel/UFO.app/Contents/MacOS/UFO"
self.call([ "sudo", "-n", "-s", "echo -e " + sudoline + " >> /etc/sudoers" ])
break
else:
gui.dialog_info(title=_("Error"),
msg=_("Sorry, couldn't authenticate. Please check your password."),
error=True)
tries += 1
if ret == 0:
cmd = [ "sudo" ] + cmd
else:
return
logging.debug("Environment: " + str(os.environ))
logging.debug("execv: " + " ".join(cmd))
logging.shutdown()
#os.execv(cmd[0], cmd)
self.call(cmd, spawn=True)
def is_admin(self):
return os.geteuid() == 0;
def umount_device(self, device):
if self.call([ "diskutil", "unmountDisk", device ]) != 0:
return False
return True
def get_kernel_modules(self):
if self.OS_VERSION < "9":
modules = [ "VBoxDrvTiger.kext" ]
else:
modules = [ "VBoxDrv.kext", "VBoxNetFlt.kext" ]
return modules
def load_kexts(self):
# loading kernel extentions
KEXTS = path.join(conf.BIN, self.KEXTS)
self.tmpdir = tempfile.mkdtemp()
modules = self.get_kernel_modules()
for module in modules:
modulepath = path.join(self.tmpdir, module)
shutil.copytree(path.join(KEXTS, module), modulepath)
self.call(["chmod", "-R", "644", modulepath ])
self.call(["chown", "-R", "0:0", modulepath ])
self.call(["/sbin/kextload"] + map(lambda x: path.join(self.tmpdir, x), modules))
def unload_kexts(self):
modules = self.get_kernel_modules()
modules.reverse()
self.call(["/sbin/kextunload"] + map(lambda x: path.join(self.tmpdir, x), modules))
def kill_resilient_vbox(self):
# Kill resident com server
self.call([ "killall", "-9", "VBoxXPCOMIPCD" ])
self.call([ "killall", "-9", "VBoxSVC" ])
def get_respawn_command(self):
if path.basename(sys.executable) == "python":
cmd = [ path.join(path.dirname(sys.executable), path.basename(conf.MACEXE)) ]
else:
cmd = [ sys.executable ] + sys.argv
cmd += [ "--respawn" ]
return cmd
def prepare(self):
if not self.is_admin():
cmd = self.get_respawn_command()
self.execv(cmd, True)
sys.exit(1)
if not conf.VBOX_INSTALLED:
if os.path.islink("/Applications/VirtualBox.app"):
os.unlink("/Applications/VirtualBox.app")
# Restore permissions
# self.call([ "/usr/sbin/chown", "-R", "0:0", conf.APP_PATH ])
# self.call([ "chmod", "-R", "755", "/Applications/VirtualBox.app/Contents" ])
# for f in glob.glob("/Applications/VirtualBox.app/Contents/*.*"):
# self.call([ "chmod", "-R", "644", f ])
self.load_kexts()
else:
self.installed_vbox_error()
if "x86_64" in os.uname()[-1]:
self.unsupported_platform(arch="Mac OS X 64 bits")
os.chdir(path.join(conf.BIN, "..", "Frameworks"))
def cleanup(self):
if conf.MOBILE and conf.PARTS == "all":
self.restore_fstab()
if conf.PARTS == "all":
self.call([ "diskutil", "mountDisk", conf.DEV ])
self.unload_kexts()
if self.tmpdir:
shutil.rmtree(self.tmpdir)
def run_vbox(self, command, env):
self.call(command, env = env)
def find_resolution(self):
if gui.backend == "PyQt":
return str(gui.screenRect.width()) + "x" + str(gui.screenRect.height())
return ""
def onExtraDataCanChange(self, key, value):
# xpcom only return the both out parameters
return True, ""
| vienin/vlaunch | src/macbackend.py | Python | gpl-2.0 | 15,766 |
Iz09PWlzZmF0YWxwbHVnaW49PT0NCiMgLSotIGNvZGluZzogdXRmLTggLSotDQoNCiMgIGZhdGFs
IHBsdWdpbg0KIyAgd2F0Y2hlcl9wbHVnaW4ucHkNCg0KIyAgQ29weXJpZ2h0IKkgMjAxMCB3ZC9s
b3R1c2ZlZXQgPGRhby95b2dhPg0KDQojICBUaGlzIHByb2dyYW0gaXMgZnJlZSBzb2Z0d2FyZTsg
eW91IGNhbiByZWRpc3RyaWJ1dGUgaXQgYW5kL29yIG1vZGlmeQ0KIyAgaXQgdW5kZXIgdGhlIHRl
cm1zIG9mIHRoZSBHTlUgR2VuZXJhbCBQdWJsaWMgTGljZW5zZSBhcyBwdWJsaXNoZWQgYnkNCiMg
IHRoZSBGcmVlIFNvZnR3YXJlIEZvdW5kYXRpb247IGVpdGhlciB2ZXJzaW9uIDIgb2YgdGhlIExp
Y2Vuc2UsIG9yDQojICAoYXQgeW91ciBvcHRpb24pIGFueSBsYXRlciB2ZXJzaW9uLg0KDQojICBU
aGlzIHByb2dyYW0gaXMgZGlzdHJpYnV0ZWQgaW4gdGhlIGhvcGUgdGhhdCBpdCB3aWxsIGJlIHVz
ZWZ1bCwNCiMgIGJ1dCBXSVRIT1VUIEFOWSBXQVJSQU5UWTsgd2l0aG91dCBldmVuIHRoZSBpbXBs
aWVkIHdhcnJhbnR5IG9mDQojICBNRVJDSEFOVEFCSUxJVFkgb3IgRklUTkVTUyBGT1IgQSBQQVJU
SUNVTEFSIFBVUlBPU0UuICBTZWUgdGhlDQojICBHTlUgR2VuZXJhbCBQdWJsaWMgTGljZW5zZSBm
b3IgbW9yZSBkZXRhaWxzLg0KDQpXQVRDSEVSUyA9IHt9DQoNCmRlZiBpc193YXRjaGVyX2hlcmUo
Z2NoLCBqaWQpOg0KCWlmIEdST1VQQ0hBVFMuaGFzX2tleShnY2gpOg0KCQlnY2hfZGljID0gR1JP
VVBDSEFUU1tnY2hdDQoJCQ0KCQluaWNrbCA9IFtsaSBmb3IgbGkgaW4gZ2NoX2RpYyBpZiBqaWQg
aW4gZ2NoX2RpY1tsaV1bJ2ppZCddIGFuZCBnY2hfZGljW2xpXVsnaXNoZXJlJ10gPT0gMV0NCgkJ
DQoJCWlmIG5pY2tsOg0KCQkJcmV0dXJuIFRydWUNCglyZXR1cm4gRmFsc2UNCg0KZGVmIGhhbmRs
ZXJfd2F0Y2hlcl9wcmVzZW5jZShwcnMpOg0KCXB0eXBlID0gcHJzLmdldFR5cGUoKQ0KCWdyb3Vw
Y2hhdCA9IHBycy5nZXRGcm9tKCkuZ2V0U3RyaXBwZWQoKQ0KCW5pY2sgPSBwcnMuZ2V0RnJvbSgp
LmdldFJlc291cmNlKCkNCglzY29kZSA9IHBycy5nZXRTdGF0dXNDb2RlKCkNCg0KCWlmIHNjb2Rl
ID09ICczMDMnIGFuZCBwdHlwZSA9PSAndW5hdmFpbGFibGUnOg0KCQluZXduaWNrID0gcHJzLmdl
dE5pY2soKQkNCgkJCQkNCgkJamlkID0gZ2V0X3RydWVfamlkKGdyb3VwY2hhdCsnLycrbmV3bmlj
aykNCgkJCQkNCgkJd2djaHMgPSBXQVRDSEVSUy5rZXlzKCkNCgkJDQoJCWZvciB3amlkIGluIHdn
Y2hzOg0KCQkJaWYgbm90IGlzX3dhdGNoZXJfaGVyZShncm91cGNoYXQsd2ppZCk6DQoJCQkJaWYg
Z3JvdXBjaGF0IGluIFdBVENIRVJTW3dqaWRdWydnY2hzJ106DQoJCQkJCXNnY2ggPSBncm91cGNo
YXQuc3BsaXQoJ0AnLDEpWzBdDQoJCQkJCXJlcCA9IHUnWyVzXS0tLSAlcyAoJXMpIGNoYW5nZWQg
aGlzIG5hbWUgdG8gJXMnICUgKHNnY2gsbmljayxqaWQsbmV3bmljaykgDQoJCQ0KCQkJCQltc2co
d2ppZCxyZXApDQoNCmRlZiBoYW5kbGVyX3dhdGNoZXJfbGVhdmUoZ3JvdXBjaGF0LCBuaWNrLCBy
ZWFzb24sIGNvZGUpOg0KCXdnY2hzID0gV0FUQ0hFUlMua2V5cygpDQoJDQoJZm9yIHdqaWQgaW4g
d2djaHM6DQoJCWlmIG5vdCBpc193YXRjaGVyX2hlcmUoZ3JvdXBjaGF0LHdqaWQpOg0KCQkJaWYg
Z3JvdXBjaGF0IGluIFdBVENIRVJTW3dqaWRdWydnY2hzJ106DQoJCQkJc2djaCA9IGdyb3VwY2hh
dC5zcGxpdCgnQCcsMSlbMF0NCgkJCQkNCgkJCQlyZXAgPSB1JycNCgkJCQkNCgkJCQlpZiBjb2Rl
Og0KCQkJCQlpZiBjb2RlID09ICczMDcnOg0KCQkJCQkJcmVwID0gdSdbJXNdLS0tICVzIGtpY2tl
ZCBvdXQgb2YgdGhlIHJvb20nICUgKHNnY2gsbmljaykNCgkJCQkJZWxpZiBjb2RlID09ICczMDEn
Og0KCQkJCQkJcmVwID0gdSdbJXNdLS0tICVzIGhhcyBiZWVuIGJhbm5lZCcgJSAoc2djaCxuaWNr
KQkNCgkJCQllbHNlOg0KCQkJCQlyZXAgPSB1J1slc10tLS0gJXMgbGVhdmVkIHRoZSByb29tJyAl
IChzZ2NoLG5pY2spDQoJCQkJDQoJCQkJaWYgcmVhc29uOg0KCQkJCQlyZXAgKz0gJzogJXMnICUg
KHJlYXNvbikNCgkNCgkJCQltc2cod2ppZCxyZXApDQoNCmRlZiBoYW5kbGVyX3dhdGNoZXJfam9p
bihncm91cGNoYXQsIG5pY2ssIGFmZiwgcm9sZSk6DQoJamlkID0gZ2V0X3RydWVfamlkKGdyb3Vw
Y2hhdCsnLycrbmljaykNCgkNCgl3YWZmID0gJycNCgl3cm9sZSA9ICcnDQoJDQoJd2djaHMgPSBX
QVRDSEVSUy5rZXlzKCkNCgkNCglpZiBhZmYgPT0gJ293bmVyJzoNCgkJd2FmZiA9IHUnb3duZXIn
DQoJZWxpZiBhZmYgPT0gJ21lbWJlcic6DQoJCXdhZmYgPSB1J21lbWJlcicNCgllbGlmIGFmZiA9
PSAnYWRtaW4nOg0KCQl3YWZmID0gdSdhZG1pbicNCgllbGlmIGFmZiA9PSAnbm9uZSc6DQoJCXdh
ZmYgPSB1J25vbmUnDQoJDQoJaWYgcm9sZSA9PSAncGFydGljaXBhbnQnOg0KCQl3cm9sZSA9IHUn
cGFydGljaXBhbnQnDQoJZWxpZiByb2xlID09ICd2aXNpdG9yJzoNCgkJd3JvbGUgPSB1J3Zpc2l0
b3InDQoJZWxpZiByb2xlID09ICdtb2RlcmF0b3InOg0KCQl3cm9sZSA9IHUnbW9kZXJhdG9yJw0K
CQ0KCWZvciB3amlkIGluIHdnY2hzOg0KCQlpZiBub3QgaXNfd2F0Y2hlcl9oZXJlKGdyb3VwY2hh
dCx3amlkKToNCgkJCWlmIGdyb3VwY2hhdCBpbiBXQVRDSEVSU1t3amlkXVsnZ2NocyddOg0KCQkJ
CXNnY2ggPSBncm91cGNoYXQuc3BsaXQoJ0AnLDEpWzBdDQoJCQkJcmVwID0gdSdbJXNdLS0tICVz
ICglcykgam9pbmVkIHRoZSByb29tIGFzICVzLyVzJyAlIChzZ2NoLG5pY2ssamlkLHdhZmYsd3Jv
bGUpIA0KCQkJCW1zZyh3amlkLHJlcCkNCg0KZGVmIGhhbmRsZXJfd2F0Y2hlcl9tZXNzKHR5cGUs
IHNvdXJjZSwgYm9keSk6DQoJZ3JvdXBjaGF0ID0gc291cmNlWzFdDQoJbmljayA9IHNvdXJjZVsy
XQ0KCXdnY2hzID0gV0FUQ0hFUlMua2V5cygpDQoJDQoJZm9yIHdqaWQgaW4gd2djaHM6DQoJCWlm
IG5vdCBpc193YXRjaGVyX2hlcmUoZ3JvdXBjaGF0LHdqaWQpOg0KCQkJaWYgZ3JvdXBjaGF0IGlu
IFdBVENIRVJTW3dqaWRdWydnY2hzJ106DQoJCQkJc2djaCA9IGdyb3VwY2hhdC5zcGxpdCgnQCcs
MSlbMF0NCgkJCQkNCgkJCQlyZXAgPSB1J1slc108JXM+ICVzJyAlIChzZ2NoLG5pY2ssYm9keSkN
CgkJCQkNCgkJCQlzcGxiID0gYm9keS5zcGxpdCgnICcsMSkNCgkJCQkNCgkJCQlpZiBzcGxiOg0K
CQkJCQlpZiBzcGxiWzBdID09ICcvbWUnOg0KCQkJCQkJcmVwID0gdSdbJXNdKiAlcyAlcycgJSAo
c2djaCxuaWNrLHNwbGJbMV0pDQoJCQkJIA0KCQkJCW1zZyh3amlkLHJlcCkNCgkNCmRlZiBoYW5k
bGVyX3dhdGNoZXIodHlwZSwgc291cmNlLCBwYXJhbWV0ZXJzKToNCglncm91cGNoYXQgPSBzb3Vy
Y2VbMV0NCgkNCglpZiB0eXBlID09ICdjb25zb2xlJzoNCgkJamlkID0gJ2NvbnNvbGUnDQoJZWxz
ZToNCgkJamlkID0gZ2V0X3RydWVfamlkKHNvdXJjZSkNCg0KCWlmIEdST1VQQ0hBVFMuaGFzX2tl
eShncm91cGNoYXQpOg0KCQlyZXBseSh0eXBlLCBzb3VyY2UsIHUnVGhpcyBjb21tYW5kIGNhbiBu
b3QgYmUgdXNlZCBpbiBjb25mZXJlbmNlIScpDQoJCXJldHVybg0KDQoJaWYgcGFyYW1ldGVyczoN
CgkJcGFyYW1ldGVycyA9IHBhcmFtZXRlcnMuc3RyaXAoKQ0KCQkNCgkJaWYgcGFyYW1ldGVyc1sw
XSA9PSAnLScgYW5kIGxlbihwYXJhbWV0ZXJzKSA9PSAxOg0KCQkJZGVsIFdBVENIRVJTW2ppZF0N
CgkJCXJlcCA9IHUnaXN0IG9mIGNvbmZlcmVuY2VzIHVuZGVyIG1vbml0b3JpbmcgaXMgY2xlYXJl
ZCwgdGhlIG9ic2VydmF0aW9uIHdpdGhkcmF3biEnDQoJCQlyZXBseSh0eXBlLCBzb3VyY2UscmVw
KQ0KCQllbGlmIHBhcmFtZXRlcnNbMF0gPT0gJy0nIGFuZCBsZW4ocGFyYW1ldGVycykgPiAxIGFu
ZCBwYXJhbWV0ZXJzWzE6XS5pc2RpZ2l0KCk6DQoJCQl3Z2NobiA9IGludChwYXJhbWV0ZXJzWzE6
XSkNCgkJCQ0KCQkJaWYgbm90IFdBVENIRVJTLmhhc19rZXkoamlkKToNCgkJCQlyZXBseSh0eXBl
LCBzb3VyY2UsIHUnTW9uaXRvcmluZyBpcyBub3Qgc2V0dXAhJykNCgkJCWVsc2U6DQoJCQkJd2dj
aHNsID0gV0FUQ0hFUlNbamlkXVsnZ2NocyddDQoJCQkJDQoJCQkJaWYgbGVuKHdnY2hzbCkgPCB3
Z2NobjoNCgkJCQkJcmVwbHkodHlwZSwgc291cmNlLCB1J1dyb25nIG51bWJlciBvZiBjb25mZXJl
bmNlIScpDQoJCQkJZWxzZToNCgkJCQkJd2djaCA9IHdnY2hzbFt3Z2Nobi0xXQ0KCQkJCQlXQVRD
SEVSU1tqaWRdWydnY2hzJ10ucmVtb3ZlKHdnY2gpDQoJCQkJCQ0KCQkJCQlpZiBub3QgV0FUQ0hF
UlNbamlkXVsnZ2NocyddOg0KCQkJCQkJZGVsIFdBVENIRVJTW2ppZF0NCgkJCQkJCQ0KCQkJCQly
ZXAgPSB1J0NvbmZlcmVuY2UgJXMgcmVtb3ZlZCBmcm9tIHRoZSBsaXN0IG9mIGNvbmZlcmVuY2Vz
IHVuZGVyIG1vbml0b3JpbmcsIHN1cnZlaWxsYW5jZSB3aXRoZHJhd24hJyAlICh3Z2NoKQ0KCQkJ
CQkJDQoJCQkJCXJlcGx5KHR5cGUsIHNvdXJjZSxyZXApDQoJCWVsc2U6CQ0KCQkJd2djaCA9IHBh
cmFtZXRlcnMNCgkJCQ0KCQkJaWYgbm90IGNoZWNrX2ppZCh3Z2NoKToNCgkJCQlyZXBseSh0eXBl
LCBzb3VyY2UsdSdJbnZhbGlkIHN5bnRheCEnKQ0KCQkJCXJldHVybg0KCQkJDQoJCQlpZiBub3Qg
V0FUQ0hFUlMuaGFzX2tleShqaWQpOg0KCQkJCVdBVENIRVJTW2ppZF0gPSB7J2djaHMnOltdfQ0K
CQkJDQoJCQlpZiBub3Qgd2djaCBpbiBXQVRDSEVSU1tqaWRdWydnY2hzJ106DQoJCQkJaWYgR1JP
VVBDSEFUUy5oYXNfa2V5KHdnY2gpOg0KCQkJCQlXQVRDSEVSU1tqaWRdWydnY2hzJ10uYXBwZW5k
KHdnY2gpDQoJCQkJCXJlcGx5KHR5cGUsIHNvdXJjZSwgdSdNb25pdG9yaW5nIGZvciBjb25mZXJl
bmNlICVzIHNldHVwIScgJSAod2djaCkpDQoJCQkJZWxzZToNCgkJCQkJaWYgbm90IFdBVENIRVJT
W2ppZF1bJ2djaHMnXToNCgkJCQkJCWRlbCBXQVRDSEVSU1tqaWRdDQoJCQkJCQkNCgkJCQkJcmVw
bHkodHlwZSwgc291cmNlLCB1J1VuYWJsZSB0byBzZXR1cCBtb25pdG9yaW5nLCByZWFzb246IGJv
dCBpcyBub3QgcHJlc2VudCBhdCB0aGlzIGNvbmZlcmVuY2UhJykNCgkJCWVsc2U6DQoJCQkJcmVw
bHkodHlwZSwgc291cmNlLCB1J01vbml0b3Jpbmcgb2YgY29uZmVyZW5jZSBoYXMgZXN0YWJsaXNo
ZWQhJykNCgllbHNlOg0KCQlpZiBXQVRDSEVSUy5oYXNfa2V5KGppZCk6DQoJCQlyZXAgPSB1J0Nv
bmZlcmVuY2UgZm9yIHdoaWNoIHRoZSBlc3RhYmxpc2hlZCBvYnNlcnZhdGlvbiAodG90YWw6ICVk
KTpcblxuJXMnDQoJCQkNCgkJCXdnY2hzbCA9IFdBVENIRVJTW2ppZF1bJ2djaHMnXQ0KCQkJDQoJ
CQl3Z25saSA9IGdldF9udW1fbGlzdCh3Z2Noc2wpDQoJCQkNCgkJCXJlcGx5KHR5cGUsIHNvdXJj
ZSxyZXAgJSAobGVuKHdnY2hzbCksJ1xuJy5qb2luKHdnbmxpKSkpDQoJCWVsc2U6CQ0KCQkJcmVw
bHkodHlwZSwgc291cmNlLCB1J01vbml0b3JpbmcgaXMgbm90IHNldHVwIScpDQoNCnJlZ2lzdGVy
X2NvbW1hbmRfaGFuZGxlcihoYW5kbGVyX3dhdGNoZXIsIENPTU1fUFJFRklYKyd3YXRjaGVyJywg
WydzdXBlcmFkbWluJywnYWxsJywnKiddLCAxMDAsICdBbGxvd3MgeW91IHRvIHJlbW90ZWx5IGlu
IHJlYWwtdGltZSBtb25pdG9yIG90aGVyIGNvbmZlcmVuY2VzIGluIHdoaWNoIHRoZSBib3QsIGku
JiMxMDc3Oy4gZXN0YWJsaXNoZXMgYW4gb2JzZXJ2ZXIuIFdpdGhvdXQgYXJndW1lbnRzIGRpc3Bs
YXlzIGEgbGlzdCBvZiBjb25mZXJlbmNlcyBpbiB3aGljaCB0aGUgb2JzZXJ2ZXIgaXMgc2V0LiBo
ZW4geW91IHNwZWNpZnkgYSBuZWdhdGl2ZSBudW1iZXIgcmVtb3ZlcyB0aGUgY29uZmVyZW5jZSB3
aXRoIHRoaXMgbnVtYmVyIGZyb20gdGhlIGxpc3QgYW5kIHJlbW92ZXMgc3VydmVpbGxhbmNlLics
IENPTU1fUFJFRklYKyd3YXRjaGVyIFs8Y29uZmVyZW5jZT5dfFstPG51bWJlciBmcm9tIHRoZSBs
aXN0Pl0nLCBbQ09NTV9QUkVGSVgrJ3dhdGNoZXInLENPTU1fUFJFRklYKyd3YXRjaGVyIGJvdHpv
bmVAY29uZmVyZW5jZS5qc21hcnQud2ViLmlkJyxDT01NX1BSRUZJWCsnd2F0Y2hlciAtMiddKQ0K
DQpyZWdpc3Rlcl9qb2luX2hhbmRsZXIoaGFuZGxlcl93YXRjaGVyX2pvaW4pDQpyZWdpc3Rlcl9s
ZWF2ZV9oYW5kbGVyKGhhbmRsZXJfd2F0Y2hlcl9sZWF2ZSkNCnJlZ2lzdGVyX3ByZXNlbmNlX2hh
bmRsZXIoaGFuZGxlcl93YXRjaGVyX3ByZXNlbmNlKQ0KcmVnaXN0ZXJfbWVzc2FnZV9oYW5kbGVy
KGhhbmRsZXJfd2F0Y2hlcl9tZXNzKQ==
|
darkvip3r/Tron
|
plugins/watcher_plugin.py
|
Python
|
gpl-2.0
| 8,425
|
from google.appengine.ext import db
from django.contrib.auth.models import User
class Greeting(db.Model):
author = db.ReferenceProperty(User)
content = db.StringProperty(multiline=True)
date = db.DateTimeProperty(auto_now_add=True)
|
adamfisk/littleshoot-client
|
server/appengine/users/models.py
|
Python
|
gpl-2.0
| 246
|
import Tkinter as tk
import matplotlib
#matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
def create(dataset, window):
# Create a figure
fig = plt.figure()
ax = fig.gca()
# Arrange data to use in the figure
time = dataset.index.to_pydatetime()
t = "Time"
x = "Volume"
y = "Open"
xValues = dataset[x]
#yValues = dataset[y]
#ax.scatter(xValues, yValues)
ax.scatter(time, xValues)
# Automatically choose the best dates
xtick_locator = mdates.AutoDateLocator()
xtick_formatter = mdates.AutoDateFormatter(xtick_locator)
ax.xaxis.set_major_locator(xtick_locator)
ax.xaxis.set_major_formatter(xtick_formatter)
# Rotate the x ticks
plt.setp(plt.xticks()[1], rotation = 60)
# Add grids and the labels
ax.grid(True)
ax.set_xlabel(t); ax.set_ylabel(x); ax.set_title(x + " vs " + t)
# Add the graphic
canvas = FigureCanvasTkAgg(fig, master = window)
canvas.show()
canvas.get_tk_widget().pack(side = tk.TOP, fill = tk.BOTH, expand = 1)
# Add the (interactive) toolbar
toolbar = NavigationToolbar2TkAgg(canvas, window)
toolbar.update()
canvas._tkcanvas.pack(side = tk.TOP, fill = tk.BOTH, expand = 1)
return [fig, ax, canvas]
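# Usage sketch (editor note, not in the original module): `dataset` is assumed to
# be a pandas DataFrame with a DatetimeIndex and a "Volume" column, and `window`
# a Tk root or frame, e.g.:
#   root = tk.Tk()
#   graph = create(dataset, root)           # build figure, canvas and toolbar
#   graph = update(dataset2, root, graph)   # redraw later with new data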
def update(dataset, window, graph):
fig, ax, canvas = graph
print "UPDATING NOW"
print "dataset is now = ", dataset
# Close the current figure
#plt.close()
#global fig
#fig = graph[0]
# Clear only the axes; calling fig.clf() here would detach `ax` from the figure
# and the new scatter would never be drawn
ax.clear()
#graph[0].clf()
#fig.clf()
#canvas.clear()
# Create a figure
#fig = plt.figure()
#ax = fig.gca()
# Arrange data to use in the figure
time = dataset.index.to_pydatetime()
t = "Time"
x = "Volume"
y = "Open"
xValues = dataset[x]
#yValues = dataset[y]
#ax.scatter(xValues, yValues)
ax.scatter(time, xValues)
# Automatically choose the best dates
xtick_locator = mdates.AutoDateLocator()
xtick_formatter = mdates.AutoDateFormatter(xtick_locator)
ax.xaxis.set_major_locator(xtick_locator)
ax.xaxis.set_major_formatter(xtick_formatter)
# Rotate the x ticks
#plt.setp(plt.xticks()[1], rotation = 60)
## Add grids and the labels
#ax.grid(True)
#ax.set_xlabel(t); ax.set_ylabel(x); ax.set_title(x + " vs " + t)
# Add the graphic
#canvas = FigureCanvasTkAgg(fig, master = window)
#canvas.show()
#canvas.get_tk_widget().pack(side = tk.TOP, fill = tk.BOTH, expand = 1)
canvas.draw()
# Add the (interactive) toolbar
#toolbar = NavigationToolbar2TkAgg(canvas, window)
#toolbar.update()
#canvas._tkcanvas.pack(side = tk.TOP, fill = tk.BOTH, expand = 1)
return [fig, ax, canvas]
#return fig
|
dekespo/DA_GUI_py
|
plots.py
|
Python
|
gpl-2.0
| 2,731
|
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
import csv
import networkx as nx
import sys
import os
import matplotlib.pyplot as plt
node_file = open('test-graph2.csv', 'r')
dicc_nodes = {}
data_final = open('info-out.csv', 'w')
data_final.close()
data_final = open('info-out.csv', 'a')
viewed_lines = {}
temp_counter = 0
lines_node = node_file.readlines()
count = 0
dicc_nodes = {}
tri_format = False
ex_line = lines_node[0].split(',')
if len(ex_line) == 3:
tri_format = True
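# Editor note: each input line is expected to be "node1,node2" or
# "node1,node2,weight"; with two columns the edge weight is derived from how
# many times the identical line repeats in the file.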
for node in lines_node:
if node not in viewed_lines.keys():
viewed_lines[node] = 1
else:
temp_counter = viewed_lines[node]
temp_counter += 1
viewed_lines[node] = temp_counter
nodes = node.split(",")
node1 = nodes[0]
node2 = nodes[1]
if node1 not in dicc_nodes.keys():
dicc_nodes[node1] = 1
else:
counter = dicc_nodes[node1]
counter += 1
dicc_nodes[node1] = counter
if node2 not in dicc_nodes.keys():
dicc_nodes[node2] = 1
else:
counter = dicc_nodes[node2]
counter += 1
dicc_nodes[node2] = counter
g = nx.Graph()
list_nodes = dicc_nodes.keys()
for node in viewed_lines.keys():
nodes = node.split(",")
node1 = nodes[0]
node2 = nodes[1]
if tri_format:
g.add_edge(node1, node2, weight=float(nodes[2]))
else:
g.add_edge(node1, node2[:-1], weight=float(viewed_lines[node]))
print "==================\r\nNodes information: "
print g.nodes()
print "Degree:"
print nx.degree(g)
print "Density: "
print nx.density(g)
print "==================\r\nCentrality: "
print "Closeness Centrality: "
print nx.closeness_centrality(g)
print "Degree Centrality: "
print nx.degree_centrality(g)
print "Betweenness Centrality: "
print nx.betweenness_centrality(g)
print "Edge Betweenness: "
print nx.edge_betweenness(g)
nx.draw(g)
print "\r\nShowing graph... Close the window when you're done to finish"
plt.show()
data_final.close()
print "Fin del programa"
|
LibreSoftTeam/R-SNA
|
net.py
|
Python
|
gpl-2.0
| 2,022
|
from django import template
import locale
register = template.Library()
#@register.filter(name='currency')
#def currency(value):
# try:
# locale.setlocale(locale.LC_ALL,'en_US.UTF-8')
# except:
# locale.setlocale(locale.LC_ALL,'')
# loc = locale.localeconv()
# return locale.currency(value, loc['currency_symbol'], grouping=True)
# this can be used if you're having trouble configuring the proper locale
# for your operating system
@register.filter(name='currency')
def currency(value):
return 'Rp' + str(value)
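# Usage sketch (editor note; `product.price` is a hypothetical context variable):
#   {% load catalog_filters %}
#   {{ product.price|currency }}   -> "Rp15000"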
# NOTE: this still needs to be followed up...
|
mrpindo/openshift-estore
|
tokoku/tokoku/templatetags/TEMP/catalog_filters.py
|
Python
|
gpl-2.0
| 578
|
from django.core import mail
from django.conf import settings
from django.template import Context, loader
from django.utils.translation import ugettext as _
def colab_send_email(subject, message, to):
from_email = settings.COLAB_FROM_ADDRESS
return mail.send_mail(subject, message, from_email, [to])
def send_verification_email(to, user, validation_key):
subject = _('Please verify your email ') + u'{}'.format(to)
msg_tmpl = \
loader.get_template('superarchives/emails/email_verification.txt')
message = msg_tmpl.render(Context({'to': to, 'user': user,
'key': validation_key,
'SITE_URL': settings.SITE_URL}))
return colab_send_email(subject, message, to)
|
rafamanzo/colab
|
colab/super_archives/utils/email.py
|
Python
|
gpl-2.0
| 771
|
"""
BitBake 'Fetch' implementation for bzr.
"""
# Copyright (C) 2007 Ross Burton
# Copyright (C) 2007 Richard Purdie
#
# Classes for obtaining upstream sources for the
# BitBake build tools.
# Copyright (C) 2003, 2004 Chris Larson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
import logging
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger
class Bzr(FetchMethod):
def supports(self, url, ud, d):
return ud.type in ['bzr']
def urldata_init(self, ud, d):
"""
init bzr specific variable within url data
"""
# Create paths to bzr checkouts
relpath = self._strip_leading_slashes(ud.path)
ud.pkgdir = os.path.join(data.expand('${BZRDIR}', d), ud.host, relpath)
ud.setup_revisons(d)
if not ud.revision:
ud.revision = self.latest_revision(ud.url, ud, d)
ud.localfile = data.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision), d)
def _buildbzrcommand(self, ud, d, command):
"""
Build up a bzr command line based on ud
command is "fetch", "update", "revno"
"""
basecmd = data.expand('${FETCHCMD_bzr}', d)
proto = ud.parm.get('protocol', 'http')
bzrroot = ud.host + ud.path
options = []
if command == "revno":
bzrcmd = "%s revno %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
else:
if ud.revision:
options.append("-r %s" % ud.revision)
if command == "fetch":
bzrcmd = "%s co %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
elif command == "update":
bzrcmd = "%s pull %s --overwrite" % (basecmd, " ".join(options))
else:
raise FetchError("Invalid bzr command %s" % command, ud.url)
return bzrcmd
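# Example (editor sketch; assumes FETCHCMD_bzr expands to "/usr/bin/bzr" and a
# pinned revision of 123): for command == "fetch" this returns
#   "/usr/bin/bzr co -r 123 http://host/path/to/repo"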
def download(self, loc, ud, d):
"""Fetch url"""
if os.access(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir), '.bzr'), os.R_OK):
bzrcmd = self._buildbzrcommand(ud, d, "update")
logger.debug(1, "BZR Update %s", loc)
bb.fetch2.check_network_access(d, bzrcmd, ud.url)
os.chdir(os.path.join (ud.pkgdir, os.path.basename(ud.path)))
runfetchcmd(bzrcmd, d)
else:
bb.utils.remove(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir)), True)
bzrcmd = self._buildbzrcommand(ud, d, "fetch")
bb.fetch2.check_network_access(d, bzrcmd, ud.url)
logger.debug(1, "BZR Checkout %s", loc)
bb.utils.mkdirhier(ud.pkgdir)
os.chdir(ud.pkgdir)
logger.debug(1, "Running %s", bzrcmd)
runfetchcmd(bzrcmd, d)
os.chdir(ud.pkgdir)
scmdata = ud.parm.get("scmdata", "")
if scmdata == "keep":
tar_flags = ""
else:
tar_flags = "--exclude '.bzr' --exclude '.bzrtags'"
# tar them up to a defined filename
runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(ud.pkgdir)), d, cleanup = [ud.localpath])
def supports_srcrev(self):
return True
def _revision_key(self, url, ud, d, name):
"""
Return a unique key for the url
"""
return "bzr:" + ud.pkgdir
def _latest_revision(self, url, ud, d, name):
"""
Return the latest upstream revision number
"""
logger.debug(2, "BZR fetcher hitting network for %s", url)
bb.fetch2.check_network_access(d, self._buildbzrcommand(ud, d, "revno"), ud.url)
output = runfetchcmd(self._buildbzrcommand(ud, d, "revno"), d, True)
return output.strip()
def _sortable_revision(self, url, ud, d):
"""
Return a sortable revision number which in our case is the revision number
"""
return self._build_revision(url, ud, d)
def _build_revision(self, url, ud, d):
return ud.revision
|
sentient-energy/emsw-bitbake-mirror
|
lib/bb/fetch2/bzr.py
|
Python
|
gpl-2.0
| 4,722
|
__copyright__ = """
Copyright (C) 2006, Robin Rosenberg <robin.rosenberg@dewire.com>
Modified by Catalin Marinas
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
import sys
from stgit.argparse import opt
from stgit.commands import common
from stgit.lib import transaction
from stgit import argparse
help = 'Push patches to the top, even if applied'
kind = 'stack'
usage = ['[--] <patches>',
'-s <series>']
description = """
Push a patch or a range of patches to the top even if applied. The
necessary pop and push operations will be performed to accomplish
this. The '--series' option can be used to rearrange the (top) patches
as specified by the given series file (or the standard input)."""
args = [argparse.patch_range(argparse.applied_patches,
argparse.unapplied_patches)]
options = [
opt('-s', '--series', metavar = 'FILE',
short = 'Rearrange according to the series FILE')
] + argparse.keep_option()
directory = common.DirectoryHasRepositoryLib()
def func(parser, options, args):
"""Reorder patches to make the named patch the topmost one.
"""
if options.series and args:
parser.error('<patches> cannot be used with --series')
elif not options.series and not args:
parser.error('incorrect number of arguments')
stack = directory.repository.current_stack
if options.series:
if options.series == '-':
f = sys.stdin
else:
f = file(options.series)
patches = []
for line in f:
patch = re.sub('#.*$', '', line).strip()
if patch:
patches.append(patch)
else:
patches = common.parse_patches(args, stack.patchorder.all)
if not patches:
raise common.CmdException('No patches to float')
applied = [p for p in stack.patchorder.applied if p not in patches] + \
patches
unapplied = [p for p in stack.patchorder.unapplied if p not in patches]
iw = stack.repository.default_iw
clean_iw = (not options.keep and iw) or None
trans = transaction.StackTransaction(stack, 'sink',
check_clean_iw = clean_iw)
try:
trans.reorder_patches(applied, unapplied, iw = iw)
except transaction.TransactionHalted:
pass
return trans.run(iw)
|
guanqun/stgit
|
stgit/commands/float.py
|
Python
|
gpl-2.0
| 2,916
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# widgetskeywords - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Keywords in the widgets files"""
def get_widgets_specs():
"""Return an ordered list of (keywords, spec) tuples. The order is
used for configuration order consistency.
"""
specs = []
specs.append(('SITE_SCRIPT_DEPS', {
'Description': 'Scripts needed for your widgets',
'Example': 'jquery.js',
'Type': 'multiplestrings',
'Value': [],
'Context': 'select',
'Required': False,
}))
specs.append(('PREMENU', {
'Description': 'Widgets displayed before menu',
'Example': '',
'Type': 'multiplestrings',
'Value': [],
'Context': 'menu',
'Required': False,
}))
specs.append(('POSTMENU', {
'Description': 'Widgets displayed after menu',
'Example': '',
'Type': 'multiplestrings',
'Value': [],
'Context': 'menu',
'Required': False,
}))
specs.append(('PRECONTENT', {
'Description': 'Widgets displayed before content',
'Example': '',
'Type': 'multiplestrings',
'Value': [],
'Context': 'content',
'Required': False,
}))
specs.append(('POSTCONTENT', {
'Description': 'Widgets displayed after content',
'Example': '',
'Type': 'multiplestrings',
'Value': [],
'Context': 'content',
'Required': False,
}))
return specs
def get_keywords_dict():
"""Return mapping between widgets keywords and their specs"""
# create the keywords in a single dictionary
return dict(get_widgets_specs())
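# Usage sketch (editor note): build the lookup once and inspect a spec, e.g.
#   keywords = get_keywords_dict()
#   keywords['PREMENU']['Description']   # -> 'Widgets displayed before menu'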
|
heromod/migrid
|
mig/shared/widgetskeywords.py
|
Python
|
gpl-2.0
| 2,555
|
#!/usr/bin/env python
import os
import re
from setuptools import setup
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
PKG_NAME = "woogenerator"
# Get version from __init__.py file
VERSION = ""
with open("%s/__init__.py" % PKG_NAME, "r") as fd:
try:
VERSION = re.search(
r"^__version__\s*=\s*['\"]([^\"]*)['\"]", fd.read(),
re.MULTILINE
)
VERSION = VERSION.group(1)
except AttributeError:
pass
if not VERSION:
raise RuntimeError("Cannot find version information")
# Get long description
README = open(os.path.join(os.path.dirname(__file__), "README.md")).read()
requirements = [
]
setup(
name='WooGenerator',
version=VERSION,
description='Synchronizes user and product data from disparate APIs',
long_description=README,
author='Derwent McElhinney',
author_email='derwent@laserphile.com',
url='https://github.com/derwentx/WooGenerator',
packages=[
PKG_NAME, 'tests'
],
install_requires=[
'bleach',
'ConfigArgParse',
'exitstatus',
'httplib2',
'kitchen',
'npyscreen',
'paramiko',
'phpserialize',
'piexif',
'PyMySQL',
'requests',
'simplejson',
'sshtunnel',
'tabulate',
'unicodecsv',
'google_api_python_client',
'PyYAML',
'wordpress-api',
'pyxero',
'bs4',
'dill',
'lxml',
'pudb'
],
setup_requires=[
'pytest-runner'
],
tests_require=[
'pytest'
],
entry_points={ # Creates a console script entry point on install
'console_scripts': [
'{0}={0}.generator:main'.format(PKG_NAME),
'{0}_gui={0}.gui:main'.format(PKG_NAME),
],
},
)
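# Editor note: installing with "pip install -e ." exposes the console scripts
# defined in entry_points above, i.e. "woogenerator" and "woogenerator_gui".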
|
derwentx/WooGenerator
|
setup.py
|
Python
|
gpl-2.0
| 1,886
|
from __future__ import division
import numpy as np
from matplotlib import pyplot as plt
plt.ion()
np.random.seed(0)
import pyhsmm
from pyhsmm.util.text import progprint_xrange
from pyhsmm.util.stats import whiten, cov
import autoregressive.models as m
import autoregressive.distributions as d
###################
# generate data #
###################
As = [0.99*np.hstack((-np.eye(2),2*np.eye(2))),
0.99*np.array([[np.cos(np.pi/6),-np.sin(np.pi/6)],[np.sin(np.pi/6),np.cos(np.pi/6)]]).dot(np.hstack((-np.eye(2),np.eye(2)))) + np.hstack((np.zeros((2,2)),np.eye(2))),
0.99*np.array([[np.cos(-np.pi/6),-np.sin(-np.pi/6)],[np.sin(-np.pi/6),np.cos(-np.pi/6)]]).dot(np.hstack((-np.eye(2),np.eye(2)))) + np.hstack((np.zeros((2,2)),np.eye(2)))]
truemodel = m.ARHSMM(
alpha=2.,init_state_distn='uniform',
obs_distns=[d.AutoRegression(A=A,sigma=np.eye(2)) for A in As],
dur_distns=[pyhsmm.basic.distributions.PoissonDuration(alpha_0=3*50,beta_0=3)
for state in range(len(As))],
)
data, labels = truemodel.generate(1000)
data += np.random.normal(size=data.shape) # some extra noise
fig, spa = plt.subplots(2,1)
spa[0].plot(data[:,0],data[:,1],'bx-')
spa[1].plot(data,'bx-')
spa[1].set_xlim(0,data.shape[0])
fig.suptitle('data')
truemodel.plot()
plt.gcf().suptitle('truth')
##################
# create model #
##################
Nmax = 25
affine = True
nlags = 2
model = m.ARWeakLimitStickyHDPHMM(
alpha=4.,gamma=4.,kappa=100.,
init_state_distn='uniform',
obs_distns=[
d.AutoRegression(
nu_0=2.5,
S_0=2.5*np.eye(2),
M_0=np.zeros((2,2*nlags+affine)),
K_0=10*np.eye(2*nlags+affine),
affine=affine)
for state in range(Nmax)],
)
model.add_data(data)
###############
# inference #
###############
from moviepy.video.io.bindings import mplfig_to_npimage
from moviepy.editor import VideoClip
fig = model.make_figure()
plt.set_cmap('terrain')
plot_slice = slice(0,300)
model.plot(fig=fig,draw=False,plot_slice=plot_slice)
def make_frame_mpl(t):
model.resample_model()
model.plot(fig=fig,update=True,draw=False,plot_slice=plot_slice)
plt.tight_layout()
return mplfig_to_npimage(fig)
animation = VideoClip(make_frame_mpl, duration=10)
animation.write_videofile('gibbs.mp4',fps=30)
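# Editor note: at fps=30 and duration=10 the clip renders about 300 frames, so
# make_frame_mpl performs roughly 300 Gibbs sweeps (one model.resample_model()
# call per rendered frame).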
|
mattjj/pyhsmm-autoregressive
|
examples/animation.py
|
Python
|
gpl-2.0
| 2,387
|
import os
from flask import Flask, redirect
import urllib2
import socket
import threading
import Queue
def time_connection(server, queue):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect(server)
print ("Connected to " + str(server))
if queue.empty():  # queue.empty is a method; only report the first server to respond
queue.put(server)
s.close()
except:
print(str(server) + " does not respond!")
app = Flask(__name__)
@app.route('/<mountpoint>')
def get_mountpoint(mountpoint):
queue = Queue.Queue()
data = urllib2.urlopen("http://150.214.150.67/servers.txt")
for line in data:
(host, port) = line.split(':')
port = int(port)
server = (host, port)
print("Processing server: " + str(server))
threading.Thread(target=time_connection, args=(server, queue)).start()
tmp = queue.get()
selected_server = tmp[0] + ':' + str(tmp[1])
url = 'http://' + selected_server + '/' + str(mountpoint)
print("Selected server: " + url)
return redirect(url, code=302)
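# Example (editor sketch, hypothetical mountpoint): GET /radio.ogg answers with
# a 302 redirect to http://<first responding server>/radio.ogg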
if __name__ == '__main__':
# Bind to PORT if defined, otherwise default to 5001.
port = int(os.environ.get('PORT', 5001))
#app.debug = True
app.run(host='0.0.0.0', port=port)
|
Sistemas-Multimedia/Icecast-tracker
|
Flask/tracker2.py
|
Python
|
gpl-2.0
| 1,235
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
roughness.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4.QtGui import QIcon
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputRaster
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class roughness(GdalAlgorithm):
INPUT = 'INPUT'
BAND = 'BAND'
COMPUTE_EDGES = 'COMPUTE_EDGES'
OUTPUT = 'OUTPUT'
#def getIcon(self):
# return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'dem.png'))
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Roughness')
self.group, self.i18n_group = self.trAlgorithm('[GDAL] Analysis')
self.addParameter(ParameterRaster(self.INPUT, self.tr('Input layer')))
self.addParameter(ParameterNumber(self.BAND,
self.tr('Band number'), 1, 99, 1))
self.addParameter(ParameterBoolean(self.COMPUTE_EDGES,
self.tr('Compute edges'), False))
self.addOutput(OutputRaster(self.OUTPUT, self.tr('Roughness')))
def getConsoleCommands(self):
arguments = ['roughness']
arguments.append(unicode(self.getParameterValue(self.INPUT)))
output = unicode(self.getOutputValue(self.OUTPUT))
arguments.append(output)
arguments.append('-of')
arguments.append(GdalUtils.getFormatShortNameFromFilename(output))
arguments.append('-b')
arguments.append(unicode(self.getParameterValue(self.BAND)))
if self.getParameterValue(self.COMPUTE_EDGES):
arguments.append('-compute_edges')
return ['gdaldem', GdalUtils.escapeAndJoin(arguments)]
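# Example (editor sketch; file names are hypothetical): with INPUT=/data/dem.tif,
# OUTPUT=/data/rough.tif, BAND=1 and COMPUTE_EDGES=True this returns
#   ['gdaldem', 'roughness /data/dem.tif /data/rough.tif -of GTiff -b 1 -compute_edges']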
|
SebDieBln/QGIS
|
python/plugins/processing/algs/gdal/roughness.py
|
Python
|
gpl-2.0
| 3,008
|
#!/usr/bin/python
import vertigo, sys, math
ravefactory=vertigo.RaveVertexFactory (
vertigo.RaveConstantMagneticField(0.,0.,4.), vertigo.RaveVacuumPropagator() )
eventfactory=vertigo.EventFactory ( "gun:kinematics-mass:142")
event=eventfactory.next()
simparticles=event.simParticles()
for simparticle in simparticles:
print "simparticle",simparticle
s0=simparticles[0].state().p4().copy()
s1=simparticles[1].state().p4().copy()
E=s0.energy() + s1.energy()
px=s0.x() + s1.x()
py=s0.y() + s1.y()
pz=s0.z() + s1.z()
print "Manually s0+s1", math.sqrt ( E*E - px*px - py*py - pz*pz )
print "Direct method s0+s1=",( s0+s1 ).m()
|
newtrino/vertigo
|
examples/python/InvariantMass.py
|
Python
|
gpl-2.0
| 637
|
# -*- coding: utf-8 -*-
"""
Created 2015
@author: hrotzing
Modified 2017 by S1
"""
import logging
import os
import time
import qkit
alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
class DateTimeGenerator(object):
"""DateTimeGenerator class to provide a timestamp for each measurement file
upon creation.
For sorting and unique identification across multiple measurement setups
we provide a timestamp for each h5 file created with qkit. We use the unix
timestamp to get time resolution of one second which should be enough for
our needs. The integer timestamp is then converted either into a HHMMSS
representation using day and month information to create a folder, or the
timestamp gets converted using the alphabet to create a 6 digit UUID.
"""
def __init__(self):
self.returndict = {}
self.returndict['_unix_timestamp'] = int(time.time())
self.returndict['_localtime'] = time.localtime(self.returndict['_unix_timestamp'])
self.returndict['_timestamp'] = time.asctime(self.returndict['_localtime'])
self.returndict['_timemark'] = time.strftime('%H%M%S', self.returndict['_localtime'])
self.returndict['_datemark'] = time.strftime('%Y%m%d', self.returndict['_localtime'])
self.returndict['_uuid'] = encode_uuid(self.returndict['_unix_timestamp'])
# call for h5 filename, qkit config gets checked for encoding procedure.
def new_filename(self, name=None):
if qkit.cfg.get('datafolder_structure',1) == 2:
self.new_filename_v2(name) # 6 digit UUID
else:
self.new_filename_v1(name) # HHMMSS representation
self.returndict['_folder'] = os.path.join(qkit.cfg['datadir'], self.returndict['_relfolder'])
self.returndict['_relpath'] = os.path.join(self.returndict['_relfolder'],self.returndict['_filename'])
self.returndict['_filepath'] = os.path.join(self.returndict['_folder'], self.returndict['_filename'])
return self.returndict
def new_filename_v1(self, name):
filename = str(self.returndict['_timemark'])
if name != '' and name is not None:
filename += '_' + str(name)
self.returndict['_filename'] = filename + '.h5'
'''Old filename with datadir/YYMMDD/HHMMSS_name/HHMMSS_name.h5'''
self.returndict['_relfolder'] = os.path.join(
self.returndict['_datemark'],
filename
)
def new_filename_v2(self, name):
filename = str(self.returndict['_uuid'])
if name != '' and name is not None:
filename += '_' + str(name)
self.returndict['_filename'] = filename + '.h5'
'''New filename with datadir/run_id/user/uuid_name/uuid_name.h5'''
self.returndict['_relfolder'] = os.path.join(
qkit.cfg.get('run_id', 'NO_RUN').strip().replace(" ", "_"),
qkit.cfg.get('user', 'John_Doe').strip().replace(" ", "_"),
filename
)
def encode_uuid(value):
"""Encodes the integer unix timestamp into a 6 digit UUID using the alphabet.
Args:
Integer-cast unix timestamp.
Return:
6 digit UUID string.
"""
# if not value: value = self._unix_timestamp
output = ''
la = len(alphabet)
while value:
output += alphabet[value % la]
value = int(value / la)
return output[::-1]
def decode_uuid(string):
"""Decodes the 6 digit UUID back into integer unix timestamp.
Args:
6 digit UUID string.
Return:
Integer-cast unix timestamp.
"""
# if not string: string = self._uuid
output = 0
multiplier = 1
string = string[::-1].upper()
la = len(alphabet)
while string != '':
f = alphabet.find(string[0])
if f == -1:
raise ValueError("Can not decode this: {}<--".format(string[::-1]))
output += f * multiplier
multiplier *= la
string = string[1:]
return output
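# Worked example (editor note, computed by hand with the alphabet above):
#   encode_uuid(1500000000) -> 'OT27EO'
#   decode_uuid('OT27EO')   -> 1500000000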
|
qkitgroup/qkit
|
qkit/storage/hdf_DateTimeGenerator.py
|
Python
|
gpl-2.0
| 4,039
|
import xbmc
import xbmcgui
import xbmcplugin
import xbmcaddon
import sys
import json
from .simple_logging import SimpleLogging
log = SimpleLogging(__name__)
addon = xbmcaddon.Addon()
class HomeWindow:
"""
xbmcgui.Window(10000) with add-on id prefixed to keys
"""
def __init__(self):
self.id_string = 'plugin.video.embycon-%s'
self.window = xbmcgui.Window(10000)
def get_property(self, key):
key = self.id_string % key
value = self.window.getProperty(key)
# log.debug('HomeWindow: getProperty |{0}| -> |{1}|', key, value)
return value
def set_property(self, key, value):
key = self.id_string % key
# log.debug('HomeWindow: setProperty |{0}| -> |{1}|', key, value)
self.window.setProperty(key, value)
def clear_property(self, key):
key = self.id_string % key
# log.debug('HomeWindow: clearProperty |{0}|', key)
self.window.clearProperty(key)
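# Usage sketch (editor note): keys are namespaced with the add-on id, e.g.
#   home = HomeWindow()
#   home.set_property("widget", "true")   # stored as "plugin.video.embycon-widget"
#   home.get_property("widget")           # -> "true"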
def add_menu_directory_item(label, path, folder=True, art=None):
li = xbmcgui.ListItem(label, path=path)
if art is None:
art = {}
art["thumb"] = addon.getAddonInfo('icon')
li.setArt(art)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=path, listitem=li, isFolder=folder)
def get_kodi_version():
json_data = xbmc.executeJSONRPC(
'{ "jsonrpc": "2.0", "method": "Application.GetProperties", "params": {"properties": ["version", "name"]}, "id": 1 }')
result = json.loads(json_data)
try:
result = result.get("result")
version_data = result.get("version")
version = float(str(version_data.get("major")) + "." + str(version_data.get("minor")))
log.debug("Version: {0} - {1}", version, version_data)
except:
version = 0.0
log.error("Version Error : RAW Version Data: {0}", result)
return version
|
faush01/plugin.video.embycon
|
resources/lib/kodi_utils.py
|
Python
|
gpl-2.0
| 1,891
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt4 (Qt v4.8.7)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x01\xd9\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xe0\x01\x15\x12\x23\x06\xc8\x5f\x9a\x75\x00\x00\x01\x66\x49\x44\
\x41\x54\x48\xc7\xd5\xd3\xb1\x4b\x16\x61\x1c\x07\xf0\xcf\x73\x16\
\x35\x68\x14\xe1\x60\xd8\xe4\xe4\xd6\xe0\x10\x44\x5b\x9b\x4d\x85\
\x1e\xf4\x16\x04\xfd\x01\x42\xe0\x1a\xee\x41\xd0\x56\x63\x4d\x25\
\xe8\xa2\xa3\x58\x53\xd0\x20\xd1\x10\x44\x63\x93\x43\xe8\xa0\x62\
\xfa\xc6\xfb\xb8\x9c\xf1\x74\xa5\xde\x73\x6f\x0d\xfd\x96\xbb\xe3\
\x9e\xfb\x3c\xdf\xe7\xb9\xe7\xc7\xff\x5a\x21\x7d\x98\xa2\xb8\xcd\
\x4c\xe0\x41\x60\x2c\xb2\x85\xd5\x2e\x8f\xee\xf2\xa5\x2f\xfc\x35\
\xaf\x02\x25\x44\xb6\x03\x83\xd5\xab\xcd\x2e\x57\x3a\x7c\xcd\xc1\
\x8b\x04\xbe\x15\x28\x23\x1b\x91\x6b\x25\x43\x7b\x8c\x44\xd6\x70\
\xe1\x34\x4f\x73\x93\x17\xc9\x12\xee\x57\xd7\x67\x25\xef\xe0\x1e\
\xeb\x91\xc7\xd5\x4a\x6e\x3c\xe7\x54\x0e\xfe\x73\x70\x64\x22\xa0\
\xc7\xfb\x74\xc0\x67\x16\xc7\x18\x81\x15\x7a\xad\x70\x0c\x57\xc9\
\xbf\xa5\x03\xe6\xe8\x62\xbd\xcd\x69\x29\xea\x3f\xb7\x47\xfc\x5b\
\x47\x31\x4d\xbe\x81\xe1\x82\xf3\xb5\xe4\xe1\x1c\x67\xe0\x21\xdf\
\x8f\xc3\xe6\x93\x60\xd3\x84\x34\xf9\x87\xc3\xbd\x4f\x3f\x18\xa7\
\x1c\x65\x77\x94\x9d\x27\x9c\x6d\x0a\xd7\xb7\xe5\x65\xb5\x37\x33\
\xf3\x5c\x85\x17\x5c\x0a\xcc\x56\x93\xbe\x39\x2a\xf9\x9f\xe0\x5f\
\x9a\x68\x8e\x30\xce\x52\x60\xb2\xde\x44\x91\xad\x1f\x5c\xef\xf0\
\xb1\x29\x0c\x03\x87\x37\x6f\x71\x93\x85\x01\xf6\x71\x39\x70\x11\
\x9b\x58\x8e\x74\xee\xf0\x29\x07\xfe\xad\xfd\x73\xea\x24\xb8\x35\
\xde\x04\x6e\x85\x37\x85\xb3\xf1\x1c\x38\x0b\xcf\x85\x1b\xe3\x6d\
\xe0\x46\x78\x5b\xf8\x44\xbc\x1f\xb8\xde\xfe\x47\xd6\x74\x1f\xfd\
\xf0\xcf\xea\x00\x22\xfa\x61\x32\xed\x77\xdd\x16\x00\x00\x00\x00\
\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x06\x36\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xde\x05\x0d\x13\x03\x30\xcd\xfe\xfe\x14\x00\x00\x05\xc3\x49\x44\
\x41\x54\x48\xc7\xbd\x95\x6b\x6c\x96\xe5\x19\xc7\x7f\xf7\xe9\x79\
\xde\xb7\x6f\x8b\x58\x0e\x72\xc6\xa8\xc8\xc0\x12\x08\x33\x16\x08\
\x07\x41\xe8\x88\x0c\x8d\xce\x10\xe3\x1c\xd9\x1c\xc9\xc4\x44\x12\
\x4c\xb6\x98\x4c\x0d\xe2\x12\xb2\xc9\xba\x03\xc3\x65\x82\x71\x09\
\x9e\x08\x0e\x51\x27\x30\xa9\x58\x5a\x3a\x04\x0b\xe5\x20\x08\x85\
\x5a\x94\x14\x39\xb3\x52\x69\xdf\xe7\x79\xee\xc3\x3e\xbc\x84\x68\
\x46\xdc\x3e\xed\x4a\xee\x5c\x5f\xae\xfb\xf7\xe1\xba\xae\xff\xf5\
\x17\x7c\x4b\x34\x37\xef\x19\xaf\xb5\x99\xeb\xbd\x9d\xfd\xaf\xce\
\xd3\xb7\x5d\xb8\x70\xb2\xe2\x58\xfb\x9e\xf4\xf8\x17\xfb\xdb\x04\
\xa2\x41\x2a\xbd\xfe\xfa\xeb\x06\xee\xfc\xd5\x92\x57\x3a\xaf\xf5\
\x5f\x5c\x1b\xba\x7b\xb4\x10\x72\x9b\x31\xba\x72\xdf\xc1\x2d\x72\
\xff\x81\x3a\x32\x97\x10\x82\x87\x10\xf0\x01\x9c\x73\x38\xeb\x82\
\xb5\xae\xbb\xbc\x70\xfd\xe2\x3f\x2c\xdf\xbc\xea\xbf\xc2\x0f\x7e\
\xd2\xba\xac\xad\xbd\xf9\xc9\x63\xed\x3b\xf9\xa2\xe3\x40\x00\x2f\
\xa2\x28\x46\x0a\x89\x90\x82\x10\x02\xc1\x07\xbc\xf7\x58\xe7\xb0\
\x99\xa5\xbb\xa7\x07\x29\xf4\xd6\x89\xd5\xf7\xcd\x79\x64\xfe\xcf\
\x8b\xd7\x84\xaf\x5d\xb7\x7a\xd5\xce\xe6\xf5\x0b\x92\xec\x52\xc8\
\xe5\xf2\xc2\x68\x8d\x36\x0a\xa5\x54\x09\x0e\x04\x20\x84\x80\x73\
\x8e\xe0\x03\x42\xc5\x0c\x1f\x3a\x86\xb3\x67\x4e\xf2\xd9\xf1\x83\
\x9d\xa3\x46\x4e\x1c\xf8\xc4\xa2\xe5\x3d\xdf\x80\x3f\xba\x68\xea\
\x52\xef\xed\xd3\xb9\x7c\x14\x72\x71\x2c\x8c\x31\x98\x48\xa3\xb5\
\x46\x49\x85\x94\x02\x84\xc4\xbb\x0c\xe7\x3d\xe3\xc6\xcc\x62\x6c\
\x55\x0d\x6d\xed\x2d\x6c\x6b\x7c\x83\x8b\x9d\xe7\x42\x92\x64\xc2\
\x3b\xf1\xd1\x5f\x56\x34\x4e\xbc\x0a\x7f\xf2\xe9\x79\xc3\xce\x5f\
\x38\xf1\x79\x3e\x1f\x87\x5c\x2e\x16\x51\x64\x88\xe2\x08\xa3\x4b\
\x70\x29\x25\x10\xe8\xd7\x67\x38\x23\x6e\xb9\x83\xef\x8e\x9b\x43\
\x77\xf7\x25\xea\xea\x5f\xe6\xc0\xa1\x06\x42\x10\xa4\x49\x4a\x92\
\x26\x74\x5f\xee\x21\x32\x15\x4b\x56\xd4\xbe\xff\xac\x00\x58\xf0\
\xd8\xc4\xcf\xa3\x58\x0d\xcb\xe7\xf3\xe4\xe2\x88\x38\x8e\x89\x22\
\x83\x31\x06\x29\x05\x7d\x2a\x07\xf3\xbd\xbb\x16\x52\x59\x39\x08\
\xad\x34\x6d\xed\x2d\x6c\xae\xfb\x33\x49\x5a\xc4\x59\x4b\x9a\x65\
\x25\x78\x92\xd0\x5d\x4c\xe8\xb9\x5c\x2c\x56\x14\xfa\xde\xa4\x1f\
\x5d\x34\x6d\x94\x73\xe9\x20\x63\x4c\x30\x5a\x97\xda\x61\x34\xe0\
\x19\x3a\x64\x34\x13\x6e\xbf\x97\xa1\x43\x46\x03\x90\xa6\x3d\x6c\
\xfa\x70\x15\xc7\xda\x76\x21\x95\xc6\x68\x83\x40\x94\xe6\xe0\x3d\
\xce\x3b\x74\x96\x61\x22\x93\x4b\xb2\xee\x59\x5a\x20\x26\x2b\x2d\
\xb5\xd6\x0a\x6d\x14\x52\x49\x0a\xe5\xbd\xa9\x99\xfe\x53\x46\x8e\
\xb8\x03\xef\x3d\x21\x04\x4e\x9d\x3e\xc6\xbb\x9b\x6b\x11\x42\x11\
\xe7\xca\xf0\xce\x23\x70\x04\x02\xde\x3b\x8c\x89\x91\xd2\x50\x5e\
\xd6\x97\x8e\x2f\xdb\xb1\x99\xfb\xb1\x76\x2e\x9b\x19\xc5\x0a\x25\
\x15\x84\xc0\x84\xdb\xef\x65\x52\xf5\xfd\xe4\x72\x79\xbc\xf7\x48\
\x29\xf9\xa0\x7e\x35\xc7\x4f\xec\x47\x29\x03\x57\xb6\x45\xa8\x52\
\xf6\xde\x32\x72\xc4\x04\xac\x73\x1c\x69\xfd\x98\xce\xce\x33\x68\
\xad\x48\x8a\xc9\x14\x8d\xe0\x3b\x08\x28\x14\xae\xe3\xfb\xb3\x17\
\x52\x35\x6a\x32\x08\x8f\x10\x92\xae\xae\xb3\xd4\x6d\x5b\xcd\x85\
\x8b\x1d\x28\xa5\x11\xa2\xb4\xe7\x00\xce\x7b\x6e\xba\x71\x1c\xfd\
\xfa\xdc\xc8\xa7\xad\x4d\x9c\xe8\x68\xc5\x59\x87\x94\x12\x21\x04\
\x10\xb4\x96\x52\x96\x57\xf6\x1e\xc0\x82\xf9\xb5\x94\x95\x95\xe3\
\xbc\xc5\x68\x73\xa5\x0d\xbf\x2b\xed\xb8\x54\x08\x21\xae\x3e\xad\
\x73\x3c\x38\xe7\x17\xfc\x73\xe7\x3a\x3e\x6a\x5e\x8f\x75\x16\xa3\
\x0c\xce\xfa\xaf\xd5\x81\x1e\x5b\x35\xf3\xf2\xac\x19\x0f\x23\xa5\
\x02\x20\x8a\x72\xec\xda\xbd\x81\x8f\x5b\xde\x26\x36\x39\x02\xa1\
\x34\xb4\x10\xb0\x2e\xa5\x7a\xfc\x7d\x54\xf4\xea\xc7\xdb\x1b\x9f\
\xe7\x52\xd7\x79\xae\xf4\x09\x4f\x20\xcb\x8a\x68\x1d\x33\x7e\xec\
\x34\xca\x72\xfd\x11\x2d\x2d\xfb\xd6\x15\x0a\xf9\x07\x0a\x85\x72\
\xf2\xf9\x1c\xdb\x77\xbc\xce\xa7\x47\x1b\xc8\xe7\x0b\x68\xa5\x51\
\x4a\x23\xa5\x20\x8a\xf2\xdc\x39\x79\x3e\xa7\xcf\x7c\xc6\xee\xbd\
\xef\xa1\x54\x84\x73\x16\x6b\x33\x84\xd0\x54\x14\xfa\x32\xe6\xb6\
\x59\x54\xf6\x1e\x46\x43\xd3\x9b\xd4\x6f\x7f\xd3\x6a\xef\xdd\x07\
\xd6\xba\x07\xa4\x32\x6c\xf8\x7b\x2d\x27\x4f\x1d\x26\x8e\x63\xbc\
\xf7\x78\xe1\x01\x4b\xaf\x8a\x1b\xb8\xbb\x66\x11\x1b\xb7\xac\xa0\
\xab\xeb\x3c\x4a\x45\x58\x9b\x21\xa5\x62\xca\xc4\x87\xb8\xf5\x96\
\x49\x1c\x39\xba\x93\x0f\x1b\x5f\xe5\xec\xd9\x2f\xe9\xba\xdc\x85\
\xd1\xf1\x0e\xed\x9c\x6b\x0c\x41\xb8\x8d\x9b\x5f\x90\x47\xda\x76\
\x89\x42\x59\x01\xe7\x1c\xd2\x49\x9c\xcb\x18\x33\x7a\x1a\x55\xa3\
\x67\xf0\xb7\x77\x96\x91\x66\x3d\x58\x9b\x32\xa0\xff\xcd\x54\x8d\
\x9a\xce\xe0\x41\xa3\x38\xdc\xba\x83\x97\xd6\x2c\xe6\xdc\xb9\x0e\
\x9c\x83\x34\x4b\xf1\xd6\xa3\xa4\x5a\x23\x00\x5e\xfa\xeb\x6f\x3a\
\x76\xb5\xac\x1f\x54\x51\xde\xeb\x6b\x0a\x8d\xb8\x73\xca\x0f\x19\
\x32\x78\x24\x6f\xbd\xfb\x6b\x84\x10\xc4\x71\x19\x77\xd7\x3c\x4e\
\x9f\xca\x21\x34\x34\xbd\xc6\xde\x03\x75\x84\x00\x36\xcb\x48\xbe\
\xa9\xd0\x24\x8a\x0b\x23\x34\x40\x73\xcb\xa6\x19\xc1\xc9\xc3\x59\
\x96\x06\x25\xa5\xd0\x5a\x33\x77\xf6\x62\x32\xdb\xcd\x2b\x6f\x3c\
\x45\xaf\x5e\xfd\x98\x35\xfd\x11\x06\x0d\xb8\x95\xfa\xed\x6b\x68\
\x6b\xdf\x43\xb1\x78\xb9\x04\xb6\x19\x69\x9a\x91\x65\xa5\x9c\x16\
\x53\xb4\x8e\x57\xae\xac\xad\x3b\x71\xf5\x2a\xfe\xec\xf1\xa9\xbf\
\x0d\xd8\x27\xfa\x54\xf6\x0d\x3f\x7a\x70\xa9\xb0\xae\xc8\x86\xf7\
\x9e\x67\x5c\xd5\x0c\x66\x4e\xff\x09\x87\x8e\x34\xb1\x65\xeb\x8b\
\x28\xa5\x09\x01\xbc\xf7\xa5\xbb\x62\x2d\x36\xcb\x28\x26\x69\x48\
\x8a\x89\xc8\xac\xdf\xff\xe2\x8a\xa6\xb1\xff\x71\xcf\x7f\xb9\xe4\
\xa1\x77\xee\xbf\x67\xf1\x5c\xef\x13\x36\x6e\xf9\x13\x0f\xcf\x5b\
\x42\x2e\x57\xc6\xda\xb7\x96\xf2\xd5\x57\x17\x10\xb2\x54\xee\x9d\
\x2f\xc1\x9d\x23\xb3\x96\x2c\x2d\xb5\x25\x4d\x9d\x1d\x70\xc3\xf0\
\xfe\xcf\x3d\xf3\xfa\xc5\x6b\x3a\xd1\x81\xfd\x87\x56\xee\x3b\x58\
\xf7\xd8\xd4\x49\x3f\x60\xef\x27\xef\xb3\xad\xe9\x55\x8c\xd6\x57\
\x85\x14\x28\x39\x91\xf3\x0e\x67\x1d\xd6\x3a\x92\x24\xc1\x07\xb9\
\x7b\x60\xbf\x61\x77\x3d\xfb\xcc\x6b\x9d\xdf\xea\xa1\xad\x47\x8e\
\x4f\x5a\xb3\xf6\xa9\x4d\xa7\xce\x1c\x2d\x8f\xa2\x58\x96\x54\x2a\
\x82\xb8\xa2\x7f\x1f\x82\xf0\xae\x64\x73\x59\xea\x7a\xa2\x28\xbf\
\xec\x85\xdf\x6f\x7d\xee\x7f\x32\x68\x80\x05\x0b\xab\x8d\x89\xf2\
\xd5\x3e\xf8\x79\xc1\xbb\x1a\x08\x23\x4b\x77\x25\x20\x95\x3e\x4b\
\xa0\x5e\x4a\xbd\xd6\x98\xb8\xf1\x8f\xcb\xff\x71\x86\xff\x77\xfc\
\x1b\xa6\xa7\xd9\x89\x83\xf0\xf3\xfd\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x04\x03\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdf\x07\x09\x03\x31\x35\x72\x3c\xea\x08\x00\x00\x03\x90\x49\x44\
\x41\x54\x48\xc7\xbd\x94\x69\x68\x5c\x55\x14\xc7\x7f\xf7\xbd\xd9\
\xde\x4c\x92\x92\x64\xac\x0b\x08\xa2\x81\x52\x29\xea\x97\x66\x29\
\x54\x34\xa4\x2a\x2a\xc4\xda\x22\x22\x75\x21\x46\x6a\xb4\x69\xb0\
\x71\x6b\x63\x35\x2d\xad\x15\x13\xd0\xa4\xad\xa5\x50\xeb\xd2\x8a\
\x04\x0c\x85\x1a\x0d\x55\x34\xc1\x22\xb6\x89\x4d\x17\x17\x4c\x27\
\xd1\x34\x90\xa4\x59\xa8\x3a\xd3\x4c\x66\x7c\xcb\xf5\x43\xde\x4c\
\xa7\x93\xf7\x62\x3e\xf9\x87\xf3\xe1\x9c\x77\xee\xff\x9c\xfb\xbf\
\xe7\x3c\xc1\x5c\xc4\x80\x1c\x87\x38\x9f\xb5\x1f\x14\x03\x7f\xf4\
\x31\x34\x7c\x0e\x81\x40\x51\x3d\xe4\x2f\xba\x9e\x1d\x8d\x87\x9d\
\xd2\x51\xb2\x7c\x09\x54\x02\x77\x00\x85\xc0\x22\xdb\x4a\x80\x86\
\xb5\x6b\xaa\xa2\x63\xe3\x3f\xe3\xf5\x29\x08\xd5\x62\xef\x3b\xdf\
\x30\x36\x1e\xa1\xee\xc5\xfb\xfe\x93\x5c\x02\x3e\xe0\x5b\xe0\x2c\
\x70\x09\x88\xda\xd6\x03\xec\x02\x56\xb5\x34\x75\x47\x83\x5a\x80\
\x50\x50\x03\x20\x18\x0c\x70\x39\x7e\x89\x67\x9e\x5f\xc1\xc1\x8f\
\x9b\xae\x22\x17\x19\xc4\x22\x23\xbe\x19\x68\x06\xc2\x80\x17\x18\
\xce\xf8\x56\x02\x7c\x0d\xe4\xa5\x02\x07\x3e\x6a\x64\x72\x62\x94\
\xdf\x87\x7e\x61\xe9\x92\x32\x36\x6d\x6c\x4e\x77\x9e\x04\x96\x64\
\xdd\x68\x1d\x70\x06\x18\x05\x2e\xd8\xc5\x53\xe8\x01\xde\x4a\xc5\
\xba\x8f\xb7\x53\xfd\x64\x23\xe3\x53\x11\x3c\x5e\x41\x7f\xe4\xe4\
\x55\x44\x53\xc0\x4a\x07\xed\xb3\x7d\x99\xd5\x7d\xda\x7f\x7d\x67\
\x25\x00\x2f\xbc\x5a\xce\xfa\xda\x32\x6a\x37\xdd\x93\x96\xe5\x3c\
\xb0\xc2\x2e\x82\x8b\x4c\x39\xf6\x14\xa5\x62\x79\xc0\xdf\x59\x39\
\x69\x54\x3f\x57\x4a\x6e\x28\x8c\xc7\x3e\x34\xe5\x90\x93\x07\x5c\
\x6b\x4b\x93\x60\x01\x68\xd8\xfe\x00\xf1\xf8\x0c\x89\x84\x4e\x52\
\x8f\xcf\x19\xc5\x4c\x44\x81\x08\x30\x0d\x04\xb2\xbe\x79\x9c\x0e\
\x04\xb5\x1c\x16\x87\x6f\x44\x55\x05\x96\x65\xa2\x00\xb9\xf6\x2c\
\xcf\x87\x58\x96\x5f\xe6\xd8\xf9\x4b\x6d\x44\x63\x13\x78\x3c\x2a\
\xa6\xa9\xa3\x00\x05\xb6\xe6\xb8\x3c\xa2\xcc\x1a\x5b\x01\xac\x72\
\xeb\x62\xd7\xb6\x4e\x84\x10\x80\x44\x79\x76\xe3\xca\x30\xf0\xa5\
\x7d\x83\x14\xee\xb7\x49\x32\x2d\x85\xd5\x40\x9d\xdb\x63\x02\x08\
\x21\x10\x02\x3c\xb7\x2f\xab\x60\x60\xe0\x43\x51\x54\x74\x8b\x04\
\xf2\x81\xbf\x80\x4e\x97\x73\x0f\x03\xed\x2e\x13\x05\x40\xe3\x9b\
\x6b\x28\xba\xb9\x94\x60\x60\x31\x4a\xe9\xf2\x87\xb0\x2c\x93\x91\
\x91\x51\x01\xfc\x09\x94\xdb\xb7\x50\x6c\xf3\xdb\x1a\xef\xcd\x20\
\x4e\x4b\xb7\x7b\x7f\x55\xda\xe9\x3b\xd3\xc5\x53\x8f\xbd\x8d\x65\
\x5a\x74\x1c\xdb\x83\x38\x75\xaa\x0f\x4d\xd3\xc8\x2f\x08\xd3\xf9\
\x55\x0b\x55\x4f\xec\x90\xf3\x3c\xac\x70\x58\x30\x00\x71\xfa\x6c\
\x17\x5d\xc7\x3f\x61\x72\x72\x8c\xd8\x74\x0c\x3d\x69\x21\x7a\x7a\
\x7a\x09\x85\x72\x39\xd1\xdb\x46\xff\xe0\xf7\x84\x82\x21\xfc\x7e\
\x3f\x5e\x9f\x97\xfa\x0d\x87\x00\x18\xbd\x38\xc0\x0d\xd7\x15\xcd\
\xb7\xc1\xbc\xb1\xf3\x41\x61\x9a\x30\x93\x48\x32\x13\x9f\x01\xa9\
\xa2\x14\x17\x2f\xe7\x44\xef\x51\x7a\x4e\x77\x62\x59\xa0\xeb\x3a\
\xba\xae\x63\xe8\x06\x27\x7f\xec\x60\x64\x2c\x92\x4d\x8c\x93\xd6\
\xdb\x1a\x3a\xa4\x61\x18\xe8\x86\x81\xae\x9b\x08\xd5\x3b\x9b\x54\
\x53\x77\x37\xa6\x99\x44\x0b\xfa\x08\xf8\x03\x68\x5a\x80\x47\x56\
\x6f\xe1\xd6\xa5\xc5\x00\xec\x7b\x7f\x03\x35\x4f\xef\xc1\x65\x5c\
\xe7\x14\xae\xa9\xbb\x8b\x7d\x2d\xdd\x57\x3a\x58\x5f\x7b\x27\x12\
\x83\xc2\x82\x30\x8f\x3f\xba\x1d\xc3\x4c\x70\xdb\xb2\xd2\x85\x6c\
\xbd\x74\x79\x9b\x2b\xeb\xbf\x7f\xf7\x77\x5c\x53\x78\x13\x6b\x2b\
\xb7\x32\x3d\x7d\x99\x23\x9f\xbf\xcb\xe0\xe0\x6f\x0b\x21\x17\x0b\
\x2c\x08\x3f\x9d\xfb\x95\xc3\x9f\xb6\x32\x7c\x61\x84\xa3\x5f\x7c\
\x40\xfd\x96\x0a\xc7\xbc\x97\xb7\xde\x4b\xfd\xe6\x0a\xb7\x8d\x96\
\x6e\x55\x39\xdf\x3f\xc4\xa1\xb6\xd7\xb8\x38\x11\xc1\xe7\xf3\xa3\
\xaa\x2a\xad\x4d\x5d\xb3\xff\xec\x57\xca\xb1\xa4\xc4\x32\x2d\x0c\
\xd3\x44\xff\xc7\xe4\xc0\x7b\x3f\x38\x75\x2c\x5c\x57\xb8\xba\xa6\
\x04\xaf\x4f\xc3\x92\x16\xd2\x32\x01\x89\x94\xb3\x4d\x29\xaa\x07\
\x24\x28\x8a\x07\xaf\xd7\x4f\x6b\xf3\x31\xfe\x77\xfc\x0b\x20\x72\
\x40\xee\x7a\xb1\xda\x22\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
\x00\x00\x01\xdf\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xe0\x01\x15\x12\x23\x03\xb8\x35\x6e\xfa\x00\x00\x01\x6c\x49\x44\
\x41\x54\x48\xc7\xd5\xd4\xb1\x6b\x14\x41\x14\xc7\xf1\xcf\x2c\x17\
\x63\xa1\xa2\x45\x8a\x48\xba\xab\xae\xb3\x10\x2d\xc4\xce\xce\xce\
\x60\xb6\x88\x01\xc1\x3f\xe0\x40\xb0\x0d\xf6\x82\x60\x17\x4b\xad\
\xcc\x16\x36\xb6\xa2\x5d\x20\x85\x88\x5d\x08\xa9\x52\xa5\x08\x49\
\x11\xe5\xd4\x3b\x6e\x52\xb8\x91\x61\x38\x0e\xee\x2e\x47\x92\x69\
\x76\x76\xe6\xcd\x77\x7f\xef\xb7\x6f\x1e\x17\x75\x84\xf4\xe5\x31\
\xc5\x22\xed\xc0\xb3\x40\x33\x72\x84\x2f\x5d\x56\x9f\xb0\x3d\x11\
\x7c\x9d\x0f\x81\x12\x22\x3f\x03\x57\xea\xad\xc3\x2e\xb7\x96\xd9\
\x1d\x05\x5e\x24\xe0\x47\x81\x32\x72\x10\xb9\x57\x72\xf5\x0f\xf3\
\x91\x6f\xb8\x31\xc3\x9b\x51\x95\x17\x49\x0a\x4f\xeb\xe7\x5a\xc9\
\x06\xac\xb0\x17\x79\x55\x67\xf2\xe0\x2d\x8d\x51\xe0\xff\x83\x23\
\xb7\x03\xfa\x6c\xa6\x01\x5b\x7c\x6c\x32\x0f\x9f\xe9\x8f\x05\xc7\
\x5c\xad\x7c\x3f\x0d\x78\x49\x17\x7b\xe3\x54\x4b\x91\xff\xdc\x3e\
\xf1\xb4\x4a\x31\x55\x7e\x80\xb9\x82\xeb\x99\xf2\x70\x8d\x59\x78\
\xce\xef\x1c\x50\xd5\x62\x96\xb2\xca\xcb\x95\x7f\x3f\xf1\x3e\x0d\
\x68\x51\x2e\xd0\x59\xe0\xd7\x6b\x2e\x0f\x02\xe7\xf3\x41\xf0\xf7\
\xb5\x37\xed\x75\xee\xc2\x3b\x6e\x06\x5e\xd4\x1f\xfd\x9a\x2a\x1f\
\x04\xcb\xd7\x42\x9a\x7e\x8b\x4f\x81\x87\xf9\x25\x8a\x1c\xf5\xb8\
\xbf\xcc\x8f\x1c\x72\x62\xc7\xa0\xb5\x22\x81\xc7\x0e\x8b\x7d\x56\
\x23\x3b\xe1\x9f\xcf\xfb\xa8\x22\x77\x86\x81\xf3\x79\x35\x6e\x51\
\x54\xc4\x61\x87\xd3\xfd\x62\x9a\x5d\x71\x62\x78\x6f\x48\x4b\x98\
\x08\xde\xe5\x52\x83\xde\x54\xe0\x33\xfc\x9d\xaa\x2d\x67\x06\x6f\
\x8c\x7b\xb0\x3a\xc5\x06\x77\xfe\xc6\x31\x0e\xdc\x66\xdb\xf1\x5e\
\x1e\x72\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\xaa\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xde\x05\x1d\x17\x30\x09\x31\x80\xec\x6f\x00\x00\x04\x37\x49\x44\
\x41\x54\x48\xc7\xbd\x95\x5b\x6c\x15\x55\x14\x86\xbf\xbd\x67\xe6\
\x9c\x42\xa1\x02\x42\x8b\x40\xc5\xa8\x51\xa9\x10\xa3\xc8\x2d\x41\
\xa0\x68\x41\x1b\x40\xd0\x40\xa2\x12\x1f\x08\x11\xaa\xb4\x98\x4a\
\x23\x94\x4b\x8a\x12\x40\x01\x15\xda\x70\x29\x18\x31\x34\xd2\x6a\
\x68\x42\xd5\x0a\x81\x60\xa1\x22\x22\x95\x46\x10\x8d\xdc\xc1\x70\
\x95\x7b\xc3\x39\x67\xce\xcc\xde\xdb\x07\xf4\x94\x81\xb6\x1a\x1e\
\x5c\xc9\x24\x93\xac\xff\xff\xd7\xac\x35\xeb\x22\x68\xc1\xea\xea\
\xf6\x62\xdb\x0e\x5a\xfb\x5c\xb9\x7a\x8e\x4b\x97\x4e\x73\xf8\xd8\
\x5e\x8e\x9f\xdc\x87\x40\x20\x2d\x9b\xf6\x77\xdd\xc3\xbc\xa2\xb2\
\x26\xf9\xa2\x69\xd1\x9f\x10\x42\xd2\xbb\xf7\xe3\x06\xa0\xa0\x30\
\x5b\x78\xca\xc5\x18\xcd\xd2\xf7\xbf\x25\x77\xda\x10\x94\x52\x28\
\x5f\xe1\xfb\x8a\x36\xc9\xed\x59\xba\x78\xd3\xbf\x8b\x1f\xf8\xe5\
\x20\x47\x8e\xd5\x31\x6a\xe4\xcb\xa6\x25\x6c\xc1\xcc\x61\xf8\x4a\
\xe1\x7b\x3e\x91\x68\x14\x29\x6c\x06\xf4\x1b\xc3\x84\x57\x0b\x9a\
\x26\x54\x7c\xb1\x86\xdd\x75\x95\x7c\xf0\x5e\xb5\x69\xa6\x52\x01\
\xfc\xac\xb9\x23\x10\x56\x98\xee\xe9\xbd\xf8\xf3\xfc\x69\x8e\x1e\
\x3f\x40\x8f\x87\x07\x90\x9f\xb7\x38\x08\x9e\x9c\x37\x88\x95\xcb\
\x76\x00\x98\x26\xc4\x4c\x73\x01\x6a\x6a\x37\xb0\xbd\xb6\x9c\xcb\
\x57\x2f\xe0\xba\x1e\x5a\x09\x56\x15\xd7\x02\x20\x01\xa6\xcf\x1e\
\xd7\x92\xf0\xad\xef\x81\xac\x86\x3c\xf5\x22\xca\xc4\x49\x0a\x27\
\x91\x94\x14\x02\x7c\x72\xf3\x87\x35\x92\x26\xbe\x3e\x80\x35\xcb\
\x77\xdd\x4c\x14\x00\x4b\x8a\xc7\x23\xa5\xe0\xee\x0e\x5d\x19\xfe\
\x74\x0e\x69\xa9\xdd\x9b\xcd\x60\xce\xbc\x51\xb8\xae\x4b\x24\xe6\
\x12\xbd\x1e\xa3\x6d\x72\x47\xec\xc9\x79\x83\x51\x2a\x1e\x28\xec\
\x82\x25\xe3\x10\xc2\x90\xde\x2d\x83\xfe\x4f\x3e\x4f\x7a\xb7\x0c\
\x00\x5c\x37\x22\xc2\xe1\xd6\xe6\xa6\x0c\x12\x01\x1c\xc7\x46\x69\
\x85\xed\x79\x38\x21\x07\xd7\x8b\x20\x05\x02\xcb\x96\x01\xf1\xe4\
\x36\xed\x18\x3d\x62\x1a\x63\x47\xcf\xa0\x6b\x97\x47\x30\xc6\x70\
\xe6\xec\x21\xd6\x7e\x96\x4f\xe9\xda\x37\x9a\x2c\xd1\xec\xb7\x2b\
\x69\xdd\xaa\x0d\xa9\x1d\xd3\xb1\x2c\x81\xd6\x0a\xa9\x94\x87\x6d\
\x59\x01\xf1\x09\xaf\x2c\xe1\xc1\xfb\x7b\xa3\xb5\x46\x4a\xc9\xb6\
\xed\x1f\x53\xbd\xa5\x18\xcb\x72\x90\x52\xb2\xfa\xd3\x29\x4d\x06\
\x98\x59\x50\xc1\xb5\x86\xf3\xd8\xb6\x85\x52\x1e\x12\x71\x7b\xb7\
\x3b\x4e\x08\x21\x24\x0d\x0d\x17\xd8\x50\x35\x9f\xa3\x27\xea\x11\
\x42\x20\xa5\x44\x08\x81\x10\x82\x95\x9f\x4c\x4e\xb0\xca\x2a\x66\
\x35\x96\x74\xee\x37\x08\x21\x00\x83\x94\x52\xd2\xa1\x5d\xe7\x80\
\xb8\x14\x16\x67\xcf\x1d\xa6\xbc\xb2\x88\xcb\x57\xce\x20\xa5\x95\
\x10\x95\x52\x12\x0e\x27\x33\x7e\xec\xfc\x04\xfe\xe2\xe5\x53\x01\
\xfe\x87\x0b\xb7\x21\x04\xd8\x8f\xf5\x7c\x86\xac\xa1\xe3\x79\x67\
\xf6\xe7\x09\xe7\x9e\xfa\x2f\xd9\x53\xbf\x91\xb0\x93\x84\xc1\x20\
\x10\x18\x63\xf0\x55\x9c\x7e\x4f\x8c\xa1\x6d\x4a\x27\x36\x56\x2f\
\xba\xa9\x30\xc1\x99\x2b\x2b\x5f\x44\xeb\xa4\x54\x64\xff\x3e\xa3\
\xd1\x5a\x05\x9c\xb5\xbb\xca\x11\x42\xa2\x8d\xc6\x68\x83\x31\x1a\
\xdb\x0e\x31\x7c\x68\x0e\x6e\x3c\xc2\xe6\xad\xcb\xb9\x1e\xb9\x96\
\xc0\x5b\x56\x28\xc0\xd7\x4a\xf3\xd5\xe6\x12\x6c\xad\x6f\x2c\x9f\
\x60\x59\x6c\xb4\xd6\x68\xa1\x01\x9f\x94\xb6\x69\x64\x0f\xcb\xa3\
\x7a\x4b\x31\x0d\x0d\x17\xb1\xac\x10\xbe\xef\x25\xf0\x53\x5e\x2b\
\x0d\xf0\x7f\xac\xff\x1a\xc7\x0e\x63\x2b\xa5\x30\x26\xf8\x47\x95\
\x52\x48\x25\x51\xca\xa3\x57\xc6\x60\x7a\x66\x0c\x65\x43\xd5\x02\
\xe2\x5e\x14\xdf\x8f\xd3\x39\xf5\x01\x7a\xf6\xc8\x04\x4a\x6f\x5b\
\x3e\x6f\x15\x66\x11\x8d\x44\xb1\xa4\x85\xec\xdb\xb7\x0f\x3f\xec\
\xa9\x0a\x00\xa6\xe7\x97\xe3\x7b\x3e\x03\xfb\xbf\xc4\xa3\x3d\x32\
\xa9\xac\x5a\x48\x34\xda\x80\x6d\x85\x78\x61\xe4\x0c\x46\x3e\x97\
\xcf\x1f\xa7\x7e\xbb\x4d\x78\xfa\x9c\x67\xf1\x7c\x1f\xcf\x53\x08\
\xcb\xb9\xd1\x84\x39\x53\x33\x51\xca\xa5\xb4\xe4\xfb\xc0\xf8\xff\
\xbc\xbf\x86\x4d\x5b\x57\x90\x92\xd2\x89\xac\xcc\x09\x74\xe9\xfc\
\x10\x35\xdf\xad\xe3\xc8\xb1\xbd\xe4\x4e\x5a\x1d\x58\x17\x85\x45\
\xd9\xc4\x62\x2e\x91\x48\x0c\x21\x1c\x56\x2c\xad\x69\xec\xf0\x49\
\xb9\x83\x58\x55\xbc\xc3\x70\x07\x56\x58\x94\x2d\x62\x6e\x1c\x37\
\xe6\xe2\xf9\x9a\xd2\xe2\x9d\x8d\x5b\x11\x60\x55\xf1\x0e\xee\xd4\
\x62\x31\x17\x37\xe6\x12\x8f\x2b\xd2\x3a\xdd\xdb\xd8\x18\xb7\x1e\
\x83\xfd\xfb\x7e\x15\x65\xeb\x97\x89\x93\x27\x4e\xfd\x33\xbb\x2d\
\x3e\x53\x0b\x32\x45\x24\x12\xc5\xf3\x0d\x5d\xd2\xee\xe3\xdd\x39\
\xeb\x5b\xbe\xa1\x07\x7f\x3f\xce\xba\x8a\x59\x9c\x3d\x7f\x88\xd5\
\x25\xbb\x9b\xfd\xe2\x9c\x37\x07\xe2\xc5\x15\xa1\x50\x2b\x96\x7f\
\xb4\xed\xbf\x1d\x68\x80\x89\x39\xfd\x70\x42\xad\xfe\x1e\x24\x45\
\x69\xc9\xce\x80\x7f\x52\xee\x40\xa4\xb4\x71\x9c\x30\xcb\x16\x6f\
\xe6\x7f\xb7\xbf\x00\x39\xcd\xb9\xa4\x9e\x2e\x29\x19\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x05\x79\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xde\x06\x09\x11\x18\x36\x46\xed\xa9\x52\x00\x00\x05\x06\x49\x44\
\x41\x54\x48\xc7\xbd\x95\x6b\x6c\x96\xe5\x19\xc7\x7f\xf7\xf1\x79\
\xdf\xbe\x2d\x20\x50\xa8\x50\x21\x99\x07\x02\xab\xd3\x39\x50\xc4\
\x71\x9c\x10\x9d\x5a\x8d\x26\x6e\x61\x48\xd8\x66\x22\x7e\xd0\x85\
\x19\x13\x92\x39\xe3\x34\x59\xa6\x2e\xc4\x38\xe7\x07\xe3\x92\x25\
\x55\x37\x32\x03\x38\x01\x83\xad\xa8\x68\x23\xd0\x72\x2c\x07\x5b\
\x4a\x4a\xc0\x0a\x62\x69\x38\xb5\xef\xfb\xbc\xcf\x7d\xd8\x87\x17\
\x15\x03\x52\x3f\xed\xfa\x72\x7d\xb9\xaf\xdf\x75\xe7\x9f\xfc\xaf\
\xbf\xe0\x12\xd5\xde\xbe\x1d\xad\x0d\x21\x38\x4e\x9e\xfa\x82\xfe\
\xfe\xcf\xe9\xee\xd9\xce\xa1\xc3\xbb\x11\x08\xa4\xd2\xe6\xb2\xe1\
\x97\xbb\x1b\x0f\xae\x0e\x00\x8d\x4d\x03\xe2\xfc\x79\x71\x71\xe8\
\x36\x84\x90\x18\xa3\xd9\xb5\xb7\x99\xdd\x1d\x2d\x64\x3e\x25\xc6\
\x00\x31\x12\x22\x78\xef\xf1\xce\x8f\xba\xe3\xe4\xae\xbe\xf3\x67\
\xcf\x5f\x70\x01\x7c\xef\x9e\x2e\x0e\xf6\xb4\xd3\xdd\xb3\x85\xc3\
\xbd\x1d\x40\xc0\xda\x04\x29\x24\x42\x0a\x62\x8c\xc4\x10\xab\x43\
\x08\x6e\xe6\xe1\xd6\xe2\xc5\x3e\xf7\xd5\x82\x6f\xc1\x57\xfe\xe7\
\x55\xb6\xb4\xaf\x22\xcd\x4e\x93\xcb\xe5\x31\x5a\xa3\x8d\x42\x29\
\x55\x81\x03\x11\x6e\x8f\x31\x6e\xb8\x61\x7f\xb3\xbf\x94\xa4\x8d\
\x4d\x03\xe2\x6b\xf8\xd2\x47\x67\x11\x82\x23\x97\xb7\xe4\x92\x04\
\x63\x0c\xc6\x6a\xb4\xd6\x28\xa9\x90\x52\x80\x90\x04\x9f\x89\x1f\
\xee\x5c\x1f\x18\xa2\x1a\x9b\x06\x84\x06\x58\xfe\xc7\xfb\x39\xd1\
\x7f\x84\x7c\x3e\x21\x97\x24\x58\x6b\xb0\x89\xad\xfc\x5c\x6b\xa4\
\x94\x40\xcc\xff\xa0\x63\x63\x49\x15\x4f\x0f\x09\x6e\xae\xbd\xe5\
\x47\xf0\x2e\x12\xa0\xef\xc4\x11\x8c\x55\x18\x6b\x30\x46\x63\xad\
\xc5\x1a\x83\xb5\x16\xad\x15\xb5\xa3\xeb\xc5\xa2\x5f\xfc\xf9\xfb\
\x81\xeb\xa6\xe5\x8a\xa5\xd3\x47\x97\x3d\x7e\xe7\x70\xbd\xf4\xd1\
\xd9\x78\x5f\xae\xc8\xa0\x75\xa5\x1b\x0d\x04\xae\xa8\x9f\x22\xa6\
\x4f\xbd\x3b\x5e\x51\x3f\x25\xbe\xbd\xb8\x3a\x02\x67\x81\xea\xef\
\x02\x6f\xb9\x7a\xce\x18\x3d\x58\x2c\x1b\x6b\x4a\x69\x36\x68\xb4\
\x40\xa0\xb4\x44\x6b\x85\x36\x0a\xa9\x24\x85\xea\x11\x2c\x98\xfb\
\x5b\x26\x5d\x7d\xa3\x08\x21\x7c\x05\xe6\x12\xe0\xed\xfb\x7e\xdc\
\x38\xb5\xca\x07\x51\x5d\x35\x3a\xf6\x1e\xed\x29\xba\xcc\x4b\xed\
\x7d\x86\x4d\x14\x4a\x2a\x88\x91\xe9\x53\xef\x66\xc6\x4d\xf7\x92\
\xcb\xe5\x09\x21\xc4\x75\x4b\x86\xc5\xa1\xa4\x10\x0b\xff\xb2\x7c\
\x92\xf7\xa2\xb3\xab\x4d\x9e\x3a\x75\x3c\x68\xad\x2e\x4b\x4b\x69\
\x9f\x46\x20\x10\xc4\x42\x61\x38\x77\xde\xf6\x30\x0d\x93\x7f\x0a\
\x22\x20\x84\x14\xeb\x96\xd4\x0c\xa9\xf1\xf8\xdf\xbf\xd9\xb8\xbf\
\xab\xb5\xe5\x48\x6f\x57\xf4\xce\x47\x29\xa5\x14\x42\x9c\x84\x88\
\x96\x52\x8a\x91\x23\xea\xe2\x83\x8b\x57\xd4\x54\x55\x55\x0f\xf8\
\xe0\x82\xd1\x86\x63\x5f\x74\xab\xa1\xc0\xb9\x25\x2b\x96\x6e\x6e\
\x5f\xb5\xd6\x79\x17\x8d\x32\x78\x17\xa2\x10\x42\x0a\x21\x9c\x10\
\x08\x7d\x5d\xc3\xad\x13\xe7\xcf\x5b\xf4\x99\x94\xca\x03\xd2\xda\
\x5c\xd8\xba\x6d\x0d\x6d\x3b\xde\xf2\xbf\x6b\x1a\xe0\xbf\x0f\x14\
\x2e\x0a\x2e\xfe\xe4\xf6\x05\xbd\x87\xb6\xbd\x07\x44\x62\x24\x10\
\xc9\xb2\xd2\x44\xad\x93\xe3\x37\x5c\x37\xbb\x58\x95\x1b\x83\x9e\
\x3e\xed\x1e\x1f\x82\x57\x21\x44\x19\x63\x60\xfd\x86\x97\xd9\x7f\
\x60\x93\xcc\xe7\x0b\xf6\x9c\x19\x2e\x58\xd0\x3b\x67\xe1\x04\xa5\
\xec\x67\x78\x17\x43\x70\x28\x65\xc5\xd8\xd1\xe3\xae\xba\x76\xf6\
\xfc\xee\x91\x23\x26\xb0\xa9\xf5\x4d\xd6\x6e\x78\x29\xa7\x43\xf0\
\x35\xce\xf9\x7e\xa9\x4c\xb6\x66\xed\x0a\xff\xf9\xb1\x4f\x49\x92\
\x24\x84\x10\x4a\xe7\xb9\xed\xeb\x05\xc7\x7e\xb6\x44\x2a\xc0\xb9\
\x2c\x4a\xa9\x98\x79\xf3\x42\x79\xcd\x55\x33\x6a\x3b\x0f\x6c\xe9\
\x7e\xff\xa3\xd7\xe3\x97\x5f\x1e\xe5\xcc\xc0\x19\x8c\x4e\x9c\xd8\
\xba\xb5\x6d\x4c\xa1\x50\x73\x66\x73\xdb\xca\x52\xe7\xc1\xd6\x58\
\xa8\x2a\x90\x24\x09\xc6\x1a\x20\x88\x6b\xa7\xcc\xa6\x61\xca\x3c\
\xb1\xf1\xb5\xc7\xc4\xd9\x51\xe3\x70\xae\xec\xeb\xc6\x5c\x49\xc3\
\xe4\xb9\x8c\x1f\x37\x99\x4f\xbb\x3e\x51\x9b\xdb\xd6\x84\xbe\xbe\
\xde\xe8\x3d\xa2\x58\x4a\x29\x0e\x16\x05\x51\x05\x01\xf0\x8f\x7f\
\x3e\x97\xdf\xba\x63\x55\xb1\xa6\x7a\x18\xb9\xc4\x92\x24\x09\xd6\
\x5a\xe6\xcc\xfc\x15\xf5\xe3\x27\xb1\xfa\xed\x67\x11\x42\x90\x24\
\x55\xfc\x7c\xc1\x23\x8c\x1a\x59\xcf\xa6\xd6\x37\xd8\xd9\xd1\x42\
\x8c\xe0\xb2\x8c\x34\x2d\x93\xa6\x29\x83\xa5\x94\xe2\x40\x49\xd8\
\xa4\x10\x35\x40\xfb\x8e\x77\xca\xd1\x4b\xb2\xac\x8c\x92\x12\xad\
\x35\x77\xdd\xb6\x8c\xcc\x0d\xf2\xda\xbf\x9f\x60\xd8\xb0\x5a\xe6\
\xcf\xfd\x0d\xe3\xea\xae\xe1\x83\x8f\x9b\x38\xd8\xb3\x9d\x52\x69\
\xa0\x02\x76\x19\xe5\x72\x46\x96\x55\x7a\xb9\x54\x46\xeb\x24\xfe\
\x7d\x45\xcb\x37\x27\xf7\xa1\x47\x66\x89\x88\x8b\xa3\x46\x8e\xe6\
\x81\x5f\x3e\x8d\xf3\x25\xd6\xac\x7b\x9e\xeb\x1b\xe6\x71\xeb\xdc\
\x5f\xb3\xaf\xb3\x95\xe6\x8d\xaf\xa0\x94\x26\x46\x08\x21\xe0\x9d\
\xa3\xec\x1c\x2e\xcb\x28\xa5\x65\xd2\x52\x4a\xe6\x02\xaf\xfc\xad\
\xf5\xc2\xb0\xf8\xc3\x53\x0b\xb9\xb7\x71\x19\x21\xa4\xac\x6f\x7e\
\x89\x45\xf7\x3f\x45\x2e\x57\xc5\xca\xd5\x4f\x73\xf6\x6c\x3f\x42\
\x56\x9e\x07\x1f\x2a\x70\xef\xc9\x9c\x23\x2b\x57\x64\x29\x97\x3d\
\x75\x63\x27\xf2\xcc\x93\xff\xba\x78\x12\x75\xec\xde\xc7\xae\xbd\
\x2d\xcc\x9a\x71\x1f\x3b\xf7\xbc\xcb\x87\xad\xaf\x63\xb4\x46\x4a\
\x85\x10\x82\x48\x24\x86\x88\x0f\x1e\xef\x3c\xce\x79\xd2\x34\x25\
\x44\xc9\xe5\xb5\x13\xf8\xd3\x93\x6f\x5c\x3a\x43\xbb\x3a\x0f\xd1\
\xb4\xf2\x09\x8e\x1d\x3f\x80\xb5\x49\x25\x89\xa4\x40\x08\x71\x2e\
\x43\x23\xc1\x07\x9c\xf7\x64\x65\x8f\xb5\x79\x5e\x7e\x61\xe3\x85\
\x37\xe7\xbb\xac\xfd\xe0\xc3\x37\x61\x6c\x9e\x10\x03\x31\xf8\x73\
\x46\x8c\x40\x44\x2a\x0d\x11\xa4\xd4\x18\x93\xf0\xe2\x5f\x37\xf0\
\x7f\xaf\xff\x01\x1c\xd0\x60\x2b\xc5\x58\x53\xd0\x00\x00\x00\x00\
\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x05\x0f\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xde\x08\x07\x14\x2d\x16\x86\x96\x40\x95\x00\x00\x04\x9c\x49\x44\
\x41\x54\x48\xc7\xbd\x95\x7d\x4c\x95\x65\x14\xc0\x7f\xcf\xfb\x7d\
\xef\x45\x53\x82\xf2\x86\xb9\xd6\x74\x2e\xb7\xfe\x6b\x43\x6c\x98\
\xba\x59\xb6\x6c\xd6\x70\x6e\x7d\xcc\x9a\x5f\xa1\x29\x4b\x9b\x49\
\x1a\x86\xcd\x58\x99\x1a\x7e\x12\x4e\xfa\x32\x6d\x2c\x33\xb6\x6c\
\x59\x91\x20\xca\x52\x24\x11\x91\x86\x26\xe1\x6c\xa2\xa2\x73\xa6\
\xc0\xbd\xef\xe7\xd3\x1f\x5c\xf0\xaa\xe8\x5a\x6b\x9d\x7f\x9e\xf7\
\x39\xef\x73\x7e\xef\x79\xcf\x39\xcf\x39\x82\x3b\x48\x7d\xfd\x11\
\x34\x4d\x27\x08\x3c\xae\xfc\x75\x81\xcb\x97\xdb\x39\xd5\x76\x84\
\xd3\x67\x8e\x21\x10\x28\xaa\xc6\xe0\xbb\xa2\xac\x2c\xfc\xa2\x5f\
\x7b\xd1\x3f\xf4\x57\x84\x50\xd0\x75\x8d\xc6\xe6\x9f\x38\xd6\x54\
\x89\xeb\xdb\x48\x19\x80\x94\x04\x12\x7c\xdf\x67\xf3\x87\xfb\x99\
\x39\x37\x93\x94\xc8\x60\xd6\xad\xde\x73\x0b\x47\xbb\x59\xd1\x7c\
\xfc\x24\xad\x6d\xf5\x9c\x6a\x3b\xc4\x99\xb3\x4d\x40\x80\x61\x98\
\x14\x17\xfd\x2c\x01\xf2\xdf\x7e\x52\xc8\x40\x12\x04\x01\x00\xe1\
\xb0\x45\x67\xf7\x65\x66\xbf\x3a\x86\xac\xcc\x67\x99\x31\x7d\x71\
\xff\x9e\x97\x7f\xb5\x95\x43\xf5\xbb\xb0\xdd\xab\x58\x56\x08\x5d\
\xd3\xd0\x74\x95\x95\x05\xbb\x65\xf2\xb9\xc2\xa2\x67\x44\xe1\xd2\
\x8a\xbe\xfd\xd6\xcf\x0a\xb9\xd8\xd1\xce\x1f\xa7\x9b\x79\x68\x64\
\x16\x8b\xf2\x56\xdf\x08\xcf\xcd\x1b\x4b\x10\x78\x58\x21\x03\xcb\
\x34\xd1\x75\x1d\xdd\xd0\x28\x7c\xb3\x42\xf6\x17\xba\xdd\x7b\x3e\
\x12\x93\x27\xe5\x52\xbd\xff\x6b\xc6\x65\xe7\xf0\xda\x92\xf1\xd8\
\xb6\x4b\xe0\x0b\x4a\x37\xec\x07\x40\x01\xc8\x2f\x98\x86\xef\xbb\
\x58\x56\x0f\xd8\x30\x74\x4c\xcb\x48\x06\x1b\x40\x35\x70\x0e\x18\
\x05\x30\x79\x52\xae\x04\x18\x97\x9d\x23\x96\xbf\x3b\x85\xe2\xf7\
\xab\xb0\x2c\x03\xf0\x58\xb0\xe8\xf1\xeb\x9e\xcf\x9a\x97\x85\x61\
\xaa\x84\x42\x21\x2c\xd3\xc0\x4c\x7c\x20\x7f\x51\x39\x00\x9f\x7f\
\x99\xcf\xf4\xe7\xde\x93\x49\x79\x0a\x00\x99\xb0\x0f\x01\xf1\x84\
\x8e\x1e\xde\x68\x06\x44\xd2\x10\xb9\x79\x8f\xe1\xfb\x0e\xe1\x88\
\x45\xc8\xb2\xb0\x2c\x93\xc2\xa5\x15\xbd\x86\x9c\xf9\xb3\x99\xfb\
\x87\x8e\x42\x08\xd1\x0e\x44\x81\x87\x81\x36\xa0\x0b\xd0\x01\x2f\
\x01\x16\xcb\xde\x79\x8a\xee\xee\x18\xf1\xb8\x8b\x40\x47\x13\x08\
\x54\x4d\x41\xd3\x54\x34\x5d\xed\x05\x03\x20\x65\x4f\x55\x48\x29\
\x49\x80\x01\x3a\x80\x81\x40\x37\xe0\x26\xa5\x41\x86\x43\x29\x22\
\x25\x9c\xc6\xd9\x73\x6d\x78\xae\x8f\xe2\xfb\x2e\x9a\xaa\xa2\x2a\
\x37\x54\x45\xa6\xe7\x79\x04\x41\x80\xa2\x28\xec\xdd\x57\x96\x9c\
\xcb\x8e\x84\xd7\x6a\x22\x17\x19\xbd\x7f\xb9\x6c\x71\xb9\xbc\x7a\
\xad\x03\x4d\x53\xf1\x7d\x17\x31\x67\xfe\xa3\x18\xa6\xca\xc6\x35\
\x35\x92\xff\x48\x16\xe6\x4f\x10\x9d\xd7\x62\x28\x8a\xa2\x90\x3a\
\x68\xc8\xbf\xe5\x9c\xeb\xf7\xda\x0b\x81\x10\x20\x4a\xb6\xac\x60\
\xe2\x84\x17\x89\x44\xc2\x44\xa3\xd1\x5e\xef\x87\x00\x17\x6e\xb2\
\x91\xb7\x69\x19\xe9\xc0\xc5\xde\xf7\x85\x45\x39\x62\xf8\x83\xa3\
\x09\x5b\xf7\x20\x1a\x1a\x1a\x89\x44\x42\x44\x22\x29\x84\x42\x16\
\xa9\xa9\xa9\x7d\x90\xcd\x5b\xe7\xa0\xaa\x1a\x8a\x22\x98\xfd\xd2\
\xa6\x3e\x7d\x49\xd9\x6c\x54\xd5\x60\xce\xcb\x9b\xd8\x50\x3a\x83\
\x05\xaf\x7c\x2c\x01\x8e\x1c\xad\x12\xa9\x83\x86\x51\x53\xbb\x93\
\xea\x03\x3b\x51\x82\xc0\xc7\xf3\x7c\x14\x55\xa7\x62\xf7\x5a\x56\
\xae\x9a\xda\xe7\xd9\xbc\x59\x5b\xf0\x7d\x8f\x94\x48\xea\x0d\xae\
\xf6\x82\x01\xb2\xb3\x9e\x07\xa0\xa1\xb1\x4a\x54\x1f\xd8\x4e\xe9\
\x27\x79\xd4\x35\x7c\x87\xae\x99\x88\xba\xba\xc3\x44\x22\x03\x38\
\x78\xb8\x9c\x13\xad\xb5\x44\xc2\x11\x4c\xd3\x44\x37\x74\x5e\x9f\
\xbf\x4d\x03\xbc\xf6\xf3\xa7\xb8\x6f\xc8\xf0\x5b\xc2\x72\xad\xf3\
\x0a\x2d\x27\x7f\xe1\xe0\xe1\x0a\x2e\x5d\x3a\x8b\xef\x43\x2c\x6e\
\x13\xeb\x8e\x81\x54\x7b\x0e\x96\x7d\xba\x8a\xba\x86\x5d\x0c\x48\
\x19\x98\x74\x43\x0d\xc6\x65\xbf\xc0\xd0\x8c\x91\x64\x44\x47\x8c\
\x04\x5a\x7a\x5b\x81\xed\xc4\xdd\x9a\xda\x1d\x1c\x6d\xaa\x44\x4a\
\xf0\x5c\x17\xdb\x76\xb0\x6d\x9b\xee\xb8\x4d\xac\x2b\x8e\x61\x46\
\x7a\x7a\x4b\x7d\xc3\xf7\x48\x5f\xc1\x75\x1d\x5c\xd7\xc3\xf7\x7d\
\x9e\x9e\xb4\x90\xcc\x47\x26\xcb\x8c\xe8\x08\x59\x52\x36\xff\x44\
\x52\x54\x5c\xd3\xb0\xe4\xc4\xf1\x33\xa4\x0c\x7a\xc0\x8e\xe3\xe2\
\x26\x56\x27\xee\xa0\x69\x26\x9b\xd6\x56\xf6\xc0\x4b\xd6\x55\x21\
\x84\x46\x2c\xe6\xa0\x28\x06\xd3\x9e\x2d\xc4\xf3\x83\xbe\x10\xcc\
\x9d\xb9\x51\x26\x95\x5e\xef\xb3\xe2\xd8\x36\x71\xdb\xc1\x71\x1c\
\xe2\xb6\x83\x6d\x3b\x04\x12\x4a\xd6\x55\x5f\xef\x8a\x00\xa5\x1b\
\x6a\x48\xbf\xfb\x01\xa6\x4e\x29\xa0\xab\xab\x93\x6f\xbe\x2d\xa6\
\xb5\xb5\xe5\xe6\xd2\x8b\x26\xad\x32\x6e\xdb\xd8\xb6\x4d\x3c\x6e\
\x63\xc7\x6d\x1c\xc7\xe7\xde\xf4\x61\xb7\x1f\x73\x4d\xc7\x7e\xa3\
\xb1\xb9\x92\xb1\x63\x72\x38\x7a\xfc\x47\xf6\xd5\x6e\x67\x4d\x51\
\xa5\x48\xea\x7a\x02\xe0\x8d\x82\x27\xf0\xbd\x9e\x4a\xb3\x6d\x9b\
\x40\x2a\x44\xd3\x87\xb1\x62\xf9\x8e\x3b\xcf\xd0\x93\x27\x4e\xb3\
\xad\xfc\x2d\xce\x77\xfc\x8e\x61\x98\xa8\xaa\xca\xfa\x0f\xaa\xd2\
\x80\xb4\x85\x4b\x26\xb4\x04\x52\x12\xf8\x01\x9e\xef\xe3\x3a\x3e\
\x86\x11\x62\x73\xf1\xde\x7f\x36\xa0\x01\x66\xcd\xcd\x44\x37\x42\
\x04\x32\x40\x06\x3e\x20\x13\xdd\x51\xa2\xa8\x1a\x48\x50\x14\x0d\
\x5d\x37\x59\xbf\xfa\x07\xfe\x77\xf9\x1b\xfd\x66\xfd\xe9\x0b\x3b\
\xd4\xe6\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\x35\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xde\x08\x07\x16\x1e\x2c\xb7\x75\x28\x79\x00\x00\x03\xc2\x49\x44\
\x41\x54\x48\xc7\xbd\x95\xef\x4f\x96\x55\x18\xc7\x3f\xe7\x3e\xe7\
\xbe\xef\xe7\xe6\xd1\x25\x84\x16\x2f\x62\xad\xb5\xb5\xfe\x80\x44\
\x6c\x9a\xb9\x55\xb6\xdc\xcc\xd1\x5c\x6d\xad\x17\x45\x85\x56\xae\
\x68\x09\x99\x10\x38\xa3\x5f\x68\x20\x25\xd9\x7c\xd3\xca\x8a\x32\
\x56\xad\xcd\x1f\x51\x10\xe4\xe6\x0f\x7e\x88\x42\x4b\xd3\x74\x6e\
\x9a\xa1\x63\x25\xca\xf3\xdc\xf7\xfd\x9c\x73\x7a\x41\xa8\x21\x39\
\xa1\xd6\xf5\xe6\xec\x9c\x5d\xd7\x67\xdf\x7d\xcf\xb9\xae\x23\xb8\
\x42\x74\x76\x76\xa3\x94\x8b\x31\x19\x7e\xff\xe3\x37\x06\x07\x4f\
\x72\xf8\x68\x37\xc7\x8e\xef\x47\x20\x70\xa4\x22\xfb\x9a\x3c\xd6\
\x54\x7d\x38\x6e\xbd\x18\x1f\xda\x85\x10\x0e\xae\xab\xe8\xed\xff\
\x86\xfd\x07\x5a\x88\x75\x88\xb5\x06\xac\xc5\x58\xd0\x5a\xb3\xe1\
\xad\x0e\x1e\x5b\x5a\xc0\x94\x64\x36\xf5\xb5\xdb\x2e\xe3\xa8\xb1\
\x07\xfd\x7d\x87\x38\x72\xb4\x93\xc3\x47\x77\x73\xfc\xc4\x01\xc0\
\xe0\x79\x3e\x75\x35\xdf\x02\x50\xfe\xf2\xbd\x58\x63\x31\xc6\x00\
\x90\x95\x95\xe0\xdc\xf0\x20\x8f\x3f\x35\x9b\xc2\x82\xc5\x3c\xfa\
\xc8\x0b\xe3\x2b\x6f\xfa\x6c\x13\xbb\x3b\x9b\x09\xe3\xb3\x24\x12\
\x01\xae\x52\x28\x57\x22\xa5\xa4\x7a\xe5\x97\x17\xf2\xaa\x6a\xee\
\xa7\x6a\xe5\x17\x17\xf6\x9b\xde\xaf\xe2\xf4\xc0\x49\x7e\x39\xd6\
\xcf\xad\xb7\x14\x52\xba\xbc\xf6\xef\xf0\x92\xe5\x73\x31\x26\x43\
\x22\xf0\x48\xf8\x3e\x6f\xac\xd9\x0e\xe0\x00\xe6\xaf\x3c\x7b\xa9\
\x90\xaf\xb7\xbd\xcb\xc2\x05\x25\xb4\x75\x7c\xce\xbc\x39\x45\x3c\
\x5b\x76\x27\x61\x18\x63\xb4\x60\x63\x43\x07\xa3\xc5\x94\x57\x2c\
\x41\xeb\x98\x44\x62\x04\xec\x79\xee\x28\xa3\x0f\xe8\x02\xfa\xc7\
\xb8\x67\x17\x2e\x28\xb1\x80\x9d\x37\xa7\x88\xca\x57\x16\x51\xf7\
\x7a\x2b\x89\x84\x07\x64\x78\xa6\xf4\xee\x8b\xca\x8b\x97\x15\xe2\
\xf9\x92\x20\x08\x48\xf8\x1e\xbe\xef\x53\x59\xde\xcc\x18\xb5\x97\
\x5a\x68\xaf\xf4\x28\x8a\x97\xcd\x62\x6a\x32\x17\x55\xb2\xfc\x0e\
\xb4\x8e\x70\x5d\x17\x57\xa9\x91\xd5\x55\x4c\x26\x5e\x5a\x7d\x1f\
\xc3\xc3\x29\xd2\xe9\x98\x30\x1e\xc6\x11\x08\xa4\x72\x50\x4a\xa2\
\x5c\x89\x23\x1d\x5e\x7c\xfe\xd3\x09\x83\x5f\x5b\xf7\x10\x59\xc1\
\x14\x66\xe4\xde\x80\x94\x02\x63\x34\x8e\xd6\x31\x4a\x4a\xa4\x23\
\xc1\x5a\x2a\xcb\x9a\x27\xa5\xba\xbc\xf4\x63\xa4\x54\x9c\x1d\x1a\
\x40\x29\x89\xd6\x31\xe2\x89\xa7\x6f\xc7\xf3\x25\x6f\xaf\x6d\x1f\
\xaf\x66\x52\x9e\x3f\x57\x3e\x9f\x73\x43\x29\x1c\xc7\x71\xc8\x99\
\x76\x3d\xff\x65\x08\x21\x10\x02\x44\xe3\x7b\xd5\xdc\x35\xff\x61\
\x92\xc9\x2c\xf2\xf2\xf2\xfe\xb5\xf2\xaa\x9a\x22\x6e\xbe\x69\x16\
\x59\x89\x19\x88\x9e\x9e\x5e\x92\xc9\x80\x64\x72\x0a\x41\x90\x20\
\x27\x27\x67\xd2\xf0\xee\x7d\xad\xe4\x4c\xcb\xa7\x7d\xe7\x16\xda\
\x7e\xd8\x82\xe8\xea\xea\x26\x08\x02\xb2\x73\x72\xd9\xba\xa3\x9e\
\x93\xa7\x7e\x62\xd5\x8a\x2d\x13\x86\xf7\xf4\xb6\xd2\xda\xb1\x99\
\xd3\xa7\x7f\x65\xe8\xfc\x10\x71\x68\x10\x7b\xf6\xec\x25\x99\x9c\
\xca\xae\xbd\x4d\x1c\x3c\xb2\x93\x64\x56\x12\xdf\xf7\x29\x2f\xfd\
\xe4\xaa\xe1\x0d\x1b\x9f\xe4\xcc\x99\x13\x68\x0d\xa9\x74\x48\x6a\
\x38\x05\x56\xe2\xcc\x9c\x79\x1b\xbb\xf6\x7e\xc5\x9e\x9e\xad\x18\
\x03\x71\x1c\x13\xc7\xf1\x84\x2e\x70\x68\x68\x10\x90\x64\x32\x19\
\xe2\x4c\x86\x38\xd6\x08\xe9\x8e\x8c\xdc\xce\x9e\xad\x58\xed\x10\
\xc7\x11\xd2\x71\x50\x6a\x62\x1d\x1a\x45\x23\x82\xa2\x28\x26\x4a\
\x47\x28\xe5\xf3\xce\xba\x96\x91\xc1\xd5\x58\xdf\x8a\x10\x8a\x54\
\x2a\xc2\x71\x3c\x96\x2c\xae\x9a\x20\x3c\x22\x1d\x46\x84\x61\x84\
\xb1\xd0\x58\xdf\x76\x71\x2a\x02\x6c\x6c\x68\x67\xfa\xb5\x37\xf2\
\xc0\xa2\x0a\xce\x9f\x3f\x47\xf5\xab\x0f\x8e\xfa\x2c\xc6\x69\x14\
\x01\x88\x55\xab\x17\x8a\xb2\xca\x05\xa4\xd3\x21\x61\x3a\x24\x8a\
\x34\xd7\x4d\xcf\xff\xe7\xee\x3a\xb0\xff\x47\x7a\xfb\x5b\x98\x3b\
\xbb\x88\x7d\x7d\x3b\xf8\x7e\xe7\x66\xd6\xd6\xb4\x5c\xa6\x76\x45\
\xc5\x3d\xe8\x8c\x26\x93\xd1\x84\x61\x88\xb1\x0e\x79\xd3\xf3\xa9\
\xae\xfc\xe8\xca\x7f\xe8\xa1\x83\xc7\xf8\xa0\x69\x15\xa7\x06\x7e\
\xc6\xf3\x7c\xa4\x94\xac\x7f\xb3\x75\xa4\xb5\xcb\xe6\x63\xac\xc5\
\x68\x43\x46\x6b\xe2\x48\xe3\x79\x01\x1b\xea\xbe\xbb\xba\x0f\x1a\
\xa0\x78\x69\x01\xae\x17\x60\xac\xc1\x1a\x0d\x58\xac\xb5\x80\xc5\
\x91\x0a\x2c\x38\x8e\xc2\x75\x7d\xd6\xd7\x6e\xe7\x7f\x8f\x3f\x01\
\xf0\xd9\x8b\xc3\x32\x43\x6f\xfe\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x03\xf5\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdf\x04\x16\x15\x0b\x25\x1d\x53\xc6\xce\x00\x00\x03\x82\x49\x44\
\x41\x54\x48\xc7\xbd\x95\x7d\x68\x55\x65\x1c\xc7\x3f\xcf\x39\xe7\
\xbe\x9c\x73\xb7\x64\xf3\x66\xd9\x5f\xbd\x0c\x44\x91\x8a\xc0\x79\
\x1d\x18\x26\xb3\x82\x82\x15\x1b\x11\x42\x26\x6b\x51\x2b\x75\xe4\
\x7a\x21\x97\x35\x45\xb3\x9a\x90\x9b\x9a\x08\x66\x2f\x2a\x31\x4c\
\x04\x5b\x0d\x0b\xbb\x23\x89\xdc\x96\x33\xed\x85\xe6\xdd\x72\x09\
\x77\xd7\xbd\x60\xb6\xeb\xee\xdd\xed\xbc\xf5\x87\x67\xb7\xe3\xed\
\xdc\x75\xff\xea\x07\x87\xc3\xf3\xc2\xf7\xf9\x7e\x7f\xbf\xe7\xfb\
\x7b\x04\xff\x8e\x24\x50\xe4\x31\xcf\xa7\x47\xf6\x8b\x81\x0b\x7d\
\x0c\x5d\x3c\x87\x40\x20\xc9\x0a\x25\xb3\xe6\xb2\xa5\xf9\xa0\xd7\
\x76\xa4\x9c\xb1\x0d\x54\x01\x77\x03\x25\xc0\x0d\xce\x17\x01\x9a\
\x6a\xaa\x6b\x27\x12\x23\x3f\xe1\xf3\x4b\x08\xd9\x62\xf7\xbb\x27\
\x48\x8c\xc4\x68\x78\xf1\xc1\xff\x04\xb7\x01\x3f\xf0\x35\x70\x16\
\xb8\x02\xdc\xe6\x28\xe9\x06\xb6\x01\x2b\x5a\x5b\xba\x26\x34\x35\
\x48\x48\x53\x01\xd0\xb4\x20\x57\x53\x97\x79\xfa\xf9\x0a\xf6\x7f\
\xdc\x72\x1d\xb8\x70\x01\x8b\x9c\x83\xdd\x6b\xee\x58\x0c\x7c\xe5\
\x28\x02\x60\xdf\x47\xcd\x8c\x8d\x0e\xf3\xdb\xd0\xcf\xcc\x9f\xb7\
\x84\xf5\xeb\xb6\x67\x99\x67\x80\x79\x1e\xaa\x6c\x0f\x60\x80\x1e\
\xe0\xad\xe9\xb5\xae\x93\x47\xa8\x7b\xb2\x99\x91\xf1\x18\x8a\x4f\
\xd0\x1f\xeb\xbe\x2e\x2d\x49\xe0\x26\x0a\x0f\x1b\x38\x31\x3d\x58\
\xb6\xb4\x9a\xd7\xb7\x56\xb1\xe3\xed\x28\xc1\xa0\x1f\x30\x58\xbb\
\xfe\xfe\xac\xf4\xf3\x40\x05\x30\xee\x1c\xe6\x66\x5c\x0a\x5c\x76\
\x01\xab\x40\xda\x49\xc9\x9f\x1e\xa9\x04\xa0\xee\xb9\x08\xc5\xa1\
\x30\x8a\xc3\x7c\x3c\x27\x05\x09\xe7\x3f\xe2\x52\x35\xd7\xb5\x3e\
\xcb\x0b\xb4\x69\xf3\x43\xa4\x52\x69\xa6\xa6\x74\x32\x7a\x0a\xc5\
\xa3\x80\xee\x98\x03\x8c\x7a\xcc\x97\x7a\x81\x6b\x6a\x11\x45\x5a\
\x98\x78\xe2\x02\x86\x6e\x22\x01\xc5\xf9\x98\x00\x63\x79\xe6\x97\
\x78\x32\x7f\xa9\x9d\x89\xe4\x28\x8a\x22\x63\x9a\x3a\x92\xc3\xa2\
\x62\x86\xe2\xe1\x71\x45\x57\xe4\xab\xf6\xb6\x4d\x9d\x08\x21\x00\
\x1b\xe9\xd9\x75\x4b\xc3\xc0\x17\x8e\x02\xaf\x7b\x9e\x9b\xae\x6a\
\xa0\x21\x5f\x31\x01\x84\x10\x08\x01\xca\x5d\x0b\x2b\x19\x18\xf8\
\x50\x94\x95\xdd\x61\x3b\x96\xbf\x92\x63\x2c\x37\xfb\x1a\xe0\xf0\
\x0c\xc6\xa3\xf9\xcd\x6a\xca\x6e\x8f\xa0\x05\xe7\x20\x45\x16\x3d\
\x82\x65\x99\xc4\xe3\xc3\x02\xf8\x03\x58\xee\x34\xae\x69\x60\x09\
\xb8\x07\xd8\xed\x02\xce\xa6\x6d\xe7\xde\xda\xec\xa0\xef\x87\x28\
\xab\x57\xbe\x83\x65\x5a\x74\x1c\xdf\x85\x38\x7d\xba\x0f\x55\x55\
\x29\x29\x0d\xd3\xf9\x65\x2b\xb5\xab\xb6\xd8\x33\x18\x48\xe4\xab\
\xc3\x99\xb3\x51\xa2\x27\x0f\x31\x36\x96\x20\x39\x99\x44\xcf\x58\
\x88\x9e\x9e\x5e\x42\xa1\x62\x4e\xf5\xb6\xd3\x3f\xf8\x2d\x21\x2d\
\x44\x20\x10\xc0\xe7\xf7\xd1\xb8\xe6\x00\x00\xc3\x97\x06\xb8\xe5\
\xe6\xb2\x19\x0b\xfd\xc6\xd6\x87\x85\x69\x42\x7a\x2a\x43\x3a\x95\
\x06\x5b\x46\x2a\x2f\x5f\xc4\xa9\xde\x63\xf4\x9c\xe9\xc4\xb2\x40\
\xd7\x75\x74\x5d\xc7\xd0\x0d\xba\xbf\xef\x20\x9e\x88\xe5\x02\x7b\
\x7a\x62\x53\x53\x87\x6d\x18\x06\xba\x61\xa0\xeb\x26\x42\xf6\x5d\
\xdb\x54\xdf\x70\x1f\xa6\x99\x41\xd5\xfc\x04\x03\x41\x54\x35\xc8\
\x63\x8f\x6e\x60\xc1\xfc\x72\x00\xf6\xbc\xbf\x86\xfa\xa7\x76\x15\
\x7c\x55\xeb\x1b\x96\xb1\xa7\xb5\xeb\x1f\x06\xcf\xac\xbd\x17\x1b\
\x83\xd9\xa5\x61\x9e\x78\x7c\x33\x86\x39\xc5\x9d\x0b\x23\x85\x36\
\x32\x4f\x65\xd9\xc7\x62\xef\xce\x6f\xb8\x71\xf6\xad\xd4\x54\x6d\
\x64\x72\xf2\x2a\x47\x3f\xdb\xc1\xe0\xe0\xaf\x85\x80\x8b\x02\x0f\
\x84\x1f\xcf\xfd\xc2\xc1\x4f\xda\xb8\xf8\x7b\x9c\x63\x9f\x7f\x40\
\xe3\x86\x4a\xcf\x7d\x2f\x6f\x7c\x80\xc6\x57\x2b\xbd\xfa\x7f\xb6\
\xab\x7a\xba\xec\x7c\xff\x10\x07\xda\x5f\xe3\xd2\x68\x0c\xbf\x3f\
\x80\x2c\xcb\xb4\xb5\x44\x01\x78\xe1\x95\xe5\x58\xb6\x8d\x65\x5a\
\x18\xa6\x89\xfe\x97\xc9\xbe\xf7\xbe\xf3\x62\x2c\xf2\x5a\xb8\xae\
\x7e\x31\x3e\xbf\x8a\x65\x5b\xd8\x96\x09\xd8\xd8\xf6\x35\x52\x92\
\xac\x80\x0d\x92\xa4\xe0\xf3\x05\x68\xdb\x7e\x9c\xff\x3d\xfe\x06\
\x9a\x76\x3f\xce\x8e\xa1\xe2\x01\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x04\xf2\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xde\x08\x07\x15\x18\x20\xea\xdf\x7d\x8d\x00\x00\x04\x7f\x49\x44\
\x41\x54\x48\xc7\xbd\x95\x6b\x6c\x55\x45\x14\x85\xbf\x99\xf3\xbc\
\xbd\x54\xc5\xf8\xa0\x89\x4a\x22\x18\x91\xff\x3c\x8a\xf2\xb0\x89\
\x51\x23\x1a\x10\x43\x02\x21\x84\x20\x90\x02\xda\x48\x15\xa8\x50\
\xb0\x54\xd2\x00\x02\xf2\x2e\x20\x20\x2a\x0f\x2b\x4a\xd0\x54\x01\
\xa9\x14\x68\x1a\xa0\x94\x57\xa1\x46\x10\x2c\xc1\x00\x0a\xa4\x22\
\x2d\xb7\xf7\xbc\x66\xfc\x71\xa1\x96\xb6\x20\xbf\xdc\xc9\xc9\xc9\
\xd9\x39\xb3\x66\xcd\xde\x6b\xed\x11\xdc\x25\xaa\xab\x8f\x60\x9a\
\x16\x4a\x85\x5c\xfb\xfb\x4f\xea\xeb\x2f\x72\xa6\xee\x08\xe7\xce\
\xd7\x20\x10\x48\xc3\xa4\xe3\xfd\x19\xcc\x2e\xd8\xd0\xee\x7a\xd1\
\x3e\xe8\x61\x84\x90\x58\x96\xc9\xf1\xda\x5d\xd4\x9c\x28\x23\x88\
\x3c\xb4\x56\xa0\x35\x4a\x43\x14\x45\xac\xf8\xb8\x82\x37\xc7\xf7\
\xa2\x43\xbc\x23\x8b\xe7\xef\x68\x83\x63\xb6\x4e\xd4\x9e\x3c\xcd\
\xd9\xba\x6a\xce\xd4\x1d\xe4\xfc\x85\x13\x80\xc2\xb6\x1d\x16\x15\
\xfd\x04\x40\xde\x07\x2f\xa3\x95\x46\x29\x05\x40\x5a\x9a\x4b\x63\
\xa2\x9e\xb1\x13\xfb\x90\xd9\x6b\x30\xa3\x47\x4e\x6e\x9f\x79\xc9\
\x96\x35\x1c\xac\xde\x8a\x17\x5c\xc7\x75\x63\x58\xa6\x89\x69\x19\
\x18\x86\xc1\xac\x69\xdf\x36\xff\x57\x50\x34\x88\x82\x69\xdb\x9a\
\xbf\xd7\x7c\x56\xc0\x95\xcb\x17\xf9\xed\x5c\x2d\xcf\x3c\x9d\x49\
\x6e\xce\xfc\xdb\xc1\xb3\x73\xfa\xa1\x54\x88\x1b\xb3\x71\x1d\x07\
\xcb\xb2\xb0\x6c\x13\xd3\x34\xc9\x9f\xfc\x75\x9b\x23\x97\xee\x58\
\xc9\xc0\x97\xb2\xd9\x53\xf1\x0d\x03\xfa\x0e\xe1\x9d\xa9\xcf\xe3\
\x79\x01\x2a\x12\xac\x5a\x5a\x01\x80\x01\x90\x37\x63\x28\x37\x12\
\x7f\x11\x8b\x39\xb8\xae\x83\x6d\x5b\x38\xae\x8d\x6d\x59\x4c\x7f\
\x6f\x4b\xbb\xcd\xda\xb4\xa1\x14\x80\xce\x4f\x25\xe9\x9f\xd5\x8d\
\xb9\x85\x3b\xe8\x3b\xa0\x2b\x41\xe0\xd3\xe7\xb9\xee\x54\xed\x3f\
\x9b\x62\x3e\x66\x42\x26\xb6\x63\x10\x8b\xc5\x70\x1d\x1b\xc7\x49\
\x6d\x90\x97\x5b\x02\xc0\xe7\x9b\xf3\x18\x39\x6c\xce\x9d\x44\xa5\
\x5b\x97\x78\xcc\x84\xde\xa4\xc7\x1f\x42\x64\xe7\xf4\x27\x8a\x7c\
\xd2\xe2\x2e\x31\xd7\xc5\x75\x1d\x1c\xc7\xe6\xfd\x77\xbf\x02\xe0\
\xfc\xef\xb5\x3c\xfe\x58\x77\x84\xf8\xb7\x3d\x4b\x56\x8e\x22\x27\
\x7b\x7d\x4b\x60\x00\x31\xbd\xf0\x15\x12\x89\x26\x92\xc9\x00\x81\
\x85\x18\x9f\x33\x00\x2d\x02\xe2\xf1\x18\xb1\x98\x8b\x6d\xdb\xcc\
\x9c\xba\x35\x45\x49\xa7\x54\x21\x84\x40\x4a\x29\x00\xd5\x82\xe1\
\x6d\xc0\x73\x16\x0e\x23\x8a\x14\x52\xb8\x5c\xb8\x54\x47\x18\x08\
\x64\x14\x05\x98\x86\x81\x21\x0d\xd0\xba\x19\x38\x0c\x43\x94\x52\
\x48\x29\xd9\xbd\x77\x6d\xeb\x32\xe8\xd6\x5e\xc9\xcb\xdd\x8c\x61\
\x98\x5c\x6f\xb8\x8c\x69\x1a\x44\x51\x80\x18\xf7\xd6\xb3\xd8\x8e\
\xc1\xb2\x05\xfb\xee\x66\xd6\x5b\x6c\xd5\xbd\x98\x70\x52\x5e\x16\
\x8d\x0d\x4d\x98\x52\x4a\x1e\x7c\xa0\x13\xf7\x10\xed\x01\xc9\x56\
\xa7\xd0\x00\x42\x08\x84\x00\x51\xbc\x7a\x16\x2f\x64\x8d\x20\x1e\
\x4f\x23\x23\x23\xe3\xbf\xc0\x55\x3b\x79\xa3\x65\xbe\xa0\x68\x08\
\x5d\x9f\xec\x4d\x9a\xfb\x08\xb2\x77\x8f\x41\x28\x15\xa1\x94\xa6\
\xbe\xbe\xbe\x75\x6d\x0f\xdf\x7c\xf4\x4d\x00\x09\x88\xe2\xb5\x63\
\xc5\xea\xf5\x13\x05\x20\x97\xae\x1a\xdd\x0c\x7c\xe4\x58\x39\xa3\
\x86\xcf\x43\x45\x8a\xd2\x9d\xcb\x30\x95\x8a\x08\xc3\x08\x69\x58\
\x6c\x2b\x5d\xc8\xec\x79\x6f\x90\x3f\xa5\xd9\x91\x63\x5b\x6c\xf6\
\x49\x33\x55\xc3\x66\xdc\xa8\xe5\x00\xf4\xcd\x1c\x0e\xac\xe3\xe8\
\xf1\x72\xca\x2b\x36\x72\xe5\xca\x25\x1a\x6e\x34\x60\x99\x0e\xa2\
\xaa\xea\x10\xf1\x78\x3a\x07\x0e\x95\x70\xea\x6c\x25\xf1\xb4\x38\
\x8e\xe3\x90\x97\xfb\xa5\x06\x5e\x05\xf6\x00\xe9\xc0\xc5\xd6\x35\
\x6e\x68\xbc\xc6\x2f\xa7\xf7\x73\xe0\xd0\x36\xae\x5e\xbd\x40\x14\
\x41\x53\xd2\xa3\x29\xd1\x04\xda\xc0\xec\xd9\xb3\x07\x6b\xd7\xcf\
\xa3\xea\xe8\x76\xd2\x3b\xdc\x47\x10\x04\x48\x29\x6f\xad\xaf\x07\
\xba\x00\x83\x5b\x36\xcc\xf3\x93\xec\xab\xdc\xc4\xca\x75\x13\xd0\
\x1a\xc2\x20\x00\x0c\xc2\xd0\x23\x08\x43\x82\x20\xc2\x76\xdc\xd4\
\xc8\xad\x3e\xba\x1d\x1d\x49\x82\xc0\xc7\x90\x12\xd3\x6c\x9e\xc4\
\x95\xed\x75\x76\xf5\xa7\x6f\x93\x4c\xde\x48\x01\x87\x01\xbe\x1f\
\x10\x04\xa9\xb7\x9f\xf4\x31\x4d\x87\xe5\x0b\xcb\x90\x00\xc5\x8b\
\xcb\x11\xc2\xa4\xa9\xc9\x47\x4a\x9b\xa1\x83\x0b\xa8\x39\x79\x40\
\x14\xce\x1d\x22\xbe\xfb\x7e\xb9\x48\x24\x12\xa2\xa5\x14\x13\x89\
\x46\xc2\x30\xc2\xf7\x3c\x92\x9e\x8f\xef\xfb\x24\x3d\x1f\xcf\xf3\
\x51\x1a\x8a\x17\xef\x69\xab\xdd\xe9\x05\xc3\x79\xfd\xb5\x49\x28\
\xe5\xf1\xc3\xae\x65\x8c\x18\x5a\x40\x97\x2e\xdd\xee\xa8\xcd\xfc\
\xc2\x81\xa9\x32\xf8\x01\x9e\xe7\xe3\xfb\x11\x9d\x1e\xed\xcc\x87\
\x33\x37\xb7\x6f\x8c\x13\x35\x3f\x73\xbc\xb6\x8c\x7e\x7d\x86\x70\
\xec\xe4\x8f\xec\xad\xdc\xc8\x82\xa2\xb2\x36\xc0\x53\x66\xbc\x48\
\x14\xa6\x94\xe6\x79\x1e\x4a\x4b\x32\x1e\x7e\x82\x59\x33\x37\xdd\
\xdd\xbe\xa7\x4f\x9d\xe3\x8b\x92\x7c\xfe\xb8\xfc\x2b\xb6\xed\x60\
\x18\x06\x4b\x3e\x2a\x4f\x59\x7b\x6a\x16\x4a\x6b\x54\xa4\x08\xa3\
\x88\xc0\x8f\xb0\xed\x18\x2b\x16\xed\xbe\xb7\x0b\x1a\x60\xcc\xf8\
\x5e\x58\x76\x0c\xa5\x15\x5a\x45\x80\x46\xeb\xd4\xcc\x92\x86\x09\
\x1a\xa4\x34\xb1\x2c\x87\x25\xf3\x77\xf2\xbf\xc7\x3f\xac\xc8\xf0\
\x46\xa1\x99\x13\x14\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x05\x4f\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xde\x05\x1d\x17\x2d\x14\xad\xea\xec\xaa\x00\x00\x04\xdc\x49\x44\
\x41\x54\x48\xc7\xbd\x95\x59\x6c\x56\x45\x18\x86\x9f\x59\xce\x39\
\xff\x52\x04\x0b\x0a\x22\x88\x0b\x6a\xd0\x1a\x88\x10\x04\xa3\x20\
\x6a\xd1\xa8\x89\x89\x1a\x2f\x5c\x48\x54\x54\x20\x42\xa2\x09\x89\
\x37\xe2\x06\x17\x1a\xac\x0a\x82\xca\xa2\x26\x8a\xc4\x8d\x60\x44\
\xad\x50\x10\x90\x06\xa9\x2d\x45\x70\xa1\x85\x5a\x84\x14\x10\xb4\
\x0a\x48\xf9\xcf\x39\xb3\x78\x71\xaa\x58\x29\x7a\xe7\x97\x4c\x26\
\x99\x64\x9e\xf9\xe6\xcd\xcc\xfb\x0a\xfe\xa5\xea\xeb\x37\xa3\x75\
\x80\x73\x86\xdf\x0e\xfd\x44\x7b\xfb\x5e\x76\xb6\x6e\x66\xd7\xee\
\xad\x08\x04\x52\x69\x4e\xed\x79\x06\x33\x9f\x78\xab\xdb\xfd\xa2\
\x7b\x68\x03\x42\x48\x82\x40\xf3\xf5\xb7\xab\xd8\xba\xad\x86\xd4\
\xc6\x78\xef\xc0\x7b\x9c\x07\x6b\x2d\xd6\x58\x8c\xb1\x94\x15\x4f\
\xe5\xc5\xd9\xd5\xff\x0d\xff\xf6\x9b\x66\x5a\x5a\xeb\xd9\xd9\xba\
\x89\xdd\x6d\xdb\x00\x47\x18\x46\x48\x21\x11\x52\xe0\xbd\xc7\x3b\
\x8f\x73\x0e\x63\x2d\x26\x35\xcc\x7d\x6e\xdd\x7f\x77\xfe\xce\x7b\
\x8b\xd8\x54\xbf\x8c\x38\x3d\x4c\x2e\x97\x27\xd0\x1a\x1d\x28\x94\
\x52\x19\x1c\xf0\x80\xf7\x1e\x6b\x2d\xde\x79\x66\x3e\xbe\xe2\xa4\
\xb2\xfe\x05\x9f\x34\x6d\x0c\xce\x19\x72\xf9\x90\x5c\x14\x11\x04\
\x01\x41\xa8\xd1\x5a\xa3\xa4\x42\x4a\x01\x42\xe2\x6c\x8a\x75\x8e\
\x61\x97\x54\x32\xb4\x62\x3c\x03\x07\x9c\x7b\x52\xb8\x06\x78\xf4\
\xb1\xdb\xf9\xa5\x7d\x0f\xf9\x7c\x44\x2e\x8a\x08\xc3\x80\x30\x0a\
\xb3\xce\xb5\x46\x4a\x09\x78\x4e\xeb\x3d\x88\xf3\x07\x8f\x64\xf8\
\xb0\x1b\xe9\xe8\x38\x4c\xcd\xda\xd7\xe9\xbc\x0c\xc0\x70\x60\x33\
\xc0\xd4\x47\xc6\x33\xb7\x6a\x65\xd6\xf9\xc4\x29\xa3\x09\x23\x45\
\x3e\x9f\x27\x17\x85\x44\x9d\x07\x04\x41\x80\x94\x82\xde\xe5\x67\
\x72\xdd\x35\x93\x29\x2f\xef\x8f\x56\x9a\x96\xd6\x46\xaa\x6b\x5e\
\xe6\xa1\x07\x16\xfa\x7f\xaa\x30\x75\xfa\x55\x1c\x3b\x5a\xa2\x47\
\xb1\x0f\x7a\xd2\xb4\xb1\x58\x9b\x64\x32\x68\x9d\xcd\x81\x06\x1c\
\x03\x07\x5c\xc4\xa8\x11\x37\x33\x70\xc0\x45\x00\x24\xc9\x31\x3e\
\xfd\x7c\x21\x3b\x5b\xea\x98\x36\xe9\x8d\x2e\xe0\xa7\x9f\xb9\x85\
\x52\x1c\xd3\xd1\x71\x8c\x20\x0c\x88\xd3\x0e\xb4\x40\xa0\xb4\x44\
\x6b\x85\x0e\x14\x52\x49\x8a\x65\xbd\x18\x3f\xee\x3e\x2e\x3c\x7f\
\x24\xce\x39\xbc\xf7\xec\xff\x69\x27\x1f\x55\x57\x21\x84\x3a\x01\
\x3c\x7b\xce\x9d\x18\xe3\x90\x32\xa0\xac\xd0\x87\xb6\x7d\xad\x98\
\xd4\xa2\xad\x4d\x09\x23\x85\x92\x0a\xbc\x67\xd4\x88\x9b\xb9\xfc\
\xb2\x5b\xc8\xe5\xf2\x38\xe7\x90\x52\xb2\x7a\xed\x22\x76\xed\xd9\
\x8a\x52\x01\x13\x27\xcc\xed\x02\x7e\x7e\xfe\xdd\x0c\x3e\x67\x24\
\xc6\x5a\x9a\x9a\xbf\xe2\xd0\xa1\x03\x68\xad\x88\x4b\x31\x1a\x91\
\xa9\x55\x2c\xf6\xe4\xa6\xeb\x27\x53\x31\xe4\x0a\x10\x0e\x21\x24\
\x47\x8e\x1c\xa4\x66\xdd\x22\xda\x7f\x6d\x43\x29\xcd\x7d\x77\xcf\
\x39\x41\xe3\x31\xa3\xef\xe0\xfb\xe6\x5a\xf6\xb4\x35\x63\x8d\x45\
\x4a\x89\x10\xd9\xa3\xd5\x52\x4a\xca\x7b\xf5\x63\xe2\x84\x2a\x0a\
\x85\x32\xac\x33\x04\x3a\xe8\x94\xe1\xf9\xec\x8d\x4b\xd5\x2d\x78\
\xe5\xea\x57\xf9\xb2\x7e\x19\xc6\x1a\x02\x15\x60\x8d\x43\x08\xd1\
\x39\x40\x0f\xad\xb8\x96\xca\xab\xef\x42\x4a\x05\x40\x18\xe6\xa8\
\x6b\x58\xce\x57\x8d\x1f\x12\x05\x39\x3c\x9e\xfb\x27\xbc\xd4\x05\
\xfc\xc3\x8f\x8d\x6c\xdc\xf4\x2e\x2d\xbb\x1a\xb2\x15\xef\x71\x78\
\xd2\xb4\x84\xd6\x11\x97\x0e\x1d\x4b\x21\x77\x3a\xa2\xb1\xf1\x6b\
\x8a\xc5\x3c\xc5\x62\x19\xf9\x7c\x8e\x0d\x1b\x97\xf2\xfd\x8e\xf5\
\xe4\xf3\x45\xb4\xd2\x4c\x99\xb8\xa0\x0b\xb8\xae\x61\x39\x0d\x5b\
\x3e\x46\xa9\x10\x6b\x0d\xc6\xa4\x08\xa1\xe9\x51\xec\xc3\x25\x17\
\x57\x52\xde\xeb\x2c\xd6\xd7\xbe\xcf\xda\x0d\xef\xa3\x9d\xcb\xcc\
\x47\xaa\x80\xe5\x2b\xaa\xd8\xbb\x7f\x3b\x51\x14\xe1\x9c\x63\xca\
\x83\x5d\xc1\x4b\x3f\x98\xc1\x96\x6d\xab\x50\x2a\xc4\x98\x14\x29\
\x15\x57\x8e\xbe\x83\x0b\x06\x5f\x4e\xd3\x8e\x4d\x7c\xfe\xc5\x12\
\x0e\x1e\xdc\xc7\x91\xa3\x47\x08\x74\x84\xb6\xd6\xe2\xbd\xe0\x93\
\xea\xf9\x34\xb5\xd4\x51\x2c\x14\xb1\xd6\x32\x7d\xf2\x92\x13\x34\
\x3e\x74\xf8\x20\xc6\x24\xf4\x3b\xfd\x3c\x2a\x86\x8c\xe3\xcc\xfe\
\x43\xd8\xde\xbc\x91\xc5\x6f\x3e\xcc\xcf\x3f\xb7\x61\x2d\x24\x69\
\x82\x33\x0e\x25\x55\xb6\x69\xf1\x1b\xcf\x52\xd7\xb8\x8c\x1e\x65\
\xa7\x90\x8b\x42\x66\xce\x58\x71\x02\x78\xde\xc2\xfb\x89\xa2\x02\
\x37\x8c\x9f\x4a\xef\xf2\x01\xac\xaf\x7d\x9b\x2d\xdb\x6a\xf0\x1e\
\x4c\x9a\x12\xc7\x09\x71\x1c\xd3\x51\x8a\x39\x76\xb4\x44\x18\x15\
\x33\x6f\xa9\x6f\xfc\x14\x6f\x25\xb3\x67\xad\xfc\xbb\x57\xfc\x05\
\x7e\x79\xf1\x43\x54\x8e\xbb\x97\xfe\xfd\x2e\x60\xed\x86\x37\x69\
\x69\xdd\x4c\xa9\x74\x34\x03\x9b\x94\x24\x49\x49\xd3\x6c\x4e\x4a\
\x09\x5a\x47\xcc\xab\xaa\xe9\x36\x2c\xfe\x84\x8b\xa7\x9e\xb9\x95\
\x61\x15\x57\x73\xed\xb8\x7b\xf8\xae\xa9\x96\x55\x6b\x16\xa0\x94\
\xc6\x7b\x70\xce\x61\x8d\x21\x31\x06\x93\xa6\x94\xe2\x84\xb8\x14\
\x93\x1a\xc7\x82\xb9\xb5\xc7\x5d\xf1\x64\x36\x7c\xe7\x6d\xb3\xc8\
\xe5\x0a\xbc\xf2\xda\x34\x7e\xff\xbd\x1d\x21\x05\xc6\x26\x38\xeb\
\x32\xb8\xb5\xa4\xc6\x90\x26\x99\x2c\x49\x62\xe9\xd7\x77\x10\x70\
\x72\xf8\xf1\x54\xda\xbe\x91\x75\xb5\x4b\x08\xb4\x46\x4a\x85\x10\
\x02\x4f\x96\x44\xd6\x1d\x8f\xb9\x38\x8e\x71\x5e\xd2\xbf\xef\xd9\
\x3c\x39\xe3\xed\x7f\xcf\x50\x80\xc7\x9e\xba\x8b\xfd\x07\x76\x10\
\x86\x51\xe7\x2f\xcd\x7e\x5e\x96\xa1\x1e\x67\xb3\x98\x4b\x13\x4b\
\x18\xe6\x99\xff\xc2\x9a\xee\xc3\xa2\xbb\xda\xb7\x6f\x07\x41\x98\
\xc7\x18\x47\x9a\x24\x80\xc7\x7b\x0f\x78\xa4\xd2\xe0\x41\x4a\x4d\
\xa1\x50\x64\xce\xec\xcf\xf8\xdf\xeb\x0f\x3f\x38\x43\x52\x10\x42\
\xfc\xab\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\x9d\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xde\x05\x1d\x17\x39\x06\x70\xfd\x4a\xb7\x00\x00\x04\x2a\x49\x44\
\x41\x54\x48\xc7\xbd\x95\x5d\x6c\x15\x45\x14\x80\xbf\x99\x9d\x99\
\xdd\x76\x8b\x02\x82\xf2\xa3\xc4\x68\x15\x4b\x48\x7c\x31\x22\x3e\
\x60\x20\xfe\x3d\x90\x20\x81\x10\x1f\x10\x13\x84\x04\x62\x24\xa1\
\x09\x52\x08\xa5\x85\xd0\xaa\x04\x0c\x15\xb1\xa1\x02\x11\x14\x09\
\x01\x51\x1f\x0c\x8a\xb5\x05\x94\xc4\x42\xa5\x14\xa8\xe1\xd7\x12\
\x4c\x41\x69\x45\x01\xdb\x7b\x77\xf7\xee\xae\x0f\xb7\xf7\x72\x29\
\x6d\xe1\xc9\x93\x6c\x26\x39\x73\xf6\x3b\x33\xe7\x9c\x39\x47\xd0\
\x8f\x34\x36\x1e\x45\x29\x4d\x14\xa5\xf8\xe7\xda\x9f\x5c\xbd\x7a\
\x89\x73\xad\x47\xb9\x70\xf1\x38\x02\x81\xb4\x14\x83\xee\x1d\xce\
\xaa\xf2\xcf\x7a\xfd\x5f\xf4\x0e\xfd\x05\x21\x24\x5a\x2b\x9a\x5b\
\xbe\xe7\xf8\x89\x5a\x82\xd0\x23\x8e\x23\x88\x63\xa2\x18\xc2\x30\
\x24\x4c\x85\xa4\x52\x21\x05\xee\x20\xaa\xd6\x7c\x7b\x67\x78\xcb\
\xc9\x33\x9c\x6f\x6d\xe4\x5c\x6b\x03\x17\xdb\x4e\x00\x11\xc6\xd8\
\x48\x21\x79\x77\xe5\x5e\x00\x16\x97\xbe\x4c\x14\x45\xa4\xc2\x90\
\x54\x90\xa2\x2b\x91\x40\x0a\xc5\xf8\x71\x53\x99\x3d\x6b\x51\xef\
\xf0\x9d\xbb\x36\xd1\xd0\xb8\x07\x2f\xb8\x8e\xe3\xe4\xa1\x95\xca\
\x02\xfb\x93\x4d\x5b\xcb\x69\xbf\x72\x89\xdf\x2e\xb4\x50\x34\x7a\
\x3c\xc5\x0b\xd6\x00\xa0\x32\x06\xf3\x16\x4c\xe0\x87\x03\x5b\x70\
\xf2\x0c\x6e\xbe\x8b\xd6\x1a\x6d\x54\x2e\x23\xee\xeb\xc6\x6d\x6d\
\x2d\xfc\x7d\xad\x03\xa5\x05\xa7\xcf\x36\x64\xf5\x12\xa0\xa4\x74\
\x06\x61\x18\xe0\x38\x06\xc7\xb6\x31\x46\x63\x3b\x86\xf2\x25\x5f\
\x89\x6e\x68\xdc\x03\x9a\xab\xa3\x6c\xe9\x2e\x1c\xdb\xc1\x71\x0c\
\x90\xe2\xad\xe2\x17\x6f\xc2\x3b\xfe\xfa\x1d\x6d\x2c\xb4\xd1\x68\
\xad\x30\xc6\x60\xb4\x66\xed\xfa\x99\x31\xc0\xb6\x1d\x25\xa2\x8f\
\x5c\x65\x9d\x18\xa3\x31\x5a\x63\x1c\x9b\x44\xf2\x3a\x0b\x17\x4d\
\x46\xce\x5b\xf0\x1c\x42\x90\x0e\x83\x52\xe9\x55\x2b\x20\xe2\xa1\
\x07\xc7\x00\xf0\xda\xab\xef\xf4\x15\xee\xac\x53\xad\x15\x4a\x2b\
\x94\x25\xd1\x46\xe3\x05\x5d\x28\x81\xc0\x52\x92\xaa\xd5\xf5\x19\
\xbb\x21\x40\xc7\xa9\x33\x0d\x8c\x7e\xec\x69\x60\x29\x71\x1c\xdf\
\x31\xa9\x5a\xdb\x48\xa9\x29\xc8\x1f\x42\xdb\xe5\x56\x52\x41\x88\
\x0c\xc3\x00\x65\x59\xb9\x76\x1d\x00\x4f\x3c\x3e\x8e\x28\x8a\x00\
\xa8\x3b\xb0\xf9\x8e\xf0\x92\xe2\x1d\x58\x96\xe2\xfa\x8d\x2b\x28\
\x65\x11\x86\x01\x12\x71\x5b\xb5\x67\xe2\x18\x5b\xdd\x4e\x9f\x9f\
\x38\x97\xbb\x91\x92\xe2\xed\x48\x29\x11\x22\x5d\x07\x52\x4a\xc9\
\xe0\x81\xc3\x32\xfb\x4f\xf5\xb0\xef\xad\xfc\x44\x7f\xaf\xbb\xa2\
\xec\x1b\x84\x10\x08\x01\xa2\xba\x66\x05\x2f\x4c\x9a\x49\x61\xe1\
\xa3\xb9\x40\xd1\x03\x2e\xfa\x39\x70\xae\x3d\xe5\x95\xd3\x28\x7c\
\xe4\x19\xf2\x9d\xfb\x11\x4d\x4d\xcd\xb8\x6e\x1e\xae\x5b\xc0\xc8\
\x91\x23\x32\xc6\x97\x81\x27\x81\x76\xc0\x7c\xbc\xf5\x4d\x7f\xee\
\xeb\x1b\xb2\xb4\xea\xcd\x73\xb1\x2c\x43\x18\xa6\x98\xff\xc6\xc6\
\x2c\xfc\xe8\xb1\x7a\x06\x0f\x1c\xc5\xc1\x43\xbb\xd9\xff\xd3\x6e\
\x54\x14\xa5\x9b\x8f\xb4\x34\x5b\xb6\x2d\x63\xf6\xac\x55\x99\x87\
\x53\xf3\x61\xcd\x9c\xa9\x52\x4a\xbf\xc0\x1d\x7c\xcb\x51\x2d\xcb\
\x90\x4a\x05\x48\x79\xb3\x10\x9a\x9a\xeb\xa9\xff\x71\x3b\xed\xed\
\x97\xb9\xd1\x79\x03\xad\x6c\xc4\xe1\xc3\x47\x70\xdd\x01\xfc\x7c\
\x64\x27\xa7\xcf\x1f\xc2\xcd\x77\x29\x5b\xf2\x65\x36\x24\xfb\xea\
\x6a\xc4\xd8\x31\x93\x18\x31\xac\x30\x0b\xda\xfd\x75\x25\x63\x8b\
\x26\x32\x72\x44\x11\x03\x0a\x06\xc6\x80\x28\xab\x98\x4c\x18\x42\
\x22\xe9\x91\xe8\x4a\x40\x6c\xa5\xaf\xb3\xf9\x93\xd5\x1c\x6e\xda\
\xc3\x80\x82\x7b\x70\x6c\x83\x6d\xdb\x94\x2e\xfe\x22\xe3\xa0\x08\
\x38\xd5\x23\xce\xb2\x7b\x0d\x33\x39\x29\xab\x98\x82\xe7\x79\x74\
\x25\x3d\x12\x9d\x49\x8c\xed\xa6\x1b\x57\x63\xd3\x5e\xe2\x50\x12\
\x04\x3e\x96\x94\x28\xa5\x72\x2b\xa4\xb7\xa4\x46\xb9\xfa\xf2\xca\
\x57\xf0\x7d\x1f\xdf\x0f\xf0\x93\x3e\x4a\xd9\x6c\x78\xbf\x36\x7d\
\x82\xea\xaa\x7a\x84\x50\x24\x12\x3e\x52\x1a\x66\x4c\x2d\xef\xaf\
\x8f\xdc\xd2\xc8\x96\x57\x4c\xc1\xf7\x7d\x92\x9e\x8f\xe7\xf9\x44\
\x31\x54\x57\xed\xbf\xe5\x7a\x6c\x5c\x7f\x90\xa1\xf7\x3d\xcc\xf4\
\x29\xa5\x74\x76\xfe\xdb\x5b\x0f\xb9\xed\x5b\xb6\x72\x32\x9e\xe7\
\x91\x4c\x7a\x78\x49\x0f\xdf\x0f\x79\x60\xe8\xa8\xbe\x27\xd1\x89\
\xe3\xbf\xd2\xdc\x52\xcb\x84\x67\xa7\x71\xec\xe4\x3e\x0e\x1c\xda\
\xce\xda\xca\xda\xdb\x8a\xfb\xed\xd2\x97\xb2\x63\xce\xf3\x3c\xa2\
\x58\x32\x7c\xe8\x28\x56\x2c\xff\xbc\xff\x19\x7a\xe6\xf4\x05\x3e\
\xdd\xb9\x8c\x3f\xae\x9c\xc5\x18\x1b\xcb\xb2\x90\x52\xb0\xee\xbd\
\x3a\x16\x2e\x9e\x44\x14\xc7\x44\x61\x7a\xcc\x05\x7e\x88\x31\x79\
\x7c\xb4\xae\xee\xee\x06\x34\xc0\x9c\xf9\xe3\xd0\x26\x8f\x28\x8e\
\x88\xa3\x10\x88\xbb\xbb\x63\x8c\xb4\x14\xc4\x20\xa5\x42\x6b\x9b\
\x0f\xd6\x7c\xc7\xff\x2e\xff\x01\xef\x1f\xa1\x47\xfa\x48\xeb\x29\
\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x04\x86\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xde\x08\x07\x16\x1f\x34\xbd\x02\x81\x6e\x00\x00\x04\x13\x49\x44\
\x41\x54\x48\xc7\xbd\x95\xeb\x6f\x15\x45\x18\x87\x9f\x9d\x99\xdd\
\x3d\xa7\x87\xaa\x28\xa0\xfd\x20\x31\xc6\xc4\xf8\x07\x70\x29\x06\
\xd4\x26\x46\x8d\x46\x21\x10\x12\x89\x31\x04\xd1\x14\xd4\xc6\x96\
\x60\x0f\xf7\x42\x48\x63\x10\x2a\x50\x04\x4a\x50\x30\x0a\x58\x2f\
\x08\x11\xc3\xc5\x6a\x11\x24\x42\xa9\x14\x0a\x35\x82\x20\x04\x03\
\x51\xc4\x06\xa1\xed\x39\x7b\x99\x19\x3f\x9c\x50\xdb\xd2\xd6\x5b\
\xe2\x9b\x6c\x26\xbb\x33\xf3\xcc\xef\x9d\xf9\xcd\xbb\x0e\xfd\x44\
\x63\xe3\x11\x94\x72\x31\x26\xe6\xca\xef\xbf\xd0\xda\x7a\x91\xd3\
\x67\x8f\x70\xee\x7c\x33\x0e\x0e\x42\x2a\x06\xde\x5c\xc0\xe2\x8a\
\xf7\x7a\x9d\xef\xf4\x0e\xfd\x16\xc7\x11\xb8\xae\xe2\x58\xcb\xe7\
\x34\x1f\xaf\x23\xd2\x01\xd6\x1a\xb0\x16\x63\x41\x6b\xcd\xea\x37\
\xf6\xf3\xdc\xb4\x11\x0c\x48\x0d\x64\xc5\xd2\x5d\x37\x70\x54\xcf\
\x0f\x2d\x27\x4e\x71\xe6\x6c\x23\xa7\xcf\x1e\xe2\xfc\x85\xe3\x80\
\xc1\xf3\x7c\x96\x57\x7e\x01\x40\x7a\xc1\x63\x58\x63\x31\xc6\x00\
\x90\x97\x97\xa0\xad\xa3\x95\xe7\x5f\x1c\x45\xe1\x88\x71\x4c\x79\
\x76\x66\xef\xca\x6b\x3f\x5c\xcf\xa1\xc6\xad\x04\xd1\x55\x12\x89\
\x24\xae\x52\x28\x57\x22\xa5\x64\xe1\xec\xed\x9d\xe3\x2a\x2a\xc7\
\x52\x31\x7b\x5b\xe7\xfb\xfa\x77\x2a\xf8\xf5\xd2\x45\x7e\x3c\xd7\
\xc2\x7d\xf7\x16\x52\x56\xb2\xb4\x3b\xbc\xb8\x64\x0c\xc6\xc4\x24\
\x92\x1e\x09\xdf\xc7\x75\x5d\x5c\x4f\xa1\x94\x62\xee\xcc\x8f\x6c\
\x4f\x21\x3b\x76\xad\xe5\x89\x47\x8b\xd9\xbb\xff\x63\x1e\x1c\x3d\
\x9e\x57\xca\x1f\x22\x08\x22\x8c\x76\xa8\xa9\xde\xff\x27\x3c\x3d\
\x6f\x22\xbf\xb5\xfe\x44\x32\xe9\x93\x48\xf8\x78\x9e\x8b\xe7\x7b\
\xcc\x2f\xdf\x7a\x9d\x65\xfb\x3a\xa3\x05\x95\x4f\x62\xad\xc3\xa2\
\x39\xdb\x29\x4d\x17\xd1\xd1\x9e\xc1\x73\xf3\xa9\xae\xda\x93\x1b\
\x3c\x75\x7a\x21\x9e\x2f\x49\x26\x93\x24\x7c\x0f\xdf\xcf\x2d\x90\
\x2e\xab\xed\x09\xef\xd3\x04\x5d\x63\xea\xf4\x91\xe4\xa7\x06\x21\
\x8a\x4b\x1e\xc0\x71\xc8\x6d\x83\x52\xb9\xd6\x55\x5d\xc1\x3d\xc3\
\xae\x5c\x3b\xb9\xd7\x8e\x39\x8b\x1e\xa7\x34\x5d\x84\xeb\xb9\x04\
\x51\x07\xc2\xc1\x41\x2a\x81\x52\x12\xe5\x4a\x84\x14\xcc\x9a\xf1\
\x41\xbf\xca\x4a\x8a\x37\x5e\x3f\x83\xce\xe7\xb5\xaa\xa7\xc9\x4b\
\x0e\x60\xc8\xa0\x3b\x91\xd2\xc1\x18\x8d\xd0\x3a\x42\x49\x89\x14\
\x12\xac\xed\xdc\xe7\x38\x8e\xff\x2a\x7b\xd3\x35\x9b\x74\xd9\x16\
\xa4\x54\x5c\xbd\x76\x09\xa5\x24\x5a\x47\x38\x2f\xbc\x74\x3f\x9e\
\x2f\x59\xb5\x6c\x5f\x7f\x20\xdb\x4f\x9f\xec\xb1\x10\xa5\xe9\x22\
\xda\xae\x65\x10\x42\x08\x6e\xbd\xe5\x0e\xfe\x43\x68\x20\xd1\xed\
\xda\x3b\x0e\x8e\x03\xce\x9a\x75\x0b\x79\xb8\xe8\x19\x52\xa9\x3c\
\x0a\x0a\x0a\xfe\x8d\xf2\x6e\x0e\xaa\xa8\x1c\xcf\x3d\x77\x8f\x24\
\x2f\x31\x04\x31\x72\xd8\x58\x8c\xd1\x18\x63\x69\x6d\x6d\xfd\xc7\
\xb2\xab\x6b\xa6\x74\x82\x8f\x1c\xad\x67\xf2\xa4\x25\x18\x6d\xd8\
\xb1\x7b\x15\xc2\x18\x4d\x1c\x6b\x84\x74\xd9\xb6\xa3\x8a\xc5\x4b\
\x26\xfc\x13\xb6\x33\xba\x70\x12\x00\x4d\xc7\xea\xd9\xfb\xf5\x26\
\x6a\x36\x94\xd0\xd0\xf4\x19\xae\xf2\x71\x1a\x1a\x0e\x93\x4a\xe5\
\x73\xf0\x70\x2d\x27\xcf\x1c\x20\x95\x97\xc2\xf7\x7d\xd2\x65\xef\
\x77\x4d\xd9\xf4\x06\xbe\xd6\x76\x85\xef\x4f\x7d\xc3\xc1\xc3\xdb\
\xb8\x7c\xf9\x02\x5a\x43\x26\x1b\x90\xe9\xc8\x80\x95\xa8\xe1\xc3\
\x87\xf1\xd6\xc6\x25\x34\x34\xed\x24\x7f\xc0\x4d\x44\x51\x84\x10\
\xa2\x2b\x44\xf4\xa4\x06\x61\xd6\xd9\x77\x60\x33\x6b\xdf\x9e\x8e\
\xb5\x10\x47\x11\x20\x89\xe3\x80\x28\x8e\x89\x22\x8d\xe7\x27\x72\
\x25\xb7\xb1\x69\x27\x56\x0b\xa2\x28\x44\x0a\x81\x52\xaa\xa7\x1b\
\xba\x29\x5e\xb7\xe1\x65\xb2\xd9\xf6\x1c\x38\x8e\x08\xc3\x88\x28\
\xca\xb5\x61\x36\x44\x29\x9f\x37\xab\xea\x72\xaa\xd6\xac\xa8\xc7\
\x71\x14\x99\x4c\x88\x10\x1e\x13\xc7\x55\xd0\x7c\xe2\x60\x9f\xae\
\xe8\xe8\x68\x23\x8e\x35\x61\x10\x90\x0d\x42\xc2\x30\x24\x1b\x84\
\x04\x41\x88\xb1\xb0\x66\xc5\xde\xee\x29\xd7\x54\xef\x63\xf0\x6d\
\x77\x31\xe1\xa9\x79\xb4\xb7\xb7\xf1\xc9\xa7\xcb\xfb\xb4\x5b\x79\
\xe9\x16\x66\xcd\xa8\x25\x1b\x04\x04\x41\x40\x36\x1b\x10\x64\x03\
\xc2\x50\x73\xfb\xe0\xa1\x7d\x57\xb8\xe3\xcd\xdf\x71\xac\xa5\x8e\
\x31\xa3\xc6\x73\xf4\xc4\x1e\xbe\x3a\xb0\x89\x65\x95\x75\x37\xa4\
\xf0\xea\xbc\x47\xd0\x71\xce\x69\x41\x10\x60\xac\xa0\x60\xf0\x50\
\x16\xce\xdf\xdc\x7f\xf9\x3c\x75\xf2\x1c\xef\xd6\xce\xe5\xe7\x4b\
\x3f\xe0\x79\x3e\x52\x4a\x56\xbe\x5e\x9f\xbb\xda\xe5\x45\x18\x6b\
\x31\xda\x10\x6b\x4d\x14\x6a\x3c\x2f\xc9\xea\xe5\x5f\xfe\xbd\x1f\
\x34\xc0\xd4\x69\x23\x70\xbd\x24\xc6\x1a\xac\xd1\x80\xc5\x5a\x0b\
\x58\x84\x54\x60\x41\x08\x85\xeb\xfa\xac\x5c\xba\x9b\xff\x3d\xfe\
\x00\xec\x30\xb9\x21\x16\xc4\x68\x11\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x03\xf5\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdf\x04\x16\x15\x0a\x2f\xe4\x9d\x1e\x91\x00\x00\x03\x82\x49\x44\
\x41\x54\x48\xc7\xbd\x93\x7d\x68\x95\x55\x1c\xc7\x3f\xe7\x79\x9e\
\xfb\xf2\xdc\xeb\x1c\xdb\x6e\xeb\x05\x82\xa8\x81\x18\x92\xfd\xe3\
\xbc\x0a\x86\xc9\xac\xa0\x60\xc5\x46\x44\xd0\x0b\x6b\x91\x33\x75\
\xe4\x7a\x21\x97\x35\x45\xb3\x9a\x90\x9b\x9a\x08\x66\x2f\x1a\x31\
\x4a\x02\x5b\x0d\x0b\xbb\x23\x09\xdd\x96\x33\xed\x85\xe6\xdd\x6a\
\x09\x7b\x1f\x96\xf7\xba\x7b\x77\x7b\x5e\x4e\x7f\xf8\x38\xef\x1e\
\x9f\xab\xf7\xaf\xbe\xf0\xe3\x70\x7e\xe7\x77\x7e\xe7\xfb\xfb\xfe\
\xce\x4f\x70\x35\x92\xc0\x1c\x0f\x3f\x9f\x1f\xda\x2f\xfa\xff\xec\
\x65\xf0\xdc\x19\x04\x02\x45\xd5\x28\x2a\xbc\x99\x2d\x4d\x07\xbd\
\xc2\x51\x5c\x7b\x09\x54\x02\x77\x03\x25\x40\xa1\x63\x51\xa0\xb1\
\xba\xaa\x26\x31\x32\xf6\x0b\x3e\xbf\x82\x50\x6d\x76\xbf\x7b\x94\
\x91\xb1\x38\xf5\x2f\x3e\x70\xdd\xe4\x12\xf0\x03\xdf\x01\xa7\x81\
\xf3\xc0\x05\x20\x01\x74\x01\xdb\x80\x95\x2d\xcd\x9d\x89\x90\x1e\
\x24\x1c\xd2\x01\x08\x85\x82\x5c\x4c\x9d\xe7\xd9\xe7\x97\xb2\xff\
\xe3\xe6\x59\xc9\x45\x56\x62\xe1\x7a\xf8\x46\x60\x14\xd0\x00\x2b\
\xcb\xbf\x18\xf8\x16\x98\x7b\xd9\xb1\xef\xa3\x26\x26\xc6\x87\xf9\
\x63\xf0\x57\xe6\xcf\x5b\xc2\xfa\x75\xdb\x67\x98\x67\x80\x79\x1e\
\x55\x4d\x3a\xab\xe5\xf2\x77\x03\x6f\x39\x84\xe8\x3c\x76\x88\xda\
\xa7\x9a\x18\x9b\x8c\xa3\xf9\x04\x7d\xf1\xae\x59\xb2\x24\x1d\x96\
\x6e\xd8\x78\x43\x02\x47\x2f\x6f\x96\x2f\xab\xe2\xf5\xad\x95\xec\
\x78\x3b\x46\x30\xe8\x07\x4c\xd6\xae\xbf\x6f\x46\x96\xb3\xc0\x52\
\x87\xa9\xe2\x5c\x96\x39\xe4\xd2\x81\xb4\x23\xc9\x05\x0f\x29\x01\
\xa8\x5d\x1d\xa5\x20\x1c\x41\x73\x98\x4f\x66\x25\xf4\x62\xea\x46\
\xa1\x57\x60\xe3\xe6\x07\x49\xa5\xd2\x4c\x4f\x1b\x64\x8c\xd4\xac\
\xdf\x22\x5c\xa6\xe4\xf0\x0b\xa7\xc9\x57\x21\xa4\xcf\xa1\x34\x72\
\x2b\xaa\x2a\xb0\x6d\x0b\x05\x28\xc8\xc1\x24\x57\x25\x00\x4b\x3c\
\x99\xbf\xd4\x46\x22\x39\x8e\xa6\xa9\x58\x96\x81\x02\x14\x3b\x9a\
\xe7\x0b\x01\xac\xcc\x75\xb8\x6d\x53\x07\x42\x08\x40\xa2\xac\x5a\
\xb7\x2c\x02\x7c\xed\x54\x90\x8d\x52\x67\x9d\xeb\xf2\x57\x01\xf5\
\xb9\x9a\x09\x20\x84\x40\x08\xd0\x16\x2e\xa8\xa0\xbf\xff\x43\x51\
\x56\x76\x87\x04\x8a\x80\x7f\x9c\x98\x71\x67\x4d\x64\xdd\xab\x06\
\x3e\xbb\xc6\xe0\xd1\xf4\x66\x15\x65\xb7\x47\x09\x05\x4b\x51\xa2\
\x8b\x1e\xc6\xb6\x2d\x86\x86\x86\x05\xf0\x37\xb0\xc2\xa9\x42\x71\
\x2c\xe0\x68\xbc\x3b\x2b\xf1\x4c\x5f\x76\xee\xad\x99\xd9\xf4\xfe\
\x14\xe3\xe9\xc7\xdf\xc1\xb6\x6c\xda\x8f\xec\x42\x9c\x3c\xd9\x8b\
\xae\xeb\x14\x15\x47\xe8\xf8\xa6\x85\x9a\x27\xb7\xc8\xeb\xe8\xed\
\x75\x2e\x4e\x9d\x8e\x11\x3b\xf6\x09\x13\x13\x23\x24\xa7\x92\x18\
\x19\x1b\xd1\xdd\xdd\x43\x38\x5c\xc0\x89\x9e\x36\xfa\x06\x7e\x20\
\x1c\x0a\x13\x08\x04\xf0\xf9\x7d\x34\xac\x39\x00\xc0\xf0\x68\x3f\
\xb7\xdc\x54\x76\xcd\x9f\xf4\xc6\xd6\x87\x84\x65\x41\x7a\x3a\x43\
\x3a\x95\x06\xa9\xa2\x94\x97\x2f\xe2\x44\xcf\x61\xba\x4f\x75\x60\
\xdb\x60\x18\x06\x86\x61\x60\x1a\x26\x5d\x3f\xb6\x33\x34\x12\x77\
\x27\xc6\x4b\xeb\x4d\x8d\xed\xd2\x34\x4d\x0c\xd3\xc4\x30\x2c\x84\
\xea\xbb\x14\x54\x57\x7f\x2f\x96\x95\x41\x0f\xf9\x09\x06\x82\xe8\
\x7a\x90\x47\x1f\xd9\xc0\x9d\xf3\xcb\x01\xd8\xf3\xfe\x1a\xea\x9e\
\xd9\x95\xef\xf4\x8a\xba\xfa\xe5\xec\x69\xe9\xbc\xc2\xe0\xb9\xb5\
\xf7\x20\x31\x29\x29\x8e\xf0\xc4\x63\x9b\x31\xad\x69\xee\x5a\x10\
\xcd\xe7\xdf\xcb\x1c\xbd\xb9\x32\xfe\x7b\x77\x7e\xcf\x0d\x25\xb7\
\x51\x5d\xb9\x91\xa9\xa9\x8b\x7c\xf1\xe5\x0e\x06\x06\x7e\xcf\x77\
\xa8\xf2\x9b\xee\x9f\xcf\xfc\xc6\xc1\x4f\x5b\x39\xf7\xd7\x10\x87\
\xbf\xfa\x80\x86\x0d\x15\x9e\x71\x2f\x6f\xbc\x9f\x86\x57\x2b\xdc\
\x09\xb3\xcd\x7b\xca\xce\xf6\x0d\x72\xa0\xed\x35\x46\xc7\xe3\xf8\
\xfd\x01\x54\x55\xa5\xb5\x39\x06\xc0\x0b\xaf\xac\xc0\x96\x12\xdb\
\xb2\x31\x2d\x0b\xe3\x5f\x8b\x7d\xef\x1d\xf7\x62\x2c\x72\x8e\x70\
\x6d\xdd\x62\x7c\x7e\x1d\x5b\xda\x48\xdb\x02\x24\x52\x5e\x22\xa5\
\xa8\x1a\x48\x50\x14\x0d\x9f\x2f\x40\xeb\xf6\x23\xfc\xef\xf8\x0f\
\xed\xf1\x3c\x24\x87\xc0\xac\xd6\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x05\xc3\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xde\x05\x1d\x17\x0d\x28\x17\x01\xb4\x8f\x00\x00\x05\x50\x49\x44\
\x41\x54\x48\xc7\xbd\x95\x7b\x6c\x97\xd5\x19\xc7\x3f\xe7\xf6\xbe\
\xbf\x5f\x7f\x2d\x62\x01\x45\x2e\xb2\x38\x91\x80\x25\x90\xcd\x08\
\xc5\x08\x2b\x03\xb6\xcc\x38\xa3\xcb\xcc\xc2\x98\x46\x07\x99\x9a\
\x49\xc2\xa2\x89\x89\x97\xe0\x25\x2e\x9b\xa4\xbb\xe0\x25\x53\x89\
\x5b\xf0\x46\x54\x44\xdd\x90\x49\xa7\x08\x54\x07\x56\x2a\x54\x90\
\x16\xba\x12\x17\x90\x9b\x68\x5b\x69\x7f\xef\x7b\x6e\xfe\xf1\xb2\
\x31\xb7\xc6\xed\xaf\x9d\xe4\xe4\xe4\x24\xe7\x7c\x9e\x93\xe7\x3c\
\xdf\xe7\x2b\xf8\x92\xd1\xd6\xb6\x03\xad\x0d\x21\x38\x3e\xed\x3d\
\xc2\x89\x13\x87\xd8\xdf\xb3\x83\x03\x1f\xee\x42\x20\x90\x4a\x73\
\xe6\x19\xe7\x70\xdf\xf2\x27\x87\xbc\x2f\x86\x86\xbe\x8b\x10\x12\
\x63\x34\x3b\x77\x6f\x64\x57\x47\x0b\xd6\x67\xc4\x18\x20\x46\x42\
\x04\xef\x3d\xde\x79\x9c\xf3\xd4\x56\xce\xe4\x37\x2b\x36\xfc\x77\
\xf8\xee\xf7\xbb\xe8\xee\x69\x63\x7f\xcf\x36\x3e\x3c\xd8\x01\x04\
\x92\x24\x45\x0a\x89\x90\x82\x18\x23\x31\x44\x42\x08\x38\xef\x71\
\xd6\x31\x30\x38\x88\x14\x9a\xc6\x19\x57\x72\xfd\x35\xb7\x0e\x0d\
\x5f\xf3\xdc\xe3\x6c\x6b\x5b\x4b\x66\xfb\x28\x95\xca\x18\xad\xd1\
\x46\xa1\x94\x2a\xe0\x40\x04\x62\x8c\x78\xef\x89\x21\x22\x54\xca\
\x84\xf1\x53\x39\x76\xf4\x10\x7f\x3b\xb0\x9b\xc9\x93\x1a\xf9\xd9\
\xd2\x15\x5f\x84\xdf\xb0\x74\x36\x21\x38\x4a\xe5\x84\x52\x9a\x62\
\x8c\xc1\x24\x1a\xad\x35\x4a\x2a\xa4\x14\x20\x24\xc1\x5b\x7c\x08\
\x4c\x9f\x3a\x9f\x69\x0d\x0b\xe8\xee\x69\xe7\xcd\x2d\xcf\xf2\x49\
\xef\x71\xb2\xcc\x12\xbc\xe0\x77\x2b\xb7\x9c\x86\xdf\x76\xe7\xd5\
\x7c\x7c\xe2\xef\x94\xcb\x29\xa5\x52\x4a\x92\x18\x92\x34\x29\x5e\
\xae\x35\x52\x4a\x20\x32\x6a\xc4\x04\x26\x9e\x7f\x31\x5f\x9f\x7e\
\x19\x03\x03\x7d\xb4\x6c\x7a\x82\x8e\x3d\x9b\x89\x51\x90\x67\x39\
\x59\x9e\x31\x70\x72\x90\xc4\xd4\xb1\xb2\xf9\xb5\x02\xbe\xf8\xa6\
\x46\x92\x54\x51\x2e\x97\x29\xa5\x09\x69\x5a\x04\x30\xc6\x20\xa5\
\x60\x44\xfd\x58\xbe\xf5\xcd\x1b\xa9\xaf\x1f\x83\x56\x9a\xee\x9e\
\x76\x36\xb4\x3c\x42\x96\x57\xf1\xce\x91\x5b\x5b\xc0\xb3\x8c\x81\
\x6a\xc6\xe0\xc9\x2a\x75\x95\x91\xe8\x1b\x96\xce\xc1\xfb\xbc\x48\
\x83\xd6\xc5\x6a\x34\x10\x18\x3f\x6e\x0a\x33\x2f\xba\x82\xf1\xe3\
\xa6\x00\x90\xe7\x83\xbc\xfa\xc6\x63\xec\xef\xde\x8e\x54\x1a\xa3\
\x0d\x02\x51\xfc\x43\x08\xf8\xe0\xd1\xd6\x62\x12\x43\x66\x07\xd0\
\x02\x81\xd2\x12\xad\x15\xda\x28\xa4\x92\x54\x6a\x87\xb3\xa0\xe9\
\xc7\x4c\x9a\x78\x31\x21\x04\x62\x8c\x1c\x3e\xb2\x9f\x57\x36\x34\
\x23\x84\x22\x2d\xd5\x10\x7c\x40\xe0\x89\x44\x42\xf0\x18\x93\x22\
\xa5\xa1\xb6\x66\x24\x07\x3f\xea\xc1\x59\x8f\xf6\xde\x92\xa4\x0a\
\x25\x15\xc4\xc8\xcc\x8b\xae\x60\xd6\x8c\xab\x28\x95\xca\x84\x10\
\x90\x52\xf2\x97\x4d\x8f\x33\xaf\x69\x49\xfc\xc7\xe7\x3f\xf6\x87\
\x9f\x0a\xa1\x8a\xaa\x09\xc1\x31\x69\xe2\x4c\x9c\xf7\x74\x76\xbd\
\x43\x6f\xef\x51\xb4\x56\x64\xd5\x8c\xa2\xbe\x04\x54\x2a\x67\x70\
\xe5\xe5\xb7\x30\xe7\x92\x85\x18\x93\x20\x84\xa4\xbf\xff\x38\x2f\
\xbc\x7c\xff\x17\xc0\x00\x4b\xae\x7d\x30\x86\x18\x38\xef\x2b\xd3\
\x99\xdd\xb8\x90\xde\xbe\x23\xec\xed\x7a\x0b\xe7\xaa\x48\x29\x11\
\xa2\x28\x5a\x2d\xa5\xa4\x7e\xf8\x68\x16\x5f\xd3\x4c\x4d\x4d\x2d\
\x3e\x38\x8c\x36\xa7\xd2\xf0\x2b\x94\x52\x43\x4a\x7b\xd1\xf7\xef\
\xe7\xad\x6d\xcf\xf1\xd7\xb6\xb5\x38\xef\x30\xca\xe0\x5d\x40\x08\
\x71\x6a\x82\x9e\xd6\x30\x8f\xf9\x73\x17\x21\x65\x01\x49\x92\x12\
\xdb\xdf\x5d\xc7\x3b\xed\x2f\x91\x9a\x12\x91\x38\x24\xfc\xa5\xf5\
\x0f\xd0\xd7\xff\x71\xb1\x89\x91\x40\xc4\xda\x2a\x5a\xa7\x7c\x6d\
\xda\x1c\x6a\x4a\x67\x21\xda\xdb\x77\x52\xa9\x94\xa9\x54\x6a\x29\
\x97\x4b\x6c\x7d\xfb\x19\x3e\xd8\xb7\x99\x72\xb9\x82\x56\x1a\xa5\
\x34\x52\x0a\x96\x5c\xfb\xd0\x3f\xa3\x3c\xb2\x6a\x89\x50\x2a\xc1\
\x7b\x87\x73\x16\x21\x34\x75\x95\x91\x4c\xbd\x70\x3e\xf5\xc3\xcf\
\x65\x73\xeb\xf3\x6c\xda\xfa\x3c\x3a\x84\xa2\xf9\x48\x65\x58\xf7\
\xc7\x66\x0e\x1d\xde\x4b\x9a\xa6\x84\x10\x08\x22\x00\x8e\x61\x75\
\x67\xd3\xdb\x77\x4c\xac\xdf\xb8\x92\xfe\x53\xaf\x75\xce\x22\xa5\
\xe2\xd2\xc6\x85\x5c\x70\xfe\x2c\x3a\xf7\x6d\xe3\x8d\x2d\x4f\x71\
\xec\xd8\x47\xf4\x9f\xec\xc7\xe8\x14\xed\xbd\x27\x46\xc1\xfa\x0d\
\x0f\xd3\xd9\xbd\x9d\x4a\x4d\x05\xef\x3d\xd2\x4b\xbc\xb7\x4c\x9d\
\x32\x87\x86\x29\x73\x79\xe1\xe5\x9f\x93\xdb\x41\x9c\xcb\x19\x7d\
\xd6\x57\x69\x98\xdc\xc4\xd8\x31\x93\xd9\xdb\xf5\x36\xab\x56\x2f\
\xe3\xf8\xf1\x83\x78\x0f\xb9\xcd\x09\x2e\xa0\xa4\x2a\x14\xba\xea\
\xf7\xbf\x64\x7b\xfb\x5a\xea\x6a\x87\xfd\x8b\x42\x13\xbe\x71\xe9\
\x0f\x19\x37\x76\x12\x2f\xbe\xf2\x0b\x84\x10\xa4\x69\x0d\xdf\x59\
\x70\x33\x23\xea\xc7\xb1\xb9\xf5\x69\xde\xeb\x68\x21\x46\x70\xd6\
\x92\xfd\x9b\x42\x93\xb4\x82\x06\x68\x6b\x7f\x95\xe8\x25\xd6\xe6\
\x28\x29\xd1\x5a\x73\xf9\xb7\x97\x61\xdd\x00\x4f\x3e\x7b\x07\xc3\
\x86\x8d\x62\x7e\xd3\xf5\x8c\x19\x7d\x01\x9b\xb6\xae\xa6\xbb\x67\
\x07\xd5\xea\xc9\x02\xec\x2c\x79\x6e\xb1\xb6\x58\xf3\x6a\x8e\xd6\
\x29\x0f\x35\xb7\x9c\xee\x8a\x3f\xb9\x79\x36\x11\xc7\x88\xfa\x91\
\xfc\xe8\x07\xf7\xe0\x7c\x95\x75\x7f\x7a\x80\xe9\x0d\x73\x99\xd7\
\x74\x1d\x7b\x3a\x5b\xd9\xf8\xfa\xa3\x28\xa5\x89\x11\x42\x08\x45\
\x5f\x71\x0e\x67\x2d\xd5\x2c\x27\xab\x66\x58\x17\x78\x74\x65\xeb\
\x7f\xf6\xf3\xdb\x97\x2f\xe4\xaa\xef\x2e\x23\x84\x8c\xf5\x1b\x1f\
\x64\xd1\xd5\xcb\x29\x95\x6a\x58\xf3\xe2\x3d\x7c\xf6\xd9\x09\x84\
\x2c\x8e\x07\x1f\x0a\xb8\xf7\x58\xe7\xb0\x79\x91\x96\x3c\xf7\x8c\
\x3e\x7b\x02\xf7\xde\xf5\xcc\xd0\x4e\xd4\xb1\x6b\x0f\x3b\x77\xb7\
\x30\x7b\xd6\xf7\x78\xef\xfd\xd7\x78\xb3\xf5\x29\x8c\xd6\x48\xa9\
\x10\x42\x10\x29\x9c\xc8\x87\xd3\x36\x97\x65\x19\x21\x4a\xce\x19\
\x75\x2e\x77\xdf\xf5\xf4\x97\x7b\x68\x57\xe7\x01\x56\xaf\xb9\x83\
\xc3\x47\xf7\x91\x24\x69\xe1\x44\xb2\x50\x5e\xe1\xa1\x91\xe0\x0b\
\x9b\xb3\xb9\x27\x49\xca\x3c\xfc\xeb\xd7\xff\x37\x83\x06\x58\x7c\
\xe3\x0c\x4c\x52\x26\xc4\x40\x0c\x1e\x88\xc4\x18\x81\x88\x54\x1a\
\x22\x48\xa9\x31\x26\xe5\xb7\x2b\xfe\xcc\xff\x7d\x7c\x0e\x7b\x3e\
\x8f\xdf\xeb\x2c\xcf\xa0\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
\x00\x00\x03\xdf\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdf\x04\x16\x15\x0f\x33\x8d\xeb\xb6\x9b\x00\x00\x03\x6c\x49\x44\
\x41\x54\x48\xc7\xbd\x95\x6b\x6c\x54\x45\x14\xc7\x7f\x73\xef\xdd\
\xc7\xdd\xa5\x90\xb6\x2b\x0a\x89\x09\xd1\x26\x04\x43\xd0\x2f\x94\
\x85\x04\x83\xa4\x80\x89\x26\xd5\xb4\x31\xc6\xc4\x47\x6a\x8d\x56\
\x1e\x0d\x14\x34\x50\x1e\x85\x80\x20\x25\x91\x96\x57\x48\x10\x1f\
\x60\x4c\xa3\xc4\x04\xab\x0d\x1a\xdc\x46\x42\xa4\x2d\x14\xc1\x47\
\x28\xdb\x42\x25\x69\x4b\x1f\x01\xec\xd2\x6d\xd7\xfb\x18\x3f\x70\
\xbb\x96\xe5\x6e\x59\xbe\x78\x92\xf3\xe1\xcc\x9c\xf9\xcf\x7f\xce\
\x9c\xff\x8c\xe0\x5e\x8b\x01\x13\x5c\xc6\xf9\xfa\xd8\x61\xd1\x7e\
\xb5\x95\xce\x6b\x17\x11\x08\x14\x55\x23\x7b\xd2\x14\xb6\x56\x1d\
\x75\x4b\x47\x49\x89\x25\x50\x08\x3c\x05\xe4\x02\x93\x1c\x0f\x03\
\x95\xc5\x45\x25\x83\x3d\xbd\xbf\xe3\xf1\x2a\x08\xd5\x66\xdf\x47\
\x27\xe9\xe9\x8d\x52\xbe\xfa\xd9\xfb\x82\x4b\xc0\x0b\xfc\x04\x5c\
\x00\x6e\x00\x83\x8e\x37\x01\xdb\x81\x45\x35\xd5\x8d\x83\x01\xdd\
\x4f\x30\xa0\x03\x10\x08\xf8\xb9\x1d\xbf\xc1\x5b\x4b\xe7\x71\xf8\
\xf3\xea\xbb\xc0\xc5\x18\x60\xe1\x72\x8a\xd4\x3c\x80\x39\xc0\x8f\
\xc0\xc4\xd1\x81\x43\x9f\x55\xd1\xdf\xd7\xcd\x95\xce\x3f\x98\x31\
\x7d\x2e\xab\x56\xec\x4a\x32\x4f\x00\xd3\xd3\x00\x0b\x97\x8d\x9a\
\x81\x1d\xa3\x63\x8d\xa7\x8e\x51\xfa\x7a\x15\xbd\x03\x51\x34\x8f\
\xa0\x2d\xda\x74\x57\x59\x62\xc0\xc3\x2e\x25\x13\x2e\xac\x47\x37\
\x3a\x39\x1a\x2c\x98\x5f\xc4\xc6\x6d\x85\xec\xfe\x30\x82\xdf\xef\
\x05\x4c\x96\xaf\x5a\x9c\x5c\x78\x19\x98\x07\x0c\xa4\x00\x88\x71\
\xe2\x89\xc0\xdf\x2e\x1b\x03\x50\xfa\x6e\x98\xac\x60\x08\xcd\x61\
\x3e\xe0\x92\x23\x79\x40\xab\xdc\xf2\x1c\xf1\xf8\x30\x23\x23\x06\
\x09\x23\x7e\x4f\x2b\xa6\x96\x45\xa4\x61\xa7\xb9\x2d\x08\xe8\x13\
\x98\x1c\x7a\x14\x55\x15\xd8\xb6\x85\x02\x64\x39\xbd\xfc\x20\x36\
\xd7\x95\xf9\x9a\x3a\x06\x63\x7d\x68\x9a\x8a\x65\x19\x28\x40\x8e\
\x53\xf3\x4c\x4d\x00\x8b\xd2\x4d\x6e\xdf\xdc\x80\x10\x02\x90\x28\
\xef\xac\x98\x1f\x02\xbe\x77\x4e\x40\x9a\x0e\x19\x1b\x17\x01\xe5\
\xe9\x2e\x13\x40\x08\x81\x10\xa0\x3d\x39\xb3\x80\xf6\xf6\x4f\x45\
\x5e\xde\xe3\x12\xc8\x06\x6e\x8d\xc3\xba\x18\xf8\x6a\x1c\xe1\x51\
\xf5\x41\x11\x79\x8f\x85\x09\xf8\x27\xa3\x84\x67\xbf\x80\x6d\x5b\
\x74\x75\x75\x0b\xe0\x26\xb0\xd0\x39\x85\xe2\xb8\xcf\xa9\xf1\xbe\
\x31\xc0\xc9\x8e\xda\x73\xb0\x24\x19\xb4\xfe\x1a\xe1\x8d\x57\x76\
\x62\x5b\x36\xf5\x27\xf6\x22\xce\x9d\x6b\x45\xd7\x75\xb2\x73\x42\
\x34\xfc\x50\x43\xc9\x6b\x5b\xe5\x7d\xea\xed\x36\x2f\xce\x5f\x88\
\x10\x39\xf5\x05\xfd\xfd\x3d\xc4\x86\x62\x18\x09\x1b\xd1\xdc\xdc\
\x42\x30\x98\xc5\x99\x96\x3a\xda\x3a\x4e\x13\x0c\x04\xf1\xf9\x7c\
\x78\xbc\x1e\x2a\x96\x1d\x01\xa0\xfb\x7a\x3b\x53\x1f\xc9\x1b\x57\
\x03\x9b\xb6\x3d\x2f\x2c\x0b\x86\x47\x12\x0c\xc7\x87\x41\xaa\x28\
\xf9\xf9\xb3\x39\xd3\x72\x9c\xe6\xf3\x0d\xd8\x36\x18\x86\x81\x61\
\x18\x98\x86\x49\xd3\xd9\x7a\xba\x7a\xa2\xa9\xc0\x6e\x17\xce\xe6\
\xca\x7a\x69\x9a\x26\x86\x69\x62\x18\x16\x42\xf5\xdc\x49\x2a\x2b\
\x7f\x06\xcb\x4a\xa0\x07\xbc\xf8\x7d\x7e\x74\xdd\xcf\x4b\x2f\xae\
\xe3\x89\x19\xf9\x00\x1c\xf8\x78\x19\x65\x6f\xee\x25\x43\x15\x8b\
\xb2\xf2\x05\x1c\xa8\x69\xfc\x8f\xc1\xdb\xcb\x9f\x46\x62\x92\x9b\
\x13\xe2\xd5\x97\xb7\x60\x5a\x23\xcc\x9a\x19\xce\xa4\xef\x65\xba\
\x47\x2f\x29\xff\x83\x7b\x7e\xe6\xa1\xdc\x69\x14\x17\x6e\x60\x68\
\xe8\x36\xdf\x7c\xbb\x9b\x8e\x8e\x4b\x99\x8a\x2a\xb3\x77\xe9\xb7\
\x8b\x7f\x72\xf4\xcb\x5a\xae\xfd\xd5\xc5\xf1\xef\x3e\xa1\x62\x5d\
\x81\x6b\xde\x7b\x1b\x96\x50\xb1\xb6\x20\x15\x70\xac\xbb\xab\xec\
\x72\x5b\x27\x47\xea\xd6\x73\xbd\x2f\x8a\xd7\xeb\x43\x55\x55\x6a\
\xab\x23\x00\xac\x7c\x7f\x21\xb6\x94\xd8\x96\x8d\x69\x59\x18\xff\
\x58\x1c\xda\xff\x8b\xeb\xef\x95\x56\xc2\xa5\x65\x73\xf0\x78\x75\
\x6c\x69\x23\x6d\x0b\x90\x48\x79\x87\x94\xa2\x6a\x20\x41\x51\x34\
\x3c\x1e\x1f\xb5\xbb\x4e\xf0\xbf\xdb\xbf\x2f\xa8\x34\x2c\xc2\x55\
\xf9\x39\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x0d\
\x09\x85\x4f\xa2\
\x00\x6d\
\x00\x61\x00\x70\x00\x62\x00\x69\x00\x6f\x00\x67\x00\x72\x00\x61\x00\x70\x00\x68\x00\x65\x00\x72\
\x00\x0c\
\x03\x53\x1b\x07\
\x00\x72\
\x00\x65\x00\x64\x00\x5f\x00\x6c\x00\x69\x00\x6e\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0b\
\x0d\x22\x5f\xa7\
\x00\x6d\
\x00\x61\x00\x6e\x00\x61\x00\x67\x00\x65\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x0a\x70\x3e\x07\
\x00\x7a\
\x00\x6f\x00\x6f\x00\x6d\x00\x5f\x00\x64\x00\x61\x00\x74\x00\x61\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0f\
\x0a\xae\xf2\x47\
\x00\x72\
\x00\x65\x00\x64\x00\x5f\x00\x70\x00\x6f\x00\x6c\x00\x79\x00\x67\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0b\
\x0e\xcf\x90\xa7\
\x00\x70\
\x00\x6f\x00\x6c\x00\x79\x00\x67\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x0f\xa3\x01\xa7\
\x00\x63\
\x00\x6f\x00\x6c\x00\x6c\x00\x65\x00\x63\x00\x74\x00\x6f\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x01\xa9\x19\x67\
\x00\x65\
\x00\x64\x00\x69\x00\x74\x00\x5f\x00\x6d\x00\x6f\x00\x76\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0f\
\x09\x7b\x54\xa7\
\x00\x6d\
\x00\x65\x00\x64\x00\x69\x00\x61\x00\x5f\x00\x70\x00\x61\x00\x75\x00\x73\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x06\xeb\x91\xa7\
\x00\x7a\
\x00\x6f\x00\x6f\x00\x6d\x00\x5f\x00\x6f\x00\x75\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x02\xc9\x1a\x47\
\x00\x65\
\x00\x64\x00\x69\x00\x74\x00\x5f\x00\x6e\x00\x6f\x00\x64\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x00\x48\x59\x27\
\x00\x6c\
\x00\x69\x00\x6e\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x07\
\x06\x81\x57\xa7\
\x00\x70\
\x00\x61\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0e\
\x05\xd6\xa7\x47\
\x00\x6d\
\x00\x65\x00\x64\x00\x69\x00\x61\x00\x5f\x00\x70\x00\x6c\x00\x61\x00\x79\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0b\
\x05\x03\x9b\x27\
\x00\x7a\
\x00\x6f\x00\x6f\x00\x6d\x00\x5f\x00\x69\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x00\x57\xb9\x87\
\x00\x70\
\x00\x6f\x00\x69\x00\x6e\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x0a\x80\x3e\x47\
\x00\x7a\
\x00\x6f\x00\x6f\x00\x6d\x00\x5f\x00\x61\x00\x72\x00\x65\x00\x61\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x10\x00\x00\x00\x03\
\x00\x00\x01\x70\x00\x00\x00\x00\x00\x01\x00\x00\x2a\x67\
\x00\x00\x01\xd8\x00\x00\x00\x00\x00\x01\x00\x00\x3c\xde\
\x00\x00\x00\xee\x00\x00\x00\x00\x00\x01\x00\x00\x18\x2c\
\x00\x00\x01\x50\x00\x00\x00\x00\x00\x01\x00\x00\x25\x71\
\x00\x00\x00\x34\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\xbc\x00\x00\x00\x00\x00\x01\x00\x00\x38\xe5\
\x00\x00\x01\x9a\x00\x00\x00\x00\x00\x01\x00\x00\x34\x5b\
\x00\x00\x01\x86\x00\x00\x00\x00\x00\x01\x00\x00\x2f\xba\
\x00\x00\x01\x32\x00\x00\x00\x00\x00\x01\x00\x00\x21\x78\
\x00\x00\x01\x0e\x00\x00\x00\x00\x00\x01\x00\x00\x1d\x3f\
\x00\x00\x00\x6e\x00\x00\x00\x00\x00\x01\x00\x00\x08\x17\
\x00\x00\x01\xf0\x00\x00\x00\x00\x00\x01\x00\x00\x42\xa5\
\x00\x00\x00\x8e\x00\x00\x00\x00\x00\x01\x00\x00\x0c\x1e\
\x00\x00\x00\x52\x00\x00\x00\x00\x00\x01\x00\x00\x01\xdd\
\x00\x00\x00\xb2\x00\x00\x00\x00\x00\x01\x00\x00\x0e\x01\
\x00\x00\x00\xce\x00\x00\x00\x00\x00\x01\x00\x00\x12\xaf\
"
def qInitResources():
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)

def qCleanupResources():
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
tsw-apropos/mapbiographer
|
mapBiographer/resources_rc.py
|
Python
|
gpl-2.0
| 78,437
|
# -*- coding: utf-8 -*-
import os
import sys
import imp
import ntpath
import cPickle
HERE = os.path.dirname(os.path.abspath(__file__))
def latinToAscii(unicrap):
"""This takes a UNICODE string and replaces Latin-1 characters with
something equivalent in 7-bit ASCII. It returns a plain ASCII string.
This function makes a best effort to convert Latin-1 characters into
ASCII equivalents. It does not just strip out the Latin-1 characters.
All characters in the standard 7-bit ASCII range are preserved.
In the 8th bit range all the Latin-1 accented letters are converted
to unaccented equivalents. Most symbol characters are converted to
something meaningful. Anything not converted is deleted.
"""
xlate = {0xc0:'A', 0xc1:'A', 0xc2:'A', 0xc3:'A', 0xc4:'A', 0xc5:'A',
0xc6:'Ae', 0xc7:'C',
0xc8:'E', 0xc9:'E', 0xca:'E', 0xcb:'E',
0xcc:'I', 0xcd:'I', 0xce:'I', 0xcf:'I',
0xd0:'Th', 0xd1:'N',
0xd2:'O', 0xd3:'O', 0xd4:'O', 0xd5:'O', 0xd6:'O', 0xd8:'O',
0xd9:'U', 0xda:'U', 0xdb:'U', 0xdc:'U',
0xdd:'Y', 0xde:'th', 0xdf:'ss',
0xe0:'a', 0xe1:'a', 0xe2:'a', 0xe3:'a', 0xe4:'a', 0xe5:'a',
0xe6:'ae', 0xe7:'c',
0xe8:'e', 0xe9:'e', 0xea:'e', 0xeb:'e',
0xec:'i', 0xed:'i', 0xee:'i', 0xef:'i',
0xf0:'th', 0xf1:'n',
0xf2:'o', 0xf3:'o', 0xf4:'o', 0xf5:'o', 0xf6:'o', 0xf8:'o',
0xf9:'u', 0xfa:'u', 0xfb:'u', 0xfc:'u',
0xfd:'y', 0xfe:'th', 0xff:'y'
# The extended Latin-1 symbol mappings below are intentionally disabled:
# 0xa1:'!',  0xa2:'{cent}', 0xa3:'{pound}', 0xa4:'{currency}',
# 0xa5:'{yen}', 0xa6:'|', 0xa7:'{section}', 0xa8:'{umlaut}',
# 0xa9:'{C}', 0xaa:'{^a}', 0xab:'<<', 0xac:'{not}',
# 0xad:'-', 0xae:'{R}', 0xaf:'_', 0xb0:'{degrees}',
# 0xb1:'{+/-}', 0xb2:'{^2}', 0xb3:'{^3}', 0xb4:"'",
# 0xb5:'{micro}', 0xb6:'{paragraph}', 0xb7:'*', 0xb8:'{cedilla}',
# 0xb9:'{^1}', 0xba:'{^o}', 0xbb:'>>',
# 0xbc:'{1/4}', 0xbd:'{1/2}', 0xbe:'{3/4}', 0xbf:'?',
# 0xd7:'*', 0xf7:'/',
# 0x0A:'',  # New Line
# 0x0D:'',  # Carriage Return
# 0xA0:'',  # Non-breaking space
# 0x00:'',  # Null
# 0x09:'',  # Horizontal Tab
# 0x0b:''   # Vertical Tab
}
return ''.join(xlate.get(ord(i), str(i) if ord(i) < 0x80 else '') for i in unicrap)
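# A minimal usage sketch (illustrative only; assumes a Python 2 unicode input
# as the docstring describes):
#   latinToAscii(u"Caf\xe9")       -> 'Cafe'    (0xe9 maps to 'e')
#   latinToAscii(u"stra\xdfe")     -> 'strasse' (0xdf maps to 'ss')
#   latinToAscii(u"price \u20ac5") -> 'price 5' (codepoints above 0x7f that are
#                                                not in the table are dropped)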
def logHere(*args, **kwargs):
filename = kwargs.get("filename", "log.log")
logfile = os.path.join(HERE, '..', 'logs', filename)
with open(logfile, "a") as f:
    f.writelines("%s " % str(arg) for arg in args)
    f.write("\n" * (kwargs.get("whitespace", 0) + 1))
def hashIt(param, unhash=False):
if unhash:
return cPickle.loads(param)
return cPickle.dumps(param, 2) #binary format
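# Minimal round-trip sketch (illustrative values only): hashIt pickles by
# default and unpickles when unhash=True, e.g.
#   blob = hashIt({'answer': 42})     # binary cPickle payload
#   hashIt(blob, unhash=True)         # -> {'answer': 42}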
def filenameFromPath(path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
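# Minimal usage sketch (hypothetical paths): filenameFromPath returns the last
# path component even when the path ends with a separator, e.g.
#   filenameFromPath("reports/2014/summary.txt") -> 'summary.txt'
#   filenameFromPath("reports/2014/")            -> '2014'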
def embeddedImport(modulename):
return imp.load_source(modulename, os.path.join(HERE, "..", "corepy", "embedded", "%s.py" % modulename))
def includeZipLib(zipfile):
sys.path.insert(0, os.path.join(HERE, "..", "libs", zipfile))
def xmlValue(xtype):
return {"string": "''",
"boolean": True,
"integer": 0,
"value": 666.0,
"memo": "''",
"set": "''",
"detail": "''",
"none": None,
"date": "date(2000, 01, 01)",
"time": "time(1, 0, 0)",
"internalid": 111,
"blob": "''"
}[str(xtype).lower()]
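# Minimal usage sketch: the lookup above is case-insensitive on the type name,
# and unknown types raise KeyError since no default is provided, e.g.
#   xmlValue("Boolean") -> True
#   xmlValue("STRING")  -> "''"
#   xmlValue("unknown") -> raises KeyError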
def escapeAnyToString(text):
integers = tuple((x, "0%s") for x in ['%d', '%i', '%o', '%u', '%x', '%X'])
decimals = tuple((x, "0%s") for x in ['%e', '%E', '%f', '%F', '%g', '%G'])
strings = tuple((x, "%s") for x in ['%c', '%r'])
for k, v in integers:
text = text.replace(k, v)
for k, v in decimals:
text = text.replace(k, v)
for k, v in strings:
text = text.replace(k, v)
return text
def isNumber(text):
try:
float(text)
return True
except TypeError:
return False
except ValueError:
return False
indent = ""
def debug(fn):
import inspect
varList, _, _, default = inspect.getargspec(fn)
d = {}
if default is not None:
d = dict((varList[-len(default):][i], v) for i, v in enumerate(default))
def f(*argt, **argd):
global indent
indent += "    "
logHere(indent, ('Enter %s' % fn).center(100, '='), filename="debug.log")
d.update(dict((varList[i], v) for i, v in enumerate(argt)))
d.update(argd)
for c in d.iteritems():
logHere(indent, '%s = %s' % c, filename="debug.log")
ret = fn(*argt, **argd)
logHere(indent, 'return:', filename="debug.log")
if type(ret) == str:
logHere(indent, ret.replace("\n", "%s\n" % indent), filename="debug.log")
else:
logHere(indent, ret, filename="debug.log")
logHere(indent, ('Exit %s' % fn).center(100, '='), filename="debug.log")
indent = indent[:-4]
return ret
return f
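# A minimal sketch of how the @debug decorator above is meant to be used
# (hypothetical function; the decorator writes its trace to logs/debug.log via
# logHere instead of printing):
#
#   @debug
#   def add(a, b=1):
#       return a + b
#
#   add(2, b=3)   # logs the entry, the bound arguments and the return value 5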
|
ancho85/pylint-playero-plugin
|
libs/tools.py
|
Python
|
gpl-2.0
| 5,066
|
#
# Copyright 2009-2011 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import os
from glob import iglob, glob
import logging
import time
import threading
import errno
import uuid
import codecs
from contextlib import nested
from functools import partial
import constants
import storage_mailbox
import blockSD
import fileSD
import sd
import misc
from misc import Event
import fileUtils
from config import config
from sdc import sdCache
import storage_exception as se
from persistentDict import DictValidator
from processPool import Timeout
from securable import Securable, unsecured
import image
from resourceFactories import IMAGE_NAMESPACE
from storageConstants import STORAGE
import resourceManager as rm
import volume
BLANK_POOL_UUID = '00000000-0000-0000-0000-000000000000'
POOL_MASTER_DOMAIN = 'mastersd'
MAX_POOL_DESCRIPTION_SIZE = 50
PMDK_DOMAINS = "POOL_DOMAINS"
PMDK_POOL_DESCRIPTION = "POOL_DESCRIPTION"
PMDK_LVER = "POOL_SPM_LVER"
PMDK_SPM_ID = "POOL_SPM_ID"
PMDK_MASTER_VER = "MASTER_VERSION"
rmanager = rm.ResourceManager.getInstance()
SPM_ACQUIRED = 'SPM'
SPM_CONTEND = 'Contend'
SPM_FREE = 'Free'
def domainListEncoder(domDict):
domains = ','.join([ '%s:%s' % (k, v) for k, v in domDict.iteritems()])
return domains
def domainListDecoder(s):
domList = {}
if not s:
return domList
for domDecl in s.split(","):
k, v = domDecl.split(':')
domList[k.strip("'")] = v.strip("'").capitalize()
return domList
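# Hedged illustration (made-up UUIDs): the two helpers above round-trip the
# pool metadata "domains" field, e.g.
#   domainListEncoder({'a1b2': 'Active', 'c3d4': 'Attached'})
#       -> 'a1b2:Active,c3d4:Attached'
#   domainListDecoder('a1b2:Active,c3d4:Attached')
#       -> {'a1b2': 'Active', 'c3d4': 'Attached'}
# Note that the decoder capitalizes the status and strips surrounding quotes.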
SP_MD_FIELDS = {
# Key dec, enc
PMDK_DOMAINS : (domainListDecoder, domainListEncoder),
PMDK_POOL_DESCRIPTION : (str, str), # should be decode\encode utf8
PMDK_LVER : (int, str),
PMDK_SPM_ID : (int, str),
PMDK_MASTER_VER : (int, str)
}
# Calculate how many domains can be in the pool before overflowing the Metadata
MAX_DOMAINS = blockSD.SD_METADATA_SIZE - blockSD.METADATA_BASE_SIZE
MAX_DOMAINS -= MAX_POOL_DESCRIPTION_SIZE + sd.MAX_DOMAIN_DESCRIPTION_SIZE
MAX_DOMAINS -= blockSD.PVS_METADATA_SIZE
MAX_DOMAINS /= 48
class StatsThread(threading.Thread):
log = logging.getLogger('Storage.StatsThread')
onDomainConnectivityStateChange = Event("StatsThread.onDomainConnectivityStateChange")
def __init__(self, func, sdUUID):
"""
StatsThread gets two arguments on instantiation:
func - function to call
sdUUID - UUID of the storage domain to pass to func()
"""
threading.Thread.__init__(self)
self._statscache = dict(result=
dict(code=200, lastCheck=0.0, delay='0', valid=True))
self._statsdelay = config.getint('irs', 'sd_health_check_delay')
self._statsletrun = True
self._statsfunc = func
self._sdUUID = sdUUID
self._domain = None
def run(self):
while self._statsletrun:
try:
if self._domain is None:
self._domain = sdCache.produce(self._sdUUID)
stats, code = self._statsfunc(self._domain)
except se.StorageException, e:
self.log.error("Unexpected error", exc_info=True)
code = e.code
except Exception, e:
self.log.error("Unexpected error", exc_info=True)
code = 200
delay = 0
if self._domain is not None:
try:
# This is handled separately because for this kind
# of failure we don't want to print a stack trace
delay = self._domain.getReadDelay()
except Exception, e:
self.log.error("Could not figure out delay for domain `%s` (%s)", self._sdUUID, e)
code = 200
if code != 0:
self._domain = None
finish = time.time()
stats['finish'] = finish
stats['result'] = dict(code=code, lastCheck=finish,
delay=str(delay), valid=(code == 0))
try:
if self._statscache["result"]["valid"] != stats["result"]["valid"]:
self.onDomainConnectivityStateChange.emit(self._sdUUID, stats["result"]["valid"])
except:
self.log.error("Could not emit domain state event", exc_info=True)
self._statscache.update(stats)
count = 0
while self._statsletrun and count < self._statsdelay:
count += 1
time.sleep(1)
self._statsfunc = None
def stop(self):
self._statsletrun = False
def getStatsResults(self):
return self._statscache.copy()
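# Illustrative shape of the dict returned by getStatsResults() (hypothetical
# values): the 'result' entry is always present; 'finish' and the domain's own
# stats appear after the first monitoring cycle, e.g.
#   {'result': {'code': 0, 'lastCheck': 1320000000.0, 'delay': '0.02',
#               'valid': True},
#    'finish': 1320000000.0, 'disktotal': ..., 'diskfree': ...}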
class StoragePool:
'''
StoragePool object should be relatively cheap to construct. It should defer
any heavy lifting activities until the time it is really needed.
'''
__metaclass__ = Securable
log = logging.getLogger('Storage.StoragePool')
storage_repository = config.get('irs', 'repository')
_poolsTmpDir = config.get('irs', 'pools_data_dir')
lvExtendPolicy = config.get('irs', 'vol_extend_policy')
def __init__(self, spUUID, taskManager):
self._domainsToUpgrade = []
self.lock = threading.Lock()
self._setUnsafe()
self.spUUID = str(spUUID)
self.poolPath = os.path.join(self.storage_repository, self.spUUID)
self.id = None
self.scsiKey = None
self.taskMng = taskManager
self._poolFile = os.path.join(self._poolsTmpDir, self.spUUID)
self.hsmMailer = None
self.spmMailer = None
self.masterDomain = None
self.repostats = {}
self.spmStarted = False
self.spmRole = SPM_FREE
@unsecured
def getSpmRole(self):
return self.spmRole
@unsecured
def getSpmLver(self):
return self.getMetaParam(PMDK_LVER)
@unsecured
def getSpmStatus(self):
#If this is the SPM no need to double check
return self.getSpmRole(), self.getSpmLver(), self.getSpmId()
def __del__(self):
if len(self.repostats) > 0:
threading.Thread(target=self.disconnectDomains).start()
@unsecured
def forceFreeSpm(self):
# DO NOT USE, STUPID, HERE ONLY FOR BC
# TODO: SCSI Fence the 'lastOwner'
self.setMetaParams({PMDK_SPM_ID: -1, PMDK_LVER: -1})
self.spmRole = SPM_FREE
def _upgradePoolDomain(self, sdUUID, isValid):
# This method is called every time the onDomainConnectivityStateChange
# event is emitted. The event is emitted even when a domain goes INVALID;
# if that happens there is nothing for us to do, no matter which
# domain it is.
if not isValid:
return
domain = sdCache.produce(sdUUID)
if sdUUID not in self._domainsToUpgrade:
return
self.log.debug("Preparing to upgrade domain %s", sdUUID)
try:
#Assumed that the domain can be attached only to one pool
targetDomVersion = self.masterDomain.getVersion()
except:
self.log.error("Error while preparing domain `%s` upgrade", sdUUID, exc_info=True)
return
with rmanager.acquireResource(STORAGE, "upgrade_" + sdUUID, rm.LockType.exclusive):
with rmanager.acquireResource(STORAGE, sdUUID, rm.LockType.exclusive):
if sdUUID not in self._domainsToUpgrade:
return
# This can never be the master
# Non data domain should not be upgraded
domClass = domain.getDomainClass()
if domClass != sd.DATA_DOMAIN:
self.log.debug("Domain `%s` is not a data domain it is an %s domain, not upgrading", sdUUID, domClass)
else:
domain.invalidateMetadata()
domVersion = domain.getVersion()
if domVersion > targetDomVersion:
self.log.critical("Found a domain with a more advanced version then the master domain")
elif domVersion < targetDomVersion:
try:
domain.upgrade(targetDomVersion)
except:
self.log.warn("Could not upgrade domain `%s`", sdUUID, exc_info=True)
return
self._domainsToUpgrade.remove(sdUUID)
if len(self._domainsToUpgrade) == 0:
self.log.debug("All domains are upgraded, unregistering from state change event")
try:
StatsThread.onDomainConnectivityStateChange.unregister(self._upgradePoolDomain)
except KeyError:
pass
@unsecured
def startSpm(self, prevID, prevLVER, scsiFencing, maxHostID, expectedDomVersion=None):
"""
Starts the SPM functionality.
:param spUUID: The UUID of the storage pool you want to manage with the SPM.
:type spUUID: UUID
:param prevID: obsolete
:param prevLVER: obsolete
:param scsiFencing: Should SCSI fencing be used.
:type scsiFencing: bool
:param maxHostID: The maximum ID of the host.
:type maxHostID: int
.. note::
    If the SPM is already started the function will fail silently.
:raises: :exc:`storage_exception.OperationInProgress` if called during an
    already running connection attempt (which makes the fact that it fails
    silently matter very little).
"""
with self.lock:
if self.spmRole == SPM_ACQUIRED:
return True
# Since we added the lock the following should NEVER happen
if self.spmRole == SPM_CONTEND:
raise se.OperationInProgress("spm start %s" % self.spUUID)
self.updateMonitoringThreads()
self.invalidateMetadata()
oldlver = self.getSpmLver()
oldid = self.getSpmId()
masterDomVersion = self.getVersion()
# If no specific domain version was specified use current master domain version
if expectedDomVersion is None:
expectedDomVersion = masterDomVersion
if masterDomVersion > expectedDomVersion:
raise se.CurrentVersionTooAdvancedError(self.masterDomain.sdUUID,
curVer=masterDomVersion, expVer=expectedDomVersion)
if int(oldlver) != int(prevLVER) or int(oldid) != int(prevID):
self.log.info("expected previd:%s lver:%s got request for previd:%s lver:%s" % (oldid, oldlver, prevID, prevLVER))
# Acquire spm lock
try:
self.spmRole = SPM_CONTEND
self.acquireClusterLock()
except:
self.spmRole = SPM_FREE
raise
self.log.debug("spm lock acquired successfully")
try:
self.lver = int(oldlver) + 1
self.invalidateMetadata()
self.setMetaParams({PMDK_LVER: self.lver,
PMDK_SPM_ID: self.id}, __securityOverride=True)
self._maxHostID = maxHostID
# Upgrade the master domain now if needed
self._upgradePool(expectedDomVersion, __securityOverride=True)
self.masterDomain.mountMaster()
self.masterDomain.createMasterTree(log=True)
self.tasksDir = os.path.join(self.poolPath, POOL_MASTER_DOMAIN, sd.MASTER_FS_DIR, sd.TASKS_DIR)
try:
# Make sure backup domain is active
self.checkBackupDomain(__securityOverride=True)
except Exception, e:
self.log.error("Backup domain validation failed, exc_info=True")
self.taskMng.loadDumpedTasks(self.tasksDir)
self.spmStarted = True
self.spmRole = SPM_ACQUIRED
# Once setSafe completes we are running as SPM
self._setSafe()
# Mailbox issues SPM commands, therefore we start it AFTER spm commands are allowed to run to prevent
# a race between the mailbox and the "self._setSafe() call"
# Create mailbox if SAN pool (currently not needed on nas)
# FIXME: Once pool contains mixed type domains (NFS + Block) the mailbox
# will have to be created if there is an active block domain in the pool
# or once one is activated
# FIXME: Use a system-wide grouping mechanism
sanPool = self.masterDomain.getStorageType() in sd.BLOCK_DOMAIN_TYPES # Check if pool is SAN or NAS
if sanPool and self.lvExtendPolicy == "ON":
self.spmMailer = storage_mailbox.SPM_MailMonitor(self, maxHostID)
self.spmMailer.registerMessageType('xtnd', partial(storage_mailbox.SPM_Extend_Message, self))
else:
self.spmMailer = None
# Restore tasks is last because tasks are spm ops (spm has to be started)
self.taskMng.recoverDumpedTasks()
self.log.debug("ended.")
except Exception, e:
self.log.error("Unexpected error", exc_info=True)
self.log.error("failed: %s" % str(e))
self.stopSpm(force=True, __securityOverride=True)
raise
@unsecured
def _shutDownUpgrade(self):
with rmanager.acquireResource(STORAGE, "upgrade_" + self.spUUID, rm.LockType.exclusive):
domains = self._domainsToUpgrade
try:
StatsThread.onDomainConnectivityStateChange.unregister(self._upgradePoolDomain)
except KeyError:
pass
requests = []
def cancelUpgrade(sdUUID, req, res):
try:
self._domainsToUpgrade.remove(sdUUID)
except ValueError:
pass
res.release()
for sdUUID in domains:
req = rmanager.registerResource(STORAGE, "upgrade_" + sdUUID, rm.LockType.exclusive, partial(cancelUpgrade, sdUUID))
requests.append(req)
for req in requests:
req.wait()
@classmethod
def __cleanupMasterMount(cls):
"""
Check whether there are any dangling master file systems still mounted
and unmount them if found.
"""
masters = os.path.join(cls.storage_repository, sd.DOMAIN_MNT_POINT,
sd.BLOCKSD_DIR, "*", sd.MASTER_FS_DIR)
for master in glob(masters):
if fileUtils.isMounted(mountPoint=master):
cls.log.debug("unmounting %s", master)
try:
blockSD.BlockStorageDomain.doUnmountMaster(master)
except se.StorageDomainMasterUnmountError, e:
misc.panic("unmount %s failed - %s" % (master, e))
else:
cls.log.debug("master `%s` is not mounted, skipping", master)
def stopSpm(self, force=False):
with self.lock:
if not force and self.getSpmRole() == SPM_FREE:
return True
self._shutDownUpgrade()
self._setUnsafe()
stopFailed = False
try:
self.__cleanupMasterMount()
except:
# If unmounting fails, vdsm panics.
stopFailed = True
try:
if self.spmMailer:
self.spmMailer.stop()
except:
# Here we are just being polite.
# SPM will also clean this on start up.
pass
if not stopFailed:
try:
self.setMetaParam(PMDK_SPM_ID, -1)
except:
pass # The system can handle this inconsistency
try:
self.releaseClusterLock()
except:
stopFailed = True
if stopFailed:
misc.panic("Unrecoverable errors during SPM stop process.")
self.spmStarted = False
self.spmRole = SPM_FREE
def _upgradePool(self, targetDomVersion):
with rmanager.acquireResource(STORAGE, "upgrade_" + self.spUUID, rm.LockType.exclusive):
if len(self._domainsToUpgrade) > 0:
raise se.PoolUpgradeInProgress(self.spUUID)
sd.validateDomainVersion(targetDomVersion)
self.log.info("Trying to upgrade master domain `%s`", self.masterDomain.sdUUID)
with rmanager.acquireResource(STORAGE, self.masterDomain.sdUUID, rm.LockType.exclusive):
self.masterDomain.upgrade(targetDomVersion)
self.log.debug("Marking all domains for upgrade")
self._domainsToUpgrade = self.getDomains(activeOnly=True).keys()
try:
self._domainsToUpgrade.remove(self.masterDomain.sdUUID)
except ValueError:
pass
self.log.debug("Registering with state change event")
StatsThread.onDomainConnectivityStateChange.register(self._upgradePoolDomain)
self.log.debug("Running initial domain upgrade threads")
for sdUUID in self._domainsToUpgrade:
threading.Thread(target=self._upgradePoolDomain, args=(sdUUID, True), kwargs={"__securityOverride": True}).start()
@unsecured
def __createMailboxMonitor(self):
# Currently mailbox is not needed for non block device sd's
if self.hsmMailer:
return
if isinstance(self.masterDomain, blockSD.BlockStorageDomain) and self.lvExtendPolicy == "ON":
self.hsmMailer = storage_mailbox.HSM_Mailbox(self.id, self.spUUID)
@unsecured
def __cleanupDomains(self, domlist, msdUUID, masterVersion):
"""
Clean up domains after failed Storage Pool creation
domlist - comma separated list of sdUUIDs
"""
# Go through all the domains and detach them from the pool
# Since something went wrong (otherwise why would we be cleaning
# the mess up?) do not expect all the domains to exist
domains = [sdCache.produce(d) for d in domlist]
for d in domains:
try:
self.detachSD(d, msdUUID, masterVersion)
except Exception:
self.log.error("Unexpected error", exc_info=True)
self.refresh()
@unsecured
def getMasterVersion(self):
return self.getMetaParam(PMDK_MASTER_VER)
@unsecured
def acquireClusterLock(self):
self.masterDomain.acquireClusterLock(self.id)
@unsecured
def releaseClusterLock(self):
self.masterDomain.releaseClusterLock()
@unsecured
def validateAttachedDomain(self, sdUUID):
domList = self.getDomains()
if sdUUID not in domList:
raise se.StorageDomainNotInPool(self.spUUID, sdUUID)
# Avoid handle domains if not owned by pool
dom = sdCache.produce(sdUUID)
pools = dom.getPools()
if self.spUUID not in pools:
raise se.StorageDomainNotInPool(self.spUUID, sdUUID)
@unsecured
def validatePoolMVerHigher(self, masterVersion):
"""
Make sure the masterVersion is higher than that of the pool.
:param masterVersion: the master version you want to validate
:type masterVersion: int
:raises: :exc:`storage_exception.StoragePoolWrongMasterVersion`
exception if masterVersion doesn't follow the rules
"""
mver = self.getMasterVersion()
if not int(masterVersion) > mver:
raise se.StoragePoolWrongMaster(self.spUUID, self.masterDomain.sdUUID)
@unsecured
def getMaximumSupportedDomains(self):
msdInfo = self.masterDomain.getInfo()
msdType = sd.name2type(msdInfo["type"])
msdVersion = int(msdInfo["version"])
if msdType in sd.BLOCK_DOMAIN_TYPES and msdVersion in blockSD.VERS_METADATA_LV:
return MAX_DOMAINS
else:
return config.getint("irs", "maximum_domains_in_pool")
@unsecured
def create(self, poolName, msdUUID, domList, masterVersion, safeLease):
"""
Create new storage pool with single/multiple image data domain.
The command will create new storage pool meta-data attach each
storage domain to that storage pool.
At least one data (images) domain must be provided
'poolName' - storage pool name
'msdUUID' - master domain of this pool (one of domList)
'domList' - list of domains (i.e sdUUID,sdUUID,...,sdUUID)
"""
self.log.info("spUUID=%s poolName=%s master_sd=%s "
"domList=%s masterVersion=%s %s",
self.spUUID, poolName, msdUUID,
domList, masterVersion, str(safeLease))
if msdUUID not in domList:
raise se.InvalidParameterException("masterDomain", msdUUID)
# Check the domains before pool creation
for dom in domList:
try:
domain = sdCache.produce(dom)
domain.validate()
except se.StorageException:
self.log.error("Unexpected error", exc_info=True)
raise se.StorageDomainAccessError(dom)
# Validate unattached domains
if not domain.isISO():
domain.invalidateMetadata()
spUUIDs = domain.getPools()
# Non ISO domains have only 1 pool
if len(spUUIDs) > 0:
raise se.StorageDomainAlreadyAttached(spUUIDs[0], dom)
fileUtils.createdir(self.poolPath)
try:
# Seeing as we are just creating the pool then the host doesn't
# have an assigned Id for this pool. When locking the domain we must use an Id
self.id = 1000
# Master domain is unattached and all changes to unattached domains
# must be performed under storage lock
msd = sdCache.produce(msdUUID)
msd.changeLeaseParams(safeLease)
msd.acquireClusterLock(self.id)
except:
self.id = None
raise
try:
try:
# Mark 'master' domain
# We should do it before actually attaching this domain to the pool.
# During 'master' marking we create pool metadata and each attached
# domain should register there
self.createMaster(poolName, msd, masterVersion, safeLease)
self.__rebuild(msdUUID=msdUUID, masterVersion=masterVersion)
# Attach storage domains to the storage pool
# Since we are creating the pool then attach is done from the hsm and not the spm
# therefore we must manually take the master domain lock
# TBD: create will receive only master domain and further attaches should be done
# under SPM
# Master domain was already attached (in createMaster),
# no need to reattach
for sdUUID in domList:
# No need to attach the master
if sdUUID == msdUUID:
continue
self.attachSD(sdUUID)
except Exception:
self.log.error("Create domain canceled due to an unexpected error", exc_info=True)
try:
fileUtils.cleanupdir(self.poolPath)
self.__cleanupDomains(domList, msdUUID, masterVersion)
except:
self.log.error("Cleanup failed due to an unexpected error", exc_info=True)
raise
finally:
msd.releaseClusterLock()
self.id = None
self.disconnectDomains()
return True
@unsecured
def _saveReconnectInformation(self, hostID, scsiKey, msdUUID, masterVersion):
pers = ["id=%d\n" % hostID]
pers.append("scsiKey=%s\n" % scsiKey)
pers.append("sdUUID=%s\n" % msdUUID)
pers.append("version=%s\n" % masterVersion)
with open(self._poolFile, "w") as f:
f.writelines(pers)
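# Illustrative content of the resulting pool file (hypothetical values), as
# written here and read back by getPoolParams():
#   id=2
#   scsiKey=...
#   sdUUID=5e9c382e-...
#   version=1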
@unsecured
def connect(self, hostID, scsiKey, msdUUID, masterVersion):
"""
Connect a Host to a specific storage pool.
Caller must acquire resource Storage.spUUID so that this method would never be called twice concurrently.
"""
self.log.info("Connect host #%s to the storage pool %s with master domain: %s (ver = %s)" %
(hostID, self.spUUID, msdUUID, masterVersion))
if not os.path.exists(self._poolsTmpDir):
msg = ("StoragePoolConnectionError for hostId: %s, on poolId: %s," +
" Pools temp data dir: %s does not exist" %
(hostID, self.spUUID, self._poolsTmpDir))
self.log.error(msg)
msg = "Pools temp data dir: %s does not exist" % (self._poolsTmpDir)
raise se.StoragePoolConnectionError(msg)
if os.path.exists(self._poolFile):
os.unlink(self._poolFile)
self._saveReconnectInformation(hostID, scsiKey, msdUUID, masterVersion)
self.id = hostID
self.scsiKey = scsiKey
# Make sure SDCache doesn't have stale data (it can be in case of FC)
sdCache.refresh()
# Rebuild whole Pool
self.__rebuild(msdUUID=msdUUID, masterVersion=masterVersion)
self.__createMailboxMonitor()
return True
@unsecured
def disconnectDomains(self):
for sdUUID in self.repostats.keys():
self.stopRepoStats(sdUUID)
return True
@unsecured
def disconnect(self):
"""
Disconnect a Host from specific storage pool.
Caller must acquire resource Storage.spUUID so that this method would never be called twice concurrently.
"""
self.log.info("Disconnect from the storage pool %s", self.spUUID)
self.id = None
self.scsiKey = None
if os.path.exists(self._poolFile):
os.unlink(self._poolFile)
if self.hsmMailer:
self.hsmMailer.stop()
self.hsmMailer = None
# Remove all links
if os.path.exists(self.poolPath):
fileUtils.cleanupdir(self.poolPath)
self.disconnectDomains()
return True
@unsecured
def getPoolParams(self):
with open(self._poolFile, "r") as f:
    for line in f:
        pair = line.strip().split("=")
        if len(pair) == 2:
            if pair[0] == "id":
                hostId = int(pair[1])
            elif pair[0] == "scsiKey":
                scsiKey = pair[1]
            elif pair[0] == "sdUUID":
                msdUUID = pair[1]
            elif pair[0] == "version":
                masterVersion = pair[1]
return hostId, scsiKey, msdUUID, masterVersion
@unsecured
def createMaster(self, poolName, domain, masterVersion, leaseParams):
"""
Create a fresh master file system directory tree
"""
# THIS METHOD MUST BE RUN UNDER DOMAIN STORAGE LOCK
self.log.info("setting master domain for spUUID %s on sdUUID=%s", self.spUUID, domain.sdUUID)
futurePoolMD = self._getPoolMD(domain)
with futurePoolMD.transaction():
domain.changeLeaseParams(leaseParams)
for spUUID in domain.getPools():
if spUUID != self.spUUID:
self.log.warn("Force detaching from pool `%s` because of reconstruct master", spUUID)
domain.detach(spUUID)
domain.attach(self.spUUID)
domain.changeRole(sd.MASTER_DOMAIN)
futurePoolMD.update({
PMDK_SPM_ID: -1,
PMDK_LVER: -1,
PMDK_MASTER_VER: masterVersion,
PMDK_POOL_DESCRIPTION: poolName,
PMDK_DOMAINS: {domain.sdUUID: sd.DOM_ACTIVE_STATUS}})
@unsecured
def reconstructMaster(self, poolName, msdUUID, domDict, masterVersion, safeLease):
self.log.info("spUUID=%s poolName=%s"
" master_sd=%s domDict=%s masterVersion=%s "
"leaseparams=(%s)",
self.spUUID, poolName, msdUUID, domDict, masterVersion,
str(safeLease))
if msdUUID not in domDict:
raise se.InvalidParameterException("masterDomain", msdUUID)
try:
# Seeing as we are just creating the pool then the host doesn't
# have an assigned Id for this pool. When locking the domain we must use an Id
self.id = 1000
# Master domain is unattached and all changes to unattached domains
# must be performed under storage lock
futureMaster = sdCache.produce(msdUUID)
futureMaster.changeLeaseParams(safeLease)
futureMaster.acquireClusterLock(self.id)
try:
self.createMaster(poolName, futureMaster, masterVersion, safeLease)
self.refresh(msdUUID=msdUUID, masterVersion=masterVersion)
# TBD: Run full attachSD?
domains = self.getDomains()
for sdUUID in domDict:
domains[sdUUID] = domDict[sdUUID].capitalize()
# Add domain to domain list in pool metadata
self.setMetaParam(PMDK_DOMAINS, domains, __securityOverride=True)
self.log.info("Set storage pool domains: %s", domains)
finally:
# We need to stop all repoStats threads that were started during reconstructMaster
self.disconnectDomains()
futureMaster.releaseClusterLock()
finally:
self.id = None
@unsecured
def copyPoolMD(self, prevMd, newMD):
prevPoolMD = self._getPoolMD(prevMd)
domains = prevPoolMD[PMDK_DOMAINS]
pool_descr = prevPoolMD[PMDK_POOL_DESCRIPTION]
lver = prevPoolMD[PMDK_LVER]
spmId = prevPoolMD[PMDK_SPM_ID]
# This is actually domain metadata, But I can't change this because of
# backward compatibility
leaseParams = prevMd.getLeaseParams()
# Now insert pool metadata into new mastersd metadata
newPoolMD = self._getPoolMD(newMD)
with newPoolMD.transaction():
newPoolMD.update({PMDK_DOMAINS: domains,
PMDK_POOL_DESCRIPTION: pool_descr,
PMDK_LVER: lver,
PMDK_SPM_ID: spmId})
newMD.changeLeaseParams(leaseParams)
@unsecured
def __masterMigrate(self, sdUUID, msdUUID, masterVersion):
curmsd = sdCache.produce(sdUUID)
newmsd = sdCache.produce(msdUUID)
self._refreshDomainLinks(newmsd)
curmsd.invalidateMetadata()
newmsd.upgrade(curmsd.getVersion())
# new 'master' should be in 'active' status
domList = self.getDomains()
if msdUUID not in domList:
raise se.StorageDomainNotInPool(self.spUUID, msdUUID)
if domList[msdUUID] != sd.DOM_ACTIVE_STATUS:
raise se.StorageDomainNotActive(msdUUID)
if newmsd.isISO():
raise se.IsoCannotBeMasterDomain(msdUUID)
if newmsd.isBackup():
raise se.BackupCannotBeMasterDomain(msdUUID)
# Copy master file system content to the new master
src = os.path.join(curmsd.domaindir, sd.MASTER_FS_DIR)
dst = os.path.join(newmsd.domaindir, sd.MASTER_FS_DIR)
# Mount new master file system
newmsd.mountMaster()
# Make sure there is no cruft left over
for dir in [newmsd.getVMsDir(), newmsd.getTasksDir()]:
fileUtils.cleanupdir(dir)
try:
fileUtils.tarCopy(src, dst, exclude=("./lost+found",))
except fileUtils.TarCopyFailed:
self.log.error("tarCopy failed", exc_info = True)
# Failed to copy the master data
try:
newmsd.unmountMaster()
except Exception:
self.log.error("Unexpected error", exc_info=True)
raise se.StorageDomainMasterCopyError(msdUUID)
self.copyPoolMD(curmsd, newmsd)
path = newmsd.getMDPath()
if not path:
newmsd.unmountMaster()
raise se.StorageDomainLayoutError("domain", msdUUID)
# Acquire safelease lock on new master
try:
# Reset SPM lock because of the host still SPM
# It will speedup new lock acquiring
newmsd.initSPMlease()
newmsd.acquireClusterLock(self.id)
except Exception:
self.log.error("Unexpected error", exc_info=True)
newmsd.releaseClusterLock()
newmsd.unmountMaster()
raise
self.log.debug("masterMigrate - lease acquired successfully")
try:
# Now mark new domain as 'master'
# if things break down here move the master back pronto
newPoolMD = self._getPoolMD(newmsd)
with newPoolMD.transaction():
newPoolMD[PMDK_MASTER_VER] = masterVersion
newmsd.changeRole(sd.MASTER_DOMAIN)
self._saveReconnectInformation(self.id, self.scsiKey, newmsd.sdUUID, masterVersion)
except Exception:
self.log.error("Unexpected error", exc_info=True)
newmsd.releaseClusterLock()
newmsd.unmountMaster()
raise
# From this point on we have a new master and should not fail
try:
# Now recreate 'mastersd' link
# we can use refresh() to do the job
self.refresh(msdUUID, masterVersion)
# From this point on there is a new master domain in the pool
# Now that we are beyond the critical point we can clean up things
curmsd.changeRole(sd.REGULAR_DOMAIN)
# Clean up the old data from previous master fs
for dir in [curmsd.getVMsDir(), curmsd.getTasksDir()]:
fileUtils.cleanupdir(dir)
# NOTE: here we unmount the *previous* master file system !!!
curmsd.unmountMaster()
except Exception:
self.log.error("Unexpected error", exc_info=True)
try:
# Release old lease
curmsd.releaseClusterLock()
except Exception:
self.log.error("Unexpected error", exc_info=True)
@unsecured
def __unmountLastMaster(self, sdUUID):
curmsd = sdCache.produce(sdUUID)
# Check if it's the last domain and allow detaching it
dl = self.getDomains(activeOnly=True)
domList = dl.keys()
if curmsd.sdUUID in domList:
domList.remove(curmsd.sdUUID)
for item in domList:
domain = sdCache.produce(item)
if domain.isData():
# Failure, we have at least one more data domain
# in the pool and one which can become 'master'
raise se.StoragePoolHasPotentialMaster(item)
curmsd.unmountMaster()
def masterMigrate(self, sdUUID, msdUUID, masterVersion):
self.log.info("sdUUID=%s spUUID=%s msdUUID=%s", sdUUID, self.spUUID, msdUUID)
# Check if we are migrating to or just unmounting last master
if msdUUID != sd.BLANK_UUID:
# TODO: is this check relevant?
self.validatePoolMVerHigher(masterVersion)
self.__masterMigrate(sdUUID, msdUUID, masterVersion)
return False # not last master
self.__unmountLastMaster(sdUUID)
return True # last master
def attachSD(self, sdUUID):
"""
Attach a storage domain to the storage pool.
This marks the storage domain as "attached" and links it
to the storage pool
'sdUUID' - storage domain UUID
"""
self.log.info("sdUUID=%s spUUID=%s", sdUUID, self.spUUID)
domains = self.getDomains()
if sdUUID in domains:
return True
if len(domains) >= self.getMaximumSupportedDomains():
raise se.TooManyDomainsInStoragePoolError()
dom = sdCache.produce(sdUUID)
dom.acquireClusterLock(self.id)
try:
#If you remove this condition, remove it from public_createStoragePool too.
if dom.isData() and (dom.getVersion() != self.masterDomain.getVersion()):
raise se.MixedSDVersionError(dom.sdUUID, dom.getVersion(), self.masterDomain.sdUUID, self.masterDomain.getVersion())
dom.attach(self.spUUID)
domains[sdUUID] = sd.DOM_ATTACHED_STATUS
self.setMetaParam(PMDK_DOMAINS, domains)
self._refreshDomainLinks(dom)
finally:
dom.releaseClusterLock()
self.updateMonitoringThreads()
def forcedDetachSD(self, sdUUID):
self.log.warn("Force detaching domain `%s`", sdUUID)
domains = self.getDomains()
if sdUUID not in domains:
return True
del domains[sdUUID]
self.setMetaParam(PMDK_DOMAINS, domains)
self._cleanupDomainLinks(sdUUID)
self.updateMonitoringThreads()
self.log.debug("Force detach for domain `%s` is done", sdUUID)
def detachSD(self, sdUUID, msdUUID, masterVersion):
"""
Detach a storage domain from a storage pool.
This removes the storage domain entry in the storage pool meta-data
and leaves the storage domain in 'unattached' status.
'sdUUID' - storage domain UUID
'msdUUID' - master storage domain UUID
'masterVersion' - new master storage domain version
"""
self.log.info("sdUUID=%s spUUID=%s msdUUID=%s", sdUUID, self.spUUID, msdUUID)
dom = sdCache.produce(sdUUID)
if dom.isISO():
dom.acquireClusterLock(self.id)
try:
dom.invalidateMetadata()
try:
# Avoid detach domains if not owned by pool
self.validateAttachedDomain(sdUUID)
domList = self.getDomains()
sd.validateSDStateTransition(sdUUID, domList[sdUUID], sd.DOM_UNATTACHED_STATUS)
# If the domain being detached is the 'master', move all pool
# metadata to the new 'master' domain (msdUUID)
if sdUUID == self.masterDomain.sdUUID:
self.masterMigrate(sdUUID, msdUUID, masterVersion, __securityOverride=True)
# Remove pool info from domain metadata
dom.detach(self.spUUID)
# Remove domain from pool metadata
del domList[sdUUID]
self.setMetaParam(PMDK_DOMAINS, domList, __securityOverride=True)
self._cleanupDomainLinks(sdUUID)
self.updateMonitoringThreads()
except Exception:
self.log.error("Unexpected error", exc_info=True)
finally:
if dom.isISO():
dom.releaseClusterLock()
def activateSD(self, sdUUID):
"""
Activate a storage domain that is already a member in a storage pool.
Validate that the storage domain is owned by the storage pool.
'sdUUID' - storage domain UUID
"""
self.log.info("sdUUID=%s spUUID=%s", sdUUID, self.spUUID)
# Avoid domain activation if not owned by pool
self.validateAttachedDomain(sdUUID)
domList = self.getDomains()
dom = sdCache.produce(sdUUID)
sd.validateSDStateTransition(sdUUID, domList[sdUUID], sd.DOM_ACTIVE_STATUS)
# Do nothing if already active
if domList[sdUUID] == sd.DOM_ACTIVE_STATUS:
return True
if dom.getDomainClass() == sd.DATA_DOMAIN:
dom.upgrade(self.getVersion())
dom.activate()
# set domains also do rebuild
domList[sdUUID] = sd.DOM_ACTIVE_STATUS
self.setMetaParam(PMDK_DOMAINS, domList)
self._refreshDomainLinks(dom)
self.updateMonitoringThreads()
return True
def deactivateSD(self, sdUUID, new_msdUUID, masterVersion):
"""
Deactivate a storage domain.
Validate that the storage domain is owned by the storage pool.
Change storage domain status to "Attached" in the storage pool meta-data.
:param sdUUID: The UUID of the storage domain you want to deactivate.
:param new_msdUUID: The UUID of the new master storage domain.
:param masterVersion: new master storage domain version
"""
self.log.info("sdUUID=%s spUUID=%s new_msdUUID=%s", sdUUID, self.spUUID, new_msdUUID)
domList = self.getDomains()
if sdUUID not in domList:
raise se.StorageDomainNotInPool(self.spUUID, sdUUID)
try:
dom = sdCache.produce(sdUUID)
#Check that dom is really reachable and not a cached value.
dom.validate(False)
except (se.StorageException, Timeout):
self.log.warn("deactivaing MIA domain `%s`", sdUUID, exc_info=True)
if new_msdUUID != BLANK_POOL_UUID:
#Trying to migrate master failed to reach actual msd.
raise se.StorageDomainAccessError(sdUUID)
else:
if dom.isMaster():
# Maybe the exception should state that the UUID is invalid not because
# of its format but because it is equal to the current SD; that would be less confusing.
#TODO: verify in masterMigrate().
if sdUUID == new_msdUUID:
raise se.InvalidParameterException("new_msdUUID", new_msdUUID)
self.masterMigrate(sdUUID, new_msdUUID, masterVersion)
elif dom.isBackup():
dom.unmountMaster()
domList[sdUUID] = sd.DOM_ATTACHED_STATUS
self.setMetaParam(PMDK_DOMAINS, domList)
self.updateMonitoringThreads()
@unsecured
def _linkStorageDomain(self, src, linkName):
self.log.info("Linking %s to %s", src, linkName)
try:
current = os.readlink(linkName)
except OSError, e:
if e.errno != errno.ENOENT:
self.log.error("Can't link SD %s to %s", src, linkName, exc_info=True)
return
else:
if current == src:
    return  # Nothing to do
# Rebuild the link
tmp_link_name = os.path.join(self.storage_repository, str(uuid.uuid4()))
os.symlink(src, tmp_link_name) #make tmp_link
os.rename(tmp_link_name, linkName)
@unsecured
def _cleanupDomainLinks(self, domain):
linkPath = os.path.join(self.poolPath, domain)
try:
os.remove(linkPath)
except (OSError, IOError):
pass
@unsecured
def _refreshDomainLinks(self, domain):
domain.refreshDirTree()
linkName = os.path.join(self.poolPath, domain.sdUUID)
self._linkStorageDomain(domain.domaindir, linkName)
if self.masterDomain.sdUUID == domain.sdUUID:
masterName = os.path.join(self.poolPath, POOL_MASTER_DOMAIN)
self._linkStorageDomain(domain.domaindir, masterName)
else:
domPoolMD = self._getPoolMD(domain)
with domPoolMD.transaction():
domain.changeRole(sd.REGULAR_DOMAIN)
domPoolMD[PMDK_MASTER_VER] = 0
@unsecured
def __rebuild(self, msdUUID, masterVersion):
"""
Rebuild storage pool.
"""
# master domain must be refreshed first
self.masterDomain = self.getMasterDomain(msdUUID=msdUUID, masterVersion=masterVersion)
self.updateMonitoringThreads()
fileUtils.createdir(self.poolPath)
# Find out all domains for future cleanup
domainpat = os.path.join(self.poolPath, constants.UUID_GLOB_PATTERN)
oldLinks = set(iglob(domainpat))
# We should not rebuild non-active domains, because
# they are probably disconnected from the host
domUUIDs = self.getDomains(activeOnly=True).keys()
#msdUUID should be present and active in getDomains result.
#TODO: Consider remove if clause.
if msdUUID in domUUIDs:
domUUIDs.remove(msdUUID)
#TODO: Consider to remove this whole block. UGLY!
#We want to avoid lookups (vgs) of unknown block domains.
#domUUIDs includes all the domains, file or block.
block_mountpoint = os.path.join(sd.StorageDomain.storage_repository,
sd.DOMAIN_MNT_POINT, sd.BLOCKSD_DIR)
blockDomUUIDs = [vg.name for vg in blockSD.lvm.getVGs(domUUIDs)]
domDirs = {} # {domUUID: domaindir}
#Add the block domains
for domUUID in blockDomUUIDs:
domaindir = os.path.join(block_mountpoint, domUUID)
domDirs[domUUID] = domaindir
# create domain special volumes folder
fileUtils.createdir(os.path.join(domaindir, sd.DOMAIN_META_DATA))
fileUtils.createdir(os.path.join(domaindir, sd.DOMAIN_IMAGES))
#Add the file domains
for domUUID, domaindir in fileSD.scanDomains(): #[(fileDomUUID, file_domaindir)]
domDirs[domUUID] = domaindir
#Link all the domains to the pool
for domUUID, domaindir in domDirs.iteritems():
linkName = os.path.join(self.poolPath, domUUID)
self._linkStorageDomain(domaindir, linkName)
oldLinks.discard(linkName)
# Always try to build master links
try:
self._refreshDomainLinks(self.masterDomain)
except (se.StorageException, OSError):
self.log.error("_refreshDomainLinks failed for master domain %s", self.masterDomain.sdUUID, exc_info=True)
linkName = os.path.join(self.poolPath, self.masterDomain.sdUUID)
oldLinks.discard(linkName)
# Cleanup old trash from the pool
for oldie in oldLinks:
try:
os.remove(oldie)
except OSError as e:
if e.errno != errno.ENOENT:
self.log.warn("Could not clean all trash from the pool dom `%s` (%s)", oldie, e)
except Exception as e:
self.log.warn("Could not clean all trash from the pool dom `%s` (%s)", oldie, e)
@unsecured
def refresh(self, msdUUID=None, masterVersion=None):
"""
Refresh storage pool.
'msdUUID' - master storage domain UUID
"""
sdCache.refresh()
self.__rebuild(msdUUID=msdUUID, masterVersion=masterVersion)
def updateVM(self, vmList, sdUUID=None):
"""
Update VMs.
'vmList' - [{'vm':vmUUID,'ovf','imglist':'imgUUID1,imgUUID2,...'},...]
'sdUUID' - target domain UUID, if not None, VM Images and the master tree
must be located on this domain.
If sdUUID is None, the update is on the pool, and therefore the
master domain will be updated.
"""
if sdUUID is None:
sdUUID = self.masterDomain.sdUUID
self.log.info("spUUID=%s sdUUID=%s", self.spUUID, sdUUID)
vms = self._getVMsPath(sdUUID)
# We should exclude the 'mastersd' link from IMG_METAPATTERN globbing
vmUUID = ovf = imgList = ''
for vm in vmList:
if not vm:
continue
try:
vmUUID = vm['vm']
ovf = vm['ovf']
imgList = vm['imglist'].split(',')
self.log.info("vmUUID=%s imgList=%s", vmUUID, str(imgList))
except KeyError:
raise se.InvalidParameterException("vmList", str(vmList))
vmPath = os.path.join(vms, vmUUID)
if fileUtils.pathExists(vmPath):
fileUtils.cleanupdir(vmPath, ignoreErrors = False)
try:
os.mkdir(vmPath)
codecs.open(os.path.join(vmPath, vmUUID + '.ovf'), 'w',
encoding='utf8').write(ovf)
except OSError, ex:
if ex.errno == errno.ENOSPC:
raise se.NoSpaceLeftOnDomain(sdUUID)
raise
def removeVM(self, vmList, sdUUID=None):
"""
Remove VMs.
'vmList' - vmUUID1,vmUUID2,...
"""
self.log.info("spUUID=%s vmList=%s sdUUID=%s", self.spUUID, str(vmList), sdUUID)
vms = self._getVMsPath(sdUUID)
vmUUIDs = vmList.split(',')
for vm in vmUUIDs:
if os.path.exists(os.path.join(vms, vm)):
fileUtils.cleanupdir(os.path.join(vms, vm))
def setDescription(self, descr):
"""
Set storage pool description.
'descr' - pool description
"""
if len(descr) > MAX_POOL_DESCRIPTION_SIZE:
raise se.StoragePoolDescriptionTooLongError()
self.log.info("spUUID=%s descr=%s", self.spUUID, descr)
self.setMetaParam(PMDK_POOL_DESCRIPTION, descr)
def extendVolume(self, sdUUID, volumeUUID, size, isShuttingDown=None):
sdCache.produce(sdUUID).extendVolume(volumeUUID, size, isShuttingDown)
@classmethod
def _getPoolMD(cls, domain):
# This might look disgusting, but it makes this the only
# intrusion needed to satisfy the
# unholy union between pool and SD metadata
return DictValidator(domain._metadata._dict, SP_MD_FIELDS)
@property
def _metadata(self):
return self._getPoolMD(self.masterDomain)
@unsecured
def getDescription(self):
try:
return self.getMetaParam(PMDK_POOL_DESCRIPTION)
# There was a bug that caused the pool description to
# disappear. Returning "" might be ugly but it keeps
# everyone happy.
except KeyError:
return ""
@unsecured
def getVersion(self):
return self.masterDomain.getVersion()
@unsecured
def getSpmId(self):
spmid = self.getMetaParam(PMDK_SPM_ID)
if spmid != self.id or self.spmRole != SPM_FREE:
return spmid
# If we claim to be the SPM we have to be really sure we are
self.invalidateMetadata()
return self.getMetaParam(PMDK_SPM_ID)
@unsecured
def getInfo(self):
"""
Get storage pool info.
"""
##self.log.info("Get info of the storage pool %s",
## self.spUUID)
if not self.spUUID:
raise se.StoragePoolInternalError
info = {'type': '', 'name': '', 'domains': '', 'master_uuid': '', 'master_ver': 0,
'lver': -1, 'spm_id': -1, 'isoprefix': '', 'pool_status': 'uninitialized', 'version': -1}
list_and_stats = {}
msdUUID = self.masterDomain.sdUUID
try:
msdInfo = self.masterDomain.getInfo()
except Exception:
self.log.error("Couldn't read from master domain", exc_info=True)
raise se.StoragePoolMasterNotFound(self.spUUID, msdUUID)
try:
info['type'] = msdInfo['type']
info['domains'] = domainListEncoder(self.getDomains())
info['name'] = self.getDescription()
info['spm_id'] = self.getSpmId()
info['lver'] = self.getMetaParam(PMDK_LVER)
info['master_uuid'] = msdInfo['uuid']
info['master_ver'] = self.getMasterVersion()
info['version'] = str(self.getVersion())
except Exception:
self.log.error("Pool metadata error", exc_info=True)
raise se.StoragePoolActionError(self.spUUID)
# Get info of all pool's domains
domDict = self.getDomains()
repoStats = self.getRepoStats()
for item in domDict:
# Return statistics for active domains only
stats = {}
alerts = []
if domDict[item] == sd.DOM_ACTIVE_STATUS:
try:
dom = sdCache.produce(item)
if dom.isISO():
info['isoprefix'] = os.path.join(self.poolPath, item,
sd.DOMAIN_IMAGES, sd.ISO_IMAGE_UUID)
except:
self.log.warn("Could not get full domain information, it is probably unavailable", exc_info=True)
if item in repoStats:
try:
# For unreachable domains repoStats will return disktotal/diskfree as None.
# We should drop these parameters in this case
if repoStats[item]['disktotal'] is not None and repoStats[item]['diskfree'] is not None:
stats['disktotal'] = repoStats[item]['disktotal']
stats['diskfree'] = repoStats[item]['diskfree']
if not repoStats[item]['mdavalid']:
alerts.append({'code':se.SmallVgMetadata.code,
'message':se.SmallVgMetadata.message})
self.log.warning("VG %s's metadata size too small %s",
dom.sdUUID, repoStats[item]['mdasize'])
if not repoStats[item]['mdathreshold']:
alerts.append({'code':se.VgMetadataCriticallyFull.code,
'message':se.VgMetadataCriticallyFull.message})
self.log.warning("VG %s's metadata size exceeded critical size: \
mdasize=%s mdafree=%s", dom.sdUUID,
repoStats[item]['mdasize'], repoStats[item]['mdafree'])
except KeyError:
# We might have been asked to run before the first repoStats cycle was run
if item not in self.repostats:
self.log.warn("RepoStats is not active for active domain `%s`", item)
try:
stats.update(sdCache.produce(item).getStats())
except:
self.log.error("Could not get information for domain %s", item, exc_info=True)
# Domain is unavailable and we have nothing in the cache
# We need to return both of them or none
stats.pop('disktotal', None)
stats.pop('diskfree', None)
stats['alerts'] = alerts
stats['status'] = domDict[item]
list_and_stats[item] = stats
info["pool_status"] = "connected"
return dict(info=info, dominfo=list_and_stats)
@unsecured
def getIsoDomain(self):
"""
Get pool's ISO domain if active
"""
domDict = self.getDomains(activeOnly=True)
for item in domDict:
try:
dom = sdCache.produce(item)
except se.StorageDomainDoesNotExist :
self.log.warn("Storage domain %s does not exist", item)
continue
if dom.isISO():
return dom
return None
def setMetaParams(self, params):
self._metadata.update(params)
def setMetaParam(self, key, value):
"""
Set key:value in pool metadata file
"""
self._metadata[key] = value
@unsecured
def getMetaParam(self, key):
"""
Get parameter from pool metadata file
"""
return self._metadata[key]
@unsecured
def getMasterDomain(self, msdUUID, masterVersion):
"""
Get the (verified) master domain of this pool.
'msdUUID' - expected master domain UUID.
'masterVersion' - expected pool msd version.
"""
try:
domain = sdCache.produce(msdUUID)
except se.StorageDomainDoesNotExist:
#Manager should start reconstructMaster if SPM.
raise se.StoragePoolMasterNotFound(self.spUUID, msdUUID)
if not domain.isMaster():
self.log.error("Requested master domain %s is not a master domain at all", msdUUID)
raise se.StoragePoolWrongMaster(self.spUUID, msdUUID)
pools = domain.getPools()
if (self.spUUID not in pools):
self.log.error("Requested master domain %s does not belong to pool %s", msdUUID, self.spUUID)
raise se.StoragePoolWrongMaster(self.spUUID, msdUUID)
version = self._getPoolMD(domain)[PMDK_MASTER_VER]
if version != int(masterVersion):
self.log.error("Requested master domain %s does not have expected version %s it is version %s",
msdUUID, masterVersion, version)
raise se.StoragePoolWrongMaster(self.spUUID, msdUUID)
self.log.debug("Master domain %s verified, version %s", msdUUID, masterVersion)
return domain
@unsecured
def invalidateMetadata(self):
if not self.spmStarted:
self._metadata.invalidate()
@unsecured
@misc.samplingmethod
def updateMonitoringThreads(self):
# The domain list is a list of sdUUID:status pairs:
# sdUUID1:status1,sdUUID2:status2,...
self.invalidateMetadata()
activeDomains = self.getDomains(activeOnly=True)
monitoredDomains = self.repostats.keys()
for sdUUID in monitoredDomains:
if sdUUID not in activeDomains:
try:
self.stopRepoStats(sdUUID)
self.log.debug("sp `%s` stopped monitoring domain `%s`" % (self.spUUID, sdUUID))
except se.StorageException:
self.log.error("Unexpected error while trying to stop monitoring domain `%s`", sdUUID, exc_info=True)
for sdUUID in activeDomains:
if sdUUID not in monitoredDomains:
try:
self.startRepoStats(sdUUID)
self.log.debug("sp `%s` started monitoring domain `%s`" % (self.spUUID, sdUUID))
except se.StorageException:
self.log.error("Unexpected error while trying to monitor domain `%s`", sdUUID, exc_info=True)
@unsecured
def getDomains(self, activeOnly=False):
return dict((sdUUID, status) \
for sdUUID, status in self.getMetaParam(PMDK_DOMAINS).iteritems() \
if not activeOnly or status == sd.DOM_ACTIVE_STATUS)
def checkBackupDomain(self):
domDict = self.getDomains(activeOnly=True)
for sdUUID in domDict:
dom = sdCache.produce(sdUUID)
if dom.isBackup():
dom.mountMaster()
# The master tree should exist at this point;
# recreate it if not.
dom.createMasterTree()
@unsecured
def getImageDomainsList(self, imgUUID, datadomains=True):
"""
Get list of all domains in the pool that contain imgUUID
'imgUUID' - image UUID
"""
# TODO: get rid of this verb and let management query each domain separately
# the problem with the current implementation is that when a domain is not accessible
# the error must be ignored and management can reach wrong conclusions.
domainsdict = self.getDomains(activeOnly=True)
domainslist = []
for dom in domainsdict:
try:
d = sdCache.produce(dom)
except Exception:
# Pass over invisible active domains
self.log.error("Unexpected error", exc_info=True)
continue
if datadomains and not d.isData():
continue
imageslist = d.getAllImages()
if imgUUID in imageslist:
domainslist.append(dom)
return domainslist
@unsecured
def isMember(self, sdUUID, checkActive=False):
"""
Check if the domain is a member of the pool.
"""
return sdUUID in self.getDomains(activeOnly=checkActive)
@unsecured
def isActive(self, sdUUID):
return sdUUID in self.getDomains(activeOnly=True)
# TODO : move to sd.py
@unsecured
def _getVMsPath(self, sdUUID):
"""
Return general path of VMs from the pool.
If 'sdUUID' is given then return VMs dir within it.
"""
if sdUUID and sdUUID != sd.BLANK_UUID:
if not self.isActive(sdUUID):
raise se.StorageDomainNotActive(sdUUID)
vmPath = sdCache.produce(sdUUID).getVMsDir()
# Get VMs path from the pool (from the master domain)
else:
vmPath = self.masterDomain.getVMsDir()
if not os.path.exists(vmPath):
raise se.VMPathNotExists(vmPath)
return vmPath
@unsecured
def check(self):
poolstatus = 0
baddomains = {}
message = "Pool OK"
try:
self.invalidateMetadata()
spmId = self.getMetaParam(PMDK_SPM_ID)
domains = self.getDomains(activeOnly=True)
for dom in domains:
d = sdCache.produce(dom)
domstatus = d.checkDomain(spUUID=self.spUUID)
if domstatus["domainstatus"] != 0:
baddomains[dom] = domstatus
poolstatus = se.StoragePoolCheckError.code
message = "Pool has bad domains"
except se.StorageException, e:
poolstatus = e.code
message = str(e)
except:
poolstatus = se.StorageException.code
message = "Pool is bad"
return dict(poolstatus = poolstatus, baddomains = baddomains,
masterdomain = self.masterDomain.sdUUID, spmhost=spmId,
message = message)
@unsecured
def _repostats(self, domain):
# self.selftest() should return True if things are looking good
# and False otherwise
stats = { 'disktotal' : None,
'diskfree' : None,
'masterValidate' : { 'mount' : False, 'valid' : False }
}
code = 0
try:
domain.selftest()
res = domain.getStats()
stats.update(res)
# Add here more selftests if needed
# Fill stats to get it back to the caller
# Keys 'finish' and 'result' are reserved and may not be used
stats['masterValidate'] = domain.validateMaster()
except se.StorageException, e:
code = e.code
except (OSError, Timeout):
code = se.StorageDomainAccessError.code
return stats, code
@unsecured
def startRepoStats(self, sdUUID):
statthread = self.repostats.get(sdUUID)
if not statthread:
statthread = StatsThread(self._repostats, sdUUID)
statthread.start()
self.repostats[sdUUID] = statthread
self.log.debug("%s stat %s", sdUUID, statthread)
@unsecured
def stopRepoStats(self, domain):
statthread = self.repostats.pop(domain, None)
if statthread:
statthread.stop()
self.log.debug("%s stat %s", domain, statthread)
@unsecured
def getRepoStats(self):
repostats = self.repostats.copy()
result = {}
for d in repostats:
result[d] = repostats[d].getStatsResults()
return result
def copyImage(self, sdUUID, vmUUID, srcImgUUID, srcVolUUID, dstImgUUID,
dstVolUUID, descr, dstSdUUID, volType, volFormat, preallocate, postZero, force):
"""
Creates a new template/volume from VM.
It does this by collapsing and copying the whole chain (baseVolUUID->srcVolUUID).
:param sdUUID: The UUID of the storage domain in which the image resides.
:type sdUUID: UUID
:param spUUID: The UUID of the storage pool in which the image resides.
:type spUUID: UUID
:param vmUUID: The UUID of the virtual machine you want to copy from.
:type vmUUID: UUID
:param srcImageUUID: The UUID of the source image you want to copy from.
:type srcImageUUID: UUID
:param srcVolUUID: The UUID of the source volume you want to copy from.
:type srcVolUUID: UUID
:param dstImageUUID: The UUID of the destination image you want to copy to.
:type dstImageUUID: UUID
:param dstVolUUID: The UUID of the destination volume you want to copy to.
:type dstVolUUID: UUID
:param descr: The human readable description of the new template.
:type descr: str
:param dstSdUUID: The UUID of the destination storage domain you want to copy to.
:type dstSdUUID: UUID
:param volType: The volume type of the volume being copied to.
:type volType: some enum?!
:param volFormat: The format of the volume being copied to.
:type volFormat: some enum?!
:param preallocate: Should the data be preallocated.
:type preallocate: bool
:param postZero: ?
:type postZero: ?
:param force: Should the copy be forced.
:type force: bool
:returns: a dict containing the UUID of the newly created image.
:rtype: dict
"""
srcImageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
if dstSdUUID not in [sdUUID, sd.BLANK_UUID]:
dstImageResourcesNamespace = sd.getNamespace(dstSdUUID, IMAGE_NAMESPACE)
else:
dstImageResourcesNamespace = srcImageResourcesNamespace
with nested(rmanager.acquireResource(srcImageResourcesNamespace, srcImgUUID, rm.LockType.shared),
rmanager.acquireResource(dstImageResourcesNamespace, dstImgUUID, rm.LockType.exclusive)):
repoPath = os.path.join(self.storage_repository, self.spUUID)
dstUUID = image.Image(repoPath).copy(sdUUID, vmUUID, srcImgUUID,
srcVolUUID, dstImgUUID, dstVolUUID, descr, dstSdUUID,
volType, volFormat, preallocate, postZero, force)
return dict(uuid=dstUUID)
def moveImage(self, srcDomUUID, dstDomUUID, imgUUID, vmUUID, op, postZero, force):
"""
Moves or copies an image between storage domains within the same storage pool.
:param spUUID: The storage pool where the operation will take place.
:type spUUID: UUID
:param srcDomUUID: The UUID of the storage domain you want to copy from.
:type srcDomUUID: UUID
:param dstDomUUID: The UUID of the storage domain you want to copy to.
:type dstDomUUID: UUID
:param imgUUID: The UUID of the image you want to copy.
:type imgUUID: UUID
:param vmUUID: The UUID of the vm that owns the images. ?
:type vmUUID: UUID
:param op: The operation code?
:type op: some enum?
:param postZero: ?
:param force: Should the operation be forced.
:type force: bool
"""
srcImageResourcesNamespace = sd.getNamespace(srcDomUUID, IMAGE_NAMESPACE)
dstImageResourcesNamespace = sd.getNamespace(dstDomUUID, IMAGE_NAMESPACE)
# For MOVE_OP acquire an exclusive lock
# For COPY_OP a shared lock is enough
if op == image.MOVE_OP:
srcLock = rm.LockType.exclusive
elif op == image.COPY_OP:
srcLock = rm.LockType.shared
else:
raise se.MoveImageError(imgUUID)
with nested(rmanager.acquireResource(srcImageResourcesNamespace, imgUUID, srcLock),
rmanager.acquireResource(dstImageResourcesNamespace, imgUUID, rm.LockType.exclusive)):
repoPath = os.path.join(self.storage_repository, self.spUUID)
image.Image(repoPath).move(srcDomUUID, dstDomUUID, imgUUID, vmUUID, op, postZero, force)
def moveMultipleImages(self, srcDomUUID, dstDomUUID, imgDict, vmUUID, force):
"""
        Moves multiple images between storage domains within the same storage pool.
:param spUUID: The storage pool where the operation will take place.
:type spUUID: UUID
:param srcDomUUID: The UUID of the storage domain you want to copy from.
:type srcDomUUID: UUID
:param dstDomUUID: The UUID of the storage domain you want to copy to.
:type dstDomUUID: UUID
        :param imgDict: A dict of images in the form of ``{somthing:idunno}``
:type imgDict: dict
:param vmUUID: The UUID of the vm that owns the images.
:type vmUUID: UUID
:param force: Should the operation be forced.
:type force: bool
"""
srcImageResourcesNamespace = sd.getNamespace(srcDomUUID, IMAGE_NAMESPACE)
dstImageResourcesNamespace = sd.getNamespace(dstDomUUID, IMAGE_NAMESPACE)
imgList = imgDict.keys()
imgList.sort()
resourceList = []
for imgUUID in imgList:
resourceList.append(rmanager.acquireResource(srcImageResourcesNamespace, imgUUID, rm.LockType.exclusive))
resourceList.append(rmanager.acquireResource(dstImageResourcesNamespace, imgUUID, rm.LockType.exclusive))
with nested(*resourceList):
repoPath = os.path.join(self.storage_repository, self.spUUID)
image.Image(repoPath).multiMove(srcDomUUID, dstDomUUID, imgDict, vmUUID, force)
def deleteImage(self, sdUUID, imgUUID, postZero, force):
"""
        Deletes an image folder with all its volumes.
:param sdUUID: The UUID of the storage domain that contains the images.
:type sdUUID: UUID
:param imgUUID: The UUID of the image you want to delete.
:type imgUUID: UUID
:param postZero: ?
:param force: Should the operation be forced.
:type force: bool
"""
volParams = None
repoPath = os.path.join(self.storage_repository, self.spUUID)
if sdCache.produce(sdUUID).isBackup():
# Pre-delete requisites
volParams = image.Image(repoPath).preDeleteHandler(sdUUID=sdUUID, imgUUID=imgUUID)
# Delete required image
image.Image(repoPath).delete(sdUUID=sdUUID, imgUUID=imgUUID, postZero=postZero, force=force)
        # We need to create a 'fake' image instead of the deleted one
if volParams:
image.Image(repoPath).createFakeTemplate(sdUUID=sdUUID, volParams=volParams)
def mergeSnapshots(self, sdUUID, vmUUID, imgUUID, ancestor, successor, postZero):
"""
Merges the source volume to the destination volume.
:param sdUUID: The UUID of the storage domain that contains the images.
:type sdUUID: UUID
:param spUUID: The UUID of the storage pool that contains the images.
:type spUUID: UUID
        :param imgUUID: The UUID of the new image that will be created after the merge.?
:type imgUUID: UUID
:param ancestor: The UUID of the source volume.?
:type ancestor: UUID
:param successor: The UUID of the destination volume.?
:type successor: UUID
:param postZero: ?
:type postZero: bool?
"""
imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
with rmanager.acquireResource(imageResourcesNamespace, imgUUID, rm.LockType.exclusive):
repoPath = os.path.join(self.storage_repository, self.spUUID)
image.Image(repoPath).merge(sdUUID, vmUUID, imgUUID, ancestor, successor, postZero)
def createVolume(self, sdUUID, imgUUID, size, volFormat, preallocate, diskType, volUUID=None,
desc="", srcImgUUID=volume.BLANK_UUID, srcVolUUID=volume.BLANK_UUID):
"""
Creates a new volume.
.. note::
If the *imgUUID* is **identical** to the *srcImgUUID* the new volume
will be logically considered a snapshot of the old volume.
If the *imgUUID* is **different** from the *srcImgUUID* the old volume
will be logically considered a template of the new volume.
:param sdUUID: The UUID of the storage domain that contains the volume.
:type sdUUID: UUID
        :param imgUUID: The UUID of the image that the new volume will belong to.
:type imgUUID: UUID
:param size: The size of the new volume in bytes.
:type size: int
:param volFormat: The format of the new volume.
:type volFormat: some enum ?!
:param preallocate: Should the volume be preallocated.
:type preallocate: bool
:param diskType: The disk type of the new volume.
:type diskType: some enum ?!
:param volUUID: The UUID of the new volume that will be created.
:type volUUID: UUID
:param desc: A human readable description of the new volume.
:param srcImgUUID: The UUID of the image that resides on the volume that will be the base of the new volume.
:type srcImgUUID: UUID
:param srcVolUUID: The UUID of the volume that will be the base of the new volume.
:type srcVolUUID: UUID
        :returns: a dict with the UUID of the new volume.
:rtype: dict
"""
imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
with rmanager.acquireResource(imageResourcesNamespace, imgUUID, rm.LockType.exclusive):
uuid = sdCache.produce(sdUUID).createVolume(imgUUID=imgUUID, size=size,
volFormat=volFormat, preallocate=preallocate,
diskType=diskType, volUUID=volUUID, desc=desc,
srcImgUUID=srcImgUUID, srcVolUUID=srcVolUUID)
return dict(uuid=uuid)
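    # A hedged usage sketch (comments only, not part of the original module):
    # the note in the docstring above distinguishes snapshots from templates by
    # comparing imgUUID with srcImgUUID. Assuming a hypothetical `pool` instance
    # and pre-existing UUIDs, the two cases would look roughly like this:
    #
    #   # snapshot: the new volume is created inside the *same* image as its source
    #   pool.createVolume(sdUUID, imgUUID=img, size=10 * 2 ** 30,
    #                     volFormat=fmt, preallocate=prealloc, diskType=dtype,
    #                     srcImgUUID=img, srcVolUUID=vol)
    #
    #   # template: the new volume lands in a *different* image, so the source
    #   # volume is logically treated as its template
    #   pool.createVolume(sdUUID, imgUUID=new_img, size=10 * 2 ** 30,
    #                     volFormat=fmt, preallocate=prealloc, diskType=dtype,
    #                     srcImgUUID=img, srcVolUUID=vol)
    #
    # fmt, prealloc and dtype stand in for whatever enum values the caller uses;
    # they are placeholders, not constants defined by this file.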
def deleteVolume(self, sdUUID, imgUUID, volumes, postZero, force):
"""
Deletes a given volume.
.. note::
This function assumes:
* If more than 1 volume, all volumes are a part of the **same** chain.
                * Given volumes are ordered, so a predecessor is deleted before its ancestor. ? (might be confused?)
:param sdUUID: The UUID of the storage domain that contains the volume.
:type sdUUID: UUID
        :param imgUUID: The UUID of the image that contains the volumes to delete.
:type imgUUID: UUID
"""
imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
with rmanager.acquireResource(imageResourcesNamespace, imgUUID, rm.LockType.exclusive):
for volUUID in volumes:
sdCache.produce(sdUUID).produceVolume(imgUUID, volUUID).delete(postZero=postZero,
force=force)
def setMaxHostID(self, spUUID, maxID):
"""
Set maximum host ID
"""
self.log.error("TODO: Implement")
self._maxHostID
self.spmMailer.setMaxHostID(maxID)
raise se.NotImplementedException
def detachAllDomains(self):
"""
Detach all domains from pool before destroying pool
"""
# First find out this pool master domain
# Find out domain list from the pool metadata
domList = self.getDomains().keys()
for sdUUID in domList:
# master domain should be detached last, after spm is stopped
if sdUUID == self.masterDomain.sdUUID:
continue
self.detachSD(sdUUID=sdUUID, msdUUID=sd.BLANK_UUID, masterVersion=0)
self.stopSpm(self.spUUID)
# Forced detach 'master' domain after stopping SPM
self.detachSD(self.masterDomain.sdUUID, sd.BLANK_UUID, 0, __securityOverride=True)
def setVolumeDescription(self, sdUUID, imgUUID, volUUID, description):
imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
with rmanager.acquireResource(imageResourcesNamespace, imgUUID, rm.LockType.exclusive):
sdCache.produce(sdUUID).produceVolume(imgUUID=imgUUID,
volUUID=volUUID).setDescription(descr=description)
def setVolumeLegality(self, sdUUID, imgUUID, volUUID, legality):
imageResourcesNamespace = sd.getNamespace(sdUUID, IMAGE_NAMESPACE)
with rmanager.acquireResource(imageResourcesNamespace, imgUUID, rm.LockType.exclusive):
sdCache.produce(sdUUID).produceVolume(imgUUID=imgUUID,
volUUID=volUUID).setLegality(legality=legality)
def checkDomain(self, sdUUID):
return sdCache.produce(sdUUID).checkDomain(spUUID=self.spUUID)
def getVmsList(self, sdUUID=None):
if sdUUID == None:
dom = self.masterDomain
else:
dom = sdCache.produce(sdUUID)
return dom.getVMsList()
def getVmsInfo(self, sdUUID, vmList=None):
return sdCache.produce(sdUUID).getVMsInfo(vmList=vmList)
def uploadVolume(self, sdUUID, imgUUID, volUUID, srcPath, size, method="rsync"):
vol = sdCache.produce(sdUUID).produceVolume(imgUUID, volUUID)
if not vol.isLeaf():
raise se.NonLeafVolumeNotWritable(vol)
targetPath = vol.getVolumePath()
if vol.isSparse():
vol.extend(int(size))
vol.prepare(rw=True, setrw=False)
try:
if method.lower() == "wget":
cmd = [constants.EXT_WGET, "-O", targetPath, srcPath]
(rc, out, err) = misc.execCmd(cmd, sudo=False)
if rc:
self.log.error("uploadVolume - error while trying to retrieve: %s into: %s, stderr: %s" % (srcPath, targetPath, err))
raise se.VolumeCopyError(vol, err)
            # CR: should be elif 'rsync' and else "error not supported" at the end
else:
cmd = [constants.EXT_RSYNC, "-aq", srcPath, targetPath]
(rc, out, err) = misc.execCmd(cmd, sudo=False)
if rc:
self.log.error("uploadVolume - error while trying to copy: %s into: %s, stderr: %s" % (srcPath, targetPath, err))
raise se.VolumeCopyError(vol, err)
finally:
try:
vol.teardown(sdUUID, volUUID)
except:
                self.log.warning("SP %s SD %s img %s Vol %s - teardown failed", self.spUUID, sdUUID, imgUUID, volUUID)
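    # A minimal sketch of the dispatch suggested by the CR comment above
    # (comments only; not part of the original module). It mirrors the names
    # used in uploadVolume but is an illustration, not the method itself:
    #
    #   method = method.lower()
    #   if method == "wget":
    #       cmd = [constants.EXT_WGET, "-O", targetPath, srcPath]
    #   elif method == "rsync":
    #       cmd = [constants.EXT_RSYNC, "-aq", srcPath, targetPath]
    #   else:
    #       raise se.VolumeCopyError(vol, "unsupported upload method %r" % method)
    #   (rc, out, err) = misc.execCmd(cmd, sudo=False)
    #   if rc:
    #       raise se.VolumeCopyError(vol, err)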
def preDeleteRename(self, sdUUID, imgUUID):
repoPath = os.path.join(self.storage_repository, self.spUUID)
return image.Image(repoPath).preDeleteRename(sdUUID, imgUUID)
def validateDelete(self, sdUUID, imgUUID):
repoPath = os.path.join(self.storage_repository, self.spUUID)
image.Image(repoPath).validateDelete(sdUUID, imgUUID)
def validateImage(self, srcDomUUID, dstDomUUID, imgUUID, op=image.MOVE_OP):
repoPath = os.path.join(self.storage_repository, self.spUUID)
image.Image(repoPath).validate(srcDomUUID, dstDomUUID, imgUUID, op)
def validateVolumeChain(self, sdUUID, imgUUID):
repoPath = os.path.join(self.storage_repository, self.spUUID)
image.Image(repoPath).validateVolumeChain(sdUUID, imgUUID)
def checkImage(self, sdUUID, imgUUID):
repoPath = os.path.join(self.storage_repository, self.spUUID)
image.Image(repoPath).check(sdUUID, imgUUID)
    def extendSD(self, sdUUID, devlist):
sdCache.produce(sdUUID).extend(devlist)
|
openSUSE/vdsm
|
vdsm/storage/sp.py
|
Python
|
gpl-2.0
| 80,533
|
# -*- encoding: utf-8 -*-
from app.database import init_db
init_db()
|
berlotto/cinemark
|
criadb.py
|
Python
|
gpl-2.0
| 71
|
#! /usr/bin/env python
# encoding: utf-8
import os,shutil,sys,platform
import TaskGen,Task,Build,Options,Utils
from TaskGen import taskgen,feature,after,before
from Logs import error,debug
app_info='''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd">
<plist version="0.9">
<dict>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleGetInfoString</key>
<string>Created by Waf</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>NOTE</key>
<string>THIS IS A GENERATED FILE, DO NOT MODIFY</string>
<key>CFBundleExecutable</key>
<string>%s</string>
</dict>
</plist>
'''
def set_macosx_deployment_target(self):
if self.env['MACOSX_DEPLOYMENT_TARGET']:
os.environ['MACOSX_DEPLOYMENT_TARGET']=self.env['MACOSX_DEPLOYMENT_TARGET']
elif'MACOSX_DEPLOYMENT_TARGET'not in os.environ:
if sys.platform=='darwin':
os.environ['MACOSX_DEPLOYMENT_TARGET']='.'.join(platform.mac_ver()[0].split('.')[:2])
def apply_framework(self):
for x in self.to_list(self.env['FRAMEWORKPATH']):
frameworkpath_st='-F%s'
self.env.append_unique('CXXFLAGS',frameworkpath_st%x)
self.env.append_unique('CCFLAGS',frameworkpath_st%x)
self.env.append_unique('LINKFLAGS',frameworkpath_st%x)
for x in self.to_list(self.env['FRAMEWORK']):
self.env.append_value('LINKFLAGS',['-framework',x])
def create_bundle_dirs(self,name,out):
bld=self.bld
dir=out.parent.get_dir(name)
if not dir:
dir=out.__class__(name,out.parent,1)
bld.rescan(dir)
contents=out.__class__('Contents',dir,1)
bld.rescan(contents)
macos=out.__class__('MacOS',contents,1)
bld.rescan(macos)
return dir
def bundle_name_for_output(out):
name=out.name
k=name.rfind('.')
if k>=0:
name=name[:k]+'.app'
else:
name=name+'.app'
return name
def create_task_macapp(self):
if self.env['MACAPP']or getattr(self,'mac_app',False):
apptask=self.create_task('macapp',self.env)
apptask.set_inputs(self.link_task.outputs)
out=self.link_task.outputs[0]
name=bundle_name_for_output(out)
dir=self.create_bundle_dirs(name,out)
n1=dir.find_or_declare(['Contents','MacOS',out.name])
apptask.set_outputs([n1])
apptask.chmod=0755
apptask.install_path=os.path.join(self.install_path,name,'Contents','MacOS')
self.apptask=apptask
def create_task_macplist(self):
if self.env['MACAPP']or getattr(self,'mac_app',False):
if not getattr(self,'mac_plist',False):
self.mac_plist=app_info
plisttask=self.create_task('macplist',self.env)
plisttask.set_inputs(self.link_task.outputs)
out=self.link_task.outputs[0]
self.mac_plist=self.mac_plist%(out.name)
name=bundle_name_for_output(out)
dir=self.create_bundle_dirs(name,out)
n1=dir.find_or_declare(['Contents','Info.plist'])
plisttask.set_outputs([n1])
plisttask.mac_plist=self.mac_plist
plisttask.install_path=os.path.join(self.install_path,name,'Contents')
self.plisttask=plisttask
def apply_link_osx(self):
name=self.link_task.outputs[0].name
if getattr(self,'vnum',None):
name=name.replace('.dylib','.%s.dylib'%self.vnum)
path=os.path.join(Utils.subst_vars(self.install_path,self.env),name)
if'-dynamiclib'in self.env['LINKFLAGS']:
self.env.append_value('LINKFLAGS','-install_name')
self.env.append_value('LINKFLAGS',path)
def apply_bundle(self):
if not('cshlib'in self.features or'shlib'in self.features):return
if self.env['MACBUNDLE']or getattr(self,'mac_bundle',False):
self.env['shlib_PATTERN']=self.env['macbundle_PATTERN']
uselib=self.uselib=self.to_list(self.uselib)
if not'MACBUNDLE'in uselib:uselib.append('MACBUNDLE')
def apply_bundle_remove_dynamiclib(self):
if self.env['MACBUNDLE']or getattr(self,'mac_bundle',False):
if not getattr(self,'vnum',None):
try:
self.env['LINKFLAGS'].remove('-dynamiclib')
except ValueError:
pass
app_dirs=['Contents','Contents/MacOS','Contents/Resources']
def app_build(task):
env=task.env
shutil.copy2(task.inputs[0].srcpath(env),task.outputs[0].abspath(env))
return 0
def plist_build(task):
env=task.env
f=open(task.outputs[0].abspath(env),"w")
f.write(task.mac_plist)
f.close()
return 0
Task.task_type_from_func('macapp',vars=[],func=app_build,after="cxx_link cc_link static_link")
Task.task_type_from_func('macplist',vars=[],func=plist_build,after="cxx_link cc_link static_link")
feature('cc','cxx')(set_macosx_deployment_target)
before('apply_lib_vars')(set_macosx_deployment_target)
feature('cc','cxx')(apply_framework)
after('apply_lib_vars')(apply_framework)
taskgen(create_bundle_dirs)
taskgen(create_task_macapp)
after('apply_link')(create_task_macapp)
feature('cprogram')(create_task_macapp)
after('apply_link')(create_task_macplist)
feature('cprogram')(create_task_macplist)
after('apply_link')(apply_link_osx)
feature('cshlib')(apply_link_osx)
before('apply_link','apply_lib_vars')(apply_bundle)
feature('cc','cxx')(apply_bundle)
after('apply_link')(apply_bundle_remove_dynamiclib)
feature('cshlib')(apply_bundle_remove_dynamiclib)
|
diedthreetimes/VCrash
|
pybindgen-0.15.0.795/.waf-1.5.9-0c853694b62ef4240caa9158a9f2573d/wafadmin/Tools/osx.py
|
Python
|
gpl-2.0
| 4,973
|
from setuptools import setup
import io
import os
import re
import sys
if sys.version_info[0] > 2:
raise RuntimeError("esptool.py only supports Python 2.x")
# Example code to pull version from esptool.py with regex, taken from
# http://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
long_description = """
==========
esptool.py
==========
A command line utility to communicate with the ROM bootloader in Espressif ESP8266 WiFi microcontroller.
Allows flashing firmware, reading back firmware, querying chip parameters, etc.
The esptool.py project is hosted on github: https://github.com/espressif/esptool
Installation
------------
esptool can be installed via pip:
$ pip install --upgrade esptool
(esptool.py requires Python 2. If your default pip version is Python 3, install via ``pip2 install esptool``.)
Usage
-----
Please see the `Usage section of the README.md file <https://github.com/espressif/esptool#usage>`_.
You can also get help information by running `esptool.py --help`.
Contributing
------------
Please see the `CONTRIBUTING.md file on github <https://github.com/espressif/esptool/blob/master/CONTRIBUTING.md>`_.
"""
setup(
name='esptool',
py_modules=['esptool'],
version=find_version('esptool.py'),
description='A utility to communicate with the ROM bootloader in Espressif ESP8266.',
long_description=long_description,
url='https://github.com/espressif/esptool',
author='Fredrik Ahlberg (themadinventor) & Angus Gratton (projectgus)',
author_email='angus@espressif.com',
license='GPLv2+',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Embedded Systems',
'Environment :: Console',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Programming Language :: Python :: 2.7',
],
setup_requires=[
'flake8<3.0.0',
],
install_requires=[
'pyserial>=2.5',
],
scripts=[
'esptool.py',
],
)
|
IntellEyes/embeddedgurufirmware
|
setup.py
|
Python
|
gpl-2.0
| 2,778
|
#!/home/venturi/django/cms/env/bin/python2
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
import getopt
import os
import sys
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)
def description(filepath, image):
title = os.path.splitext(os.path.split(filepath)[1])[0]
format = " (%dx%d "
if image.format:
format = " (" + image.format + " %dx%d "
return title + format % image.size + image.mode + ")"
if len(sys.argv) == 1:
    print(VERSION + " -- print image files")
print("Usage: pilprint files...")
print("Options:")
print(" -c colour printer (default is monochrome)")
print(" -p print via lpr (default is stdout)")
print(" -P <printer> same as -p but use given printer")
sys.exit(1)
try:
opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
print(v)
sys.exit(1)
printer = None # print to stdout
monochrome = 1 # reduce file size for most common case
for o, a in opt:
if o == "-d":
# debug: show available drivers
Image.init()
print(Image.ID)
sys.exit(1)
elif o == "-c":
# colour printer
monochrome = 0
elif o == "-p":
# default printer channel
printer = "lpr"
elif o == "-P":
# printer channel
printer = "lpr -P%s" % a
for filepath in argv:
try:
im = Image.open(filepath)
title = description(filepath, im)
if monochrome and im.mode not in ["1", "L"]:
im.draft("L", im.size)
im = im.convert("L")
if printer:
fp = os.popen(printer, "w")
else:
fp = sys.stdout
ps = PSDraw.PSDraw(fp)
ps.begin_document()
ps.setfont("Helvetica-Narrow-Bold", 18)
ps.text((letter[0], letter[3]+24), title)
ps.setfont("Helvetica-Narrow-Bold", 8)
ps.text((letter[0], letter[1]-30), VERSION)
ps.image(letter, im)
ps.end_document()
except:
print("cannot print image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
|
Venturi/cms
|
env/bin/pilprint.py
|
Python
|
gpl-2.0
| 2,418
|
from http.cookies import SimpleCookie
import json
import sys, os, requests, uuid
from threading import Thread
from time import sleep
from Crypto import Random
from Crypto.PublicKey import RSA
from PyQt5.QtCore import QThread
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QMenu
from PyQt5.QtWidgets import QSystemTrayIcon
from login import LoginForm
from settings import HTTP_PROTOCOL
from settings import SERVER_URL
from timestamp.form import TimestampForm
class SystemTrayIcon(QSystemTrayIcon):
def __init__(self):
QSystemTrayIcon.__init__(self)
self.http_client = requests.Session()
self.base_url = '{}://{}'.format(HTTP_PROTOCOL, SERVER_URL)
self.set_desktop_timezone()
# init icons
self.icon_states = {
'disconnect': QIcon('icons/icon-placeholder_128x128_no_connection.png'),
'logged_out': QIcon('icons/icon-placeholder_128x128_red.png'),
'logged_in': QIcon('icons/icon-placeholder_128x128_green.png') ,
}
self.changeIcon('logged_out')
self.uuid = self.create_uuid('TTASM')
self.create_private_key()
try:
self.http_client.get(self.base_url)
self.server_accessible = True
self.set_server_public_key()
self.present_login_form()
except:
self.server_accessible = False
t = AccessibilityWorker(self)
t.start()
self.set_server_public_key()
self.create_ui()
self.msgButton.setEnabled(False)
def createURL(self, path):
return '{}{}'.format(self.base_url, path)
# Find Desktop's timezone
def set_desktop_timezone(self):
response = requests.get('http://freegeoip.net/json')
response_json = json.JSONDecoder().decode(response.text)
self.timezone = response_json['time_zone']
def verify_initial_data(self):
url = self.createURL('/initial_synchronization/?timezone={}'.format(self.timezone))
try:
response = self.http_client.get(url)
if response.status_code == 200:
self.last_timestamp = response.text
else:
                raise Exception('Server error: {}'.format(response.status_code))
except:
print('Something is wrong with server comms')
def set_server_public_key(self):
# get server public key
url = self.createURL('/public_key/')
print('Trying to get the public key from:', url)
try:
response = self.http_client.get(url)
except:
print('No response, server may be down')
try:
if response.status_code == 200:
self.server_rsa_pub = RSA.importKey(response.text)
                print('Server public key acquired')
else:
print('Server failed to provide public key')
except:
print("\nServer is not responding")
# self.loginForm.close()
def create_private_key(self):
# Create new client RSA private key, public key and public key hash and store them to disk
random_generator = Random.new().read
self.client_rsa = RSA.generate(2048, random_generator)
print ('Client private key created')
# with open('./clientdata/client_RSA', 'wb') as f:
# f.write(cl_rsa.exportKey())
# with open('./clientdata/client_RSA.pub', 'wb') as f:
# f.write(cl_rsa.publickey().exportKey())
# with open('./clientdata/client_RSA.hash', 'w') as f:
# f.write(SHA256.new(cl_rsa.publickey().exportKey()).hexdigest())
print ('Client keys created')
def create_ui(self):
"""Create user interface of Tray icon"""
mainMenu = QMenu()
subMenu = QMenu(mainMenu)
subMenu.setTitle("Util")
subButton_1 = subMenu.addAction("Show token")
subButton_1.triggered.connect(self.show_token)
subButton_2 = subMenu.addAction("Test sockets")
subButton_2.triggered.connect(self.test_sockets)
# Set the order of layout and add everything to main menu
self.logInButton = mainMenu.addAction("Log in")
self.logInButton.triggered.connect(self.present_login_form)
self.simButton = mainMenu.addAction("Let's pretend server is accessible")
self.simButton.triggered.connect(self.enable_login_etc)
mainMenu.addSeparator()
        self.msgButton = mainMenu.addAction("Send message")  # TODO: find a way to hide this button while preserving its action until the user logs in
self.msgButton.triggered.connect(self.present_timestamp_form)
if not self.server_accessible:
self.logInButton.setEnabled(False)
self.msgButton.setEnabled(False)
else:
self.msgButton.setEnabled(True)
mainMenu.addSeparator()
mainMenu.addMenu(subMenu)
mainMenu.addSeparator()
mainMenu.addSeparator()
exitButton = mainMenu.addAction("Exit")
exitButton.triggered.connect(self.quit)
self.setContextMenu(mainMenu)
def accessibility_worker(self):
while (not self.server_accessible):
try:
self.http_client.get(self.base_url)
self.server_accessible = True
self.enable_login_etc()
except:
sleep(5)
def changeIcon(self, state):
self.setIcon(self.icon_states[state])
def enable_login_etc(self):
self.logInButton.setEnabled(True)
self.msgButton.setEnabled(True)
self.showMessage('Connected',
'Server is accessible again',
QSystemTrayIcon.Information,
3000)
def logged_in_state(self, loggedIn):
# TODO: add corresponding icon change once the code is available
if loggedIn:
self.changeIcon('logged_in')
self.logInButton.setText('Log Out')
self.logInButton.disconnect()
self.logInButton.triggered.connect(self.logout)
else:
self.changeIcon('logged_out')
self.logInButton.setText('Log In')
self.logInButton.disconnect()
self.logInButton.triggered.connect(self.present_login_form)
def create_uuid(self, UUID_string):
return uuid.uuid3(uuid.NAMESPACE_DNS, UUID_string)
def present_login_form(self):
self.login_form = LoginForm(self)
self.login_form.show()
def present_timestamp_form(self):
url = self.createURL('/last_activity_duration/')
response = self.http_client.get(url)
self.timestamp_form = TimestampForm(self, response.text)
self.timestamp_form.show()
def show_token(self):
"""Placeholder function"""
try:
self.showMessage('Token',
self.token,
QSystemTrayIcon.Information,
3000)
except:
self.showMessage('Token',
'No token received',
QSystemTrayIcon.Information,
3000)
def test_sockets(self):
"""Placeholder function"""
self.showMessage('Testing',
'Pending implementation',
QSystemTrayIcon.Information,
3000)
    # Log out the currently logged-in user via a GET request
def logout(self):
url = self.createURL('/user_logout/')
response = self.http_client.get(url)
s_cookie = SimpleCookie()
s_cookie.load(response.headers['Set-Cookie'])
c_cookie = SimpleCookie()
c_cookie.load(response.request.headers['Cookie'])
if response.status_code == 200:
if 'sessionid' in c_cookie and 'sessionid' not in s_cookie:
print("User is still logged in")
else:
print("User is logged out")
self.logged_in_state(False)
self.msgButton.setEnabled(False)
def quit(self):
"""Exit program in a clean way."""
if os.path.isfile('pid'):
os.remove('pid')
print ("Deleting pid file")
print ("Exiting")
sys.exit(0)
class AccessibilityWorker(QThread):
def __init__(self, parent, *args, **kwargs):
self.parent = parent
self.parent.changeIcon('disconnect')
super(AccessibilityWorker, self).__init__(*args, **kwargs)
def run(self):
while (not self.parent.server_accessible):
print('checking server accessibility...')
try:
print('1. connecting')
self.parent.http_client.get(self.parent.base_url)
print('2. setting server accessible variable to True')
self.parent.server_accessible = True
print('3. changing icon to logged_out')
self.parent.changeIcon('logged_out')
print('4. enabling login etc.')
self.parent.enable_login_etc()
print('server is up')
except:
print('\t\t-- waiting for 2 seconds --')
sleep(2)
|
coremind-oss/ttasm-desktop-app
|
tray.py
|
Python
|
gpl-2.0
| 9,573
|
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')
parameter_list= [[traindat,testdat,label_traindat],[traindat,testdat,label_traindat]]
def kernel_combined_custom_poly_modular(fm_train_real = traindat,fm_test_real = testdat,fm_label_twoclass=label_traindat):
from shogun.Features import CombinedFeatures, RealFeatures, BinaryLabels
from shogun.Kernel import CombinedKernel, PolyKernel, CustomKernel
from shogun.Classifier import LibSVM
kernel = CombinedKernel()
feats_train = CombinedFeatures()
tfeats = RealFeatures(fm_train_real)
tkernel = PolyKernel(10,3)
tkernel.init(tfeats, tfeats)
K = tkernel.get_kernel_matrix()
kernel.append_kernel(CustomKernel(K))
subkfeats_train = RealFeatures(fm_train_real)
feats_train.append_feature_obj(subkfeats_train)
subkernel = PolyKernel(10,2)
kernel.append_kernel(subkernel)
kernel.init(feats_train, feats_train)
labels = BinaryLabels(fm_label_twoclass)
svm = LibSVM(1.0, kernel, labels)
svm.train()
kernel = CombinedKernel()
feats_pred = CombinedFeatures()
pfeats = RealFeatures(fm_test_real)
tkernel = PolyKernel(10,3)
tkernel.init(tfeats, pfeats)
K = tkernel.get_kernel_matrix()
kernel.append_kernel(CustomKernel(K))
subkfeats_test = RealFeatures(fm_test_real)
feats_pred.append_feature_obj(subkfeats_test)
subkernel = PolyKernel(10, 2)
kernel.append_kernel(subkernel)
kernel.init(feats_train, feats_pred)
svm.set_kernel(kernel)
svm.apply()
km_train=kernel.get_kernel_matrix()
return km_train,kernel
if __name__=='__main__':
kernel_combined_custom_poly_modular(*parameter_list[0])
|
ratschlab/ASP
|
examples/undocumented/python_modular/kernel_combined_custom_poly_modular.py
|
Python
|
gpl-2.0
| 1,872
|
# -*- coding: utf-8 -*-
"""
Hall Effect Plugin
Copyright (C) 2013 Olaf Lüke <olaf@tinkerforge.com>
Copyright (C) 2014-2016 Matthias Bolte <matthias@tinkerforge.com>
hall_effect.py: Hall Effect Plugin Implementation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QFrame, QVBoxLayout, QHBoxLayout, QLabel, QSpinBox, \
QComboBox, QPushButton
from brickv.plugin_system.plugin_base import PluginBase
from brickv.bindings.bricklet_hall_effect import BrickletHallEffect
from brickv.async_call import async_call
from brickv.plot_widget import PlotWidget, CurveValueWrapper, FixedSizeLabel
from brickv.callback_emulator import CallbackEmulator
class CountLabel(FixedSizeLabel):
def setText(self, text):
text = 'Count: ' + str(text)
super().setText(text)
class HallEffect(PluginBase):
def __init__(self, *args):
super().__init__(BrickletHallEffect, *args)
self.hf = self.device
self.cbe_edge_count = CallbackEmulator(self,
self.get_edge_count,
False,
self.cb_edge_count,
self.increase_error_count,
expand_result_tuple_for_callback=True)
self.current_value = CurveValueWrapper()
self.label_count = CountLabel('Count')
plots = [('Value', Qt.red, self.current_value, str)]
self.plot_widget = PlotWidget('Value', plots, extra_key_widgets=[self.label_count], update_interval=0.05)
self.plot_widget.set_fixed_y_scale(0, 1, 1, 1)
self.combo_edge_type = QComboBox()
self.combo_edge_type.addItem('Rising')
self.combo_edge_type.addItem('Falling')
self.combo_edge_type.addItem('Both')
self.combo_edge_type.currentIndexChanged.connect(self.edge_changed)
self.spin_debounce = QSpinBox()
self.spin_debounce.setMinimum(0)
self.spin_debounce.setMaximum(255)
self.spin_debounce.setValue(100)
self.spin_debounce.editingFinished.connect(self.debounce_changed)
self.button_reset = QPushButton('Reset Count')
self.button_reset.clicked.connect(self.reset_count)
hlayout = QHBoxLayout()
hlayout.addWidget(QLabel('Edge Type:'))
hlayout.addWidget(self.combo_edge_type)
hlayout.addStretch()
hlayout.addWidget(QLabel('Debounce Period [ms]:'))
hlayout.addWidget(self.spin_debounce)
hlayout.addStretch()
hlayout.addWidget(self.button_reset)
line = QFrame()
line.setObjectName("line")
line.setFrameShape(QFrame.HLine)
line.setFrameShadow(QFrame.Sunken)
layout = QVBoxLayout(self)
layout.addWidget(self.plot_widget)
layout.addWidget(line)
layout.addLayout(hlayout)
def debounce_changed(self):
self.hf.set_edge_count_config(self.combo_edge_type.currentIndex(), self.spin_debounce.value())
def edge_changed(self, _value):
self.hf.set_edge_count_config(self.combo_edge_type.currentIndex(), self.spin_debounce.value())
def get_edge_count(self, reset):
edge_count = self.hf.get_edge_count(reset)
value = self.hf.get_value()
if reset:
edge_count = 0
return edge_count, value
def cb_edge_count(self, edge_count, value):
if value:
self.current_value.value = 1
else:
self.current_value.value = 0
self.label_count.setText(edge_count)
def get_edge_count_config_async(self, edge_type, debounce):
self.combo_edge_type.setCurrentIndex(edge_type)
self.spin_debounce.setValue(debounce)
def reset_count(self):
async_call(self.get_edge_count, True, self.cb_edge_count, self.increase_error_count,
expand_result_tuple_for_callback=True)
def start(self):
async_call(self.hf.get_edge_count_config, None, self.get_edge_count_config_async, self.increase_error_count,
expand_result_tuple_for_callback=True)
self.cbe_edge_count.set_period(50)
self.plot_widget.stop = False
def stop(self):
self.cbe_edge_count.set_period(0)
self.plot_widget.stop = True
def destroy(self):
pass
@staticmethod
def has_device_identifier(device_identifier):
return device_identifier == BrickletHallEffect.DEVICE_IDENTIFIER
|
Tinkerforge/brickv
|
src/brickv/plugin_system/plugins/hall_effect/hall_effect.py
|
Python
|
gpl-2.0
| 5,198
|
'''
Copyright (C) 2012 mentalsmash.org <contact@mentalsmash.org>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with this library; if not, write to the
Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
Boston, MA 02110-1301, USA.
'''
from mentalsmash.geekmap import IMDbHelper
import mentalsmash.geekmap
'''
Created on Oct 26, 2012
@author: as
'''
def default_serializer():
return FileSerializer()
class Serializer(object):
'''
Serializes a GeekMap into a custom format
'''
def __init__(self):
'''
Constructor
'''
self.current_map = None
def serialize(self, geek_map):
'''
'''
self.current_map = geek_map
print "Serializing GeekMap " + str(geek_map)
self.start_serialize()
self.start_serialize_topic()
print "-- Serializing topics..."
for topic in self.current_map.topics():
print "--- " + str(topic)
self.serialize_topic(topic)
self.end_serialize_topic()
self.start_serialize_topic_entity()
print "-- Serializing topic entities..."
for topic in self.current_map.topic_entities():
print "--- " + str(topic)
self.serialize_topic_entity(topic)
self.end_serialize_topic_entity()
self.start_serialize_entity()
print "-- Serializing entities..."
for entity in self.current_map.entities():
print "--- " + str(entity)
self.serialize_entity(entity)
self.end_serialize_entity()
self.start_serialize_path()
print "-- Serializing paths..."
for path in self.current_map.paths():
print "--- " + str(path)
self.serialize_path(path)
self.end_serialize_path()
self.end_serialize()
print "GeekMap " + str(geek_map) + " serialized."
self.current_map = None
def start_serialize(self):
'''
'''
def end_serialize(self):
'''
'''
def serialize_topic(self, topic):
'''
'''
def serialize_topic_entity(self, topic):
'''
'''
def serialize_path(self, path):
'''
'''
def serialize_entity(self, entity):
'''
'''
def start_serialize_topic(self):
'''
'''
def end_serialize_topic(self):
'''
'''
def start_serialize_topic_entity(self):
'''
'''
def end_serialize_topic_entity(self):
'''
'''
def start_serialize_path(self):
'''
'''
def end_serialize_path(self):
'''
'''
def start_serialize_entity(self):
'''
'''
def end_serialize_entity(self):
'''
'''
class Loader(object):
def __init__(self):
'''
'''
def load(self, args):
'''
'''
class FileSerializer(Serializer):
'''
Serializes a GeekMap into a file
'''
def __init__(self, file_ext='geekmap', field_separator='; ', path_separator=", ", topic_prefix='topic', topic_entity_prefix='topic_entity', path_prefix='path', entity_prefix='entity', eol='\n'):
'''
Constructor
'''
self.file_ext = file_ext
self.path_separator = path_separator
self.field_separator = field_separator
self.path_prefix = path_prefix
self.topic_prefix = topic_prefix
self.topic_entity_prefix = topic_entity_prefix
self.entity_prefix = entity_prefix
self.eol = eol
def start_serialize(self):
'''
'''
file_name = self.current_map.id + '.' + self.file_ext
self.file = open(file_name, 'w')
def end_serialize(self):
'''
'''
self.file.close()
def serialize_path(self, path):
'''
'''
path_line = self.path_separator.join(map(IMDbHelper.get_entity_id, path))
file_line = self.current_map.id + self.field_separator + self.path_prefix + self.field_separator + path_line
self.file.write(file_line + self.eol)
def serialize_topic(self, topic):
'''
'''
topic_line = topic.type + self.field_separator + self.path_separator.join(map(str, topic.keywords))
file_line = self.current_map.id + self.field_separator + self.topic_prefix + self.field_separator + topic_line
self.file.write(file_line + self.eol)
def serialize_topic_entity(self, entity):
'''
'''
topic_line = IMDbHelper.get_entity_id(entity)
file_line = self.current_map.id + self.field_separator + self.topic_entity_prefix + self.field_separator + str(topic_line)
self.file.write(file_line + self.eol)
def serialize_entity(self, entity):
'''
'''
entity_line = IMDbHelper.get_entity_id(entity)
file_line = self.current_map.id + self.field_separator + self.entity_prefix + self.field_separator + entity_line
self.file.write(file_line + self.eol)
class FileLoader(Loader):
def __init__(self, file_ext='geekmap', field_separator='; ', path_separator=", ", topic_prefix='topic', topic_entity_prefix='topic_entity', path_prefix='path', entity_prefix='entity', eol='\n'):
'''
Constructor
'''
self.file_ext = file_ext
self.path_separator = path_separator
self.field_separator = field_separator
self.path_prefix = path_prefix
self.topic_prefix = topic_prefix
self.topic_entity_prefix = topic_entity_prefix
self.entity_prefix = entity_prefix
self.eol = eol
self.current_map = None
def load(self, args):
'''
'''
file_name = args['file']
try :
map_file = open(file_name, 'r')
except Exception :
print "Cannot open file : " + file_name
return None
def line_handler(loader, x):
return {
loader.topic_prefix : loader.load_topic,
loader.topic_entity_prefix : loader.load_topic_entity,
loader.path_prefix : loader.load_path,
loader.entity_prefix : loader.load_entity }.get(x, None)
for line in [line.rstrip('\n') for line in map_file] :
# print "Read line: '" + line + "'"
fields = line.split(self.field_separator)
fields = map(lambda x : x.strip(), fields)
map_id = fields[0]
if (self.current_map == None) :
self.load_map(map_id)
line_t = fields[1]
load_fn = line_handler(self, line_t)
if (load_fn != None) :
load_fn(fields)
else :
print "Unknown type of line : " + line
loaded_map = self.current_map
self.current_map = None
return loaded_map
def load_map(self, map_id):
# print 'Parsing GeekMap : ' + map_id
self.current_map = mentalsmash.geekmap.create(map_id=map_id)
def load_topic(self, fields):
# print 'Parsing topic : ' + fields[2]
topic_t = fields[2]
topic_keyw = fields[3].split(self.path_separator)
self.current_map.add_topic(tuple([topic_t, topic_keyw]))
def load_topic_entity(self, fields):
print 'Parsing topic_entity : ' + fields[2]
entity_id = fields[2]
entity = IMDbHelper.lookup_entity(entity_id)
self.current_map.topic_entities().add(entity)
def load_path(self, fields):
# print 'Parsing path : ' + fields[2]
path = fields[2].split(self.path_separator)
path = map(lambda x : x.strip(), path)
self.current_map.add_path(tuple(path))
def load_entity(self, fields):
# print 'Parsing entity : ' + fields[2]
entity_id = fields[2]
self.current_map.get_entity(entity_id)
class SQLiteSerializer(Serializer):
'''
Serializes a GeekMap into an sqlite database
'''
def __init__(self, database='geek-map.sqlite'):
'''
Constructor
'''
self.database = database
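# A hedged usage sketch (never called in this module): serialize a GeekMap to
# '<map_id>.geekmap' with the default FileSerializer and read it back with
# FileLoader. The shape of the geek_map object is an assumption about the
# mentalsmash.geekmap API implied by the code above, not something this file
# defines.
def _example_round_trip():
    geek_map = mentalsmash.geekmap.create(map_id='demo')
    default_serializer().serialize(geek_map)
    # Each written line has the shape "<map_id>; <prefix>; <payload>", e.g.
    # "demo; topic; movie; horror, zombies" for a topic of type 'movie'.
    return FileLoader().load({'file': 'demo.geekmap'})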
|
mentalsmash/geek-map
|
mentalsmash/geekmap/Persistence.py
|
Python
|
gpl-2.0
| 9,045
|
#!/usr/bin/env python
## @package fill_missing
# \brief This script solves the Laplace equation as a method of filling holes in map-plane data.
#
# \details The script is an implementation of the SOR method with Chebyshev
# acceleration for the Laplace equation, as described in 'Numerical Recipes in
# Fortran: the art of scientific computing' by William H. Press et al -- 2nd
# edition.
#
# Note also that this script can be used both from the command line and as a
# Python module -- by adding 'from fill_missing import laplace' to your
# program.
# Uses an approximation to Laplace's equation
# \f[ \nabla^2 u = 0 \f]
# to smoothly replace missing values in two-dimensional NetCDF variables with the average of the ``nearby'' non-missing values.
# Here is a hypothetical example, filling the missing values in the variables \c topg and \c usurf, using a convergence tolerance of \f$10^{-4}\f$ and an initial guess of \f$100\f$, on data in the NetCDF file \c data.nc :
# \code
# fill_missing.py -f data.nc -v topg,usurf --eps=1.0e-4 \
# -i 100.0 -o data_smoothed.nc
# \endcode
# Options \c -i and \c -e specify the initial guess and the convergence tolerance for \e all the specified variables, so using these options only makes sense if all the variables have the same units. Moreover, making a good initial guess can noticeably reduce the time needed to fill in the holes. Generally variables should be filled one at a time.
#
# Each of the requested variables must have missing values specified
# according to CF Metadata conventions, namely one of the following:
# \c valid_range or both of \c valid_min and
# \c valid_max (if the values are in a specific range); one of
# \c valid_min (\c valid_max) if values are greater (less)
# than some value, or \c _FillValue. Also \c _FillValue is
# interpreted as \c valid_max if it is positive, and as
# \c valid_min otherwise, and the \c missing_value attribute is deprecated
# by the NetCDF User's Guide, but is supported for backward compatibility. For more information see
# <a href="http://www.unidata.ucar.edu/software/netcdf/guide_10.html#SEC76">NetCDF User's Guide: Attributes</a>.
# Run \verbatim fill_missing.py --help \endverbatim for the list of available
# command-line options.
# CK, 08/12/2008
from numpy import *
## Computes \f$\rho_{Jacobi}\f$, see formula (19.5.24), page 858.
def rho_jacobi((J,L)):
return (cos(pi/J) + cos(pi/L))/2
## This makes the stencil wrap around the grid. It is unclear if this should be
# done, but it allows using a 4-point stencil for all points, even if they
# are on the edge of the grid (otherwise we need to use three points on the
# sides and two in the corners).
#
# Is and Js are arrays with row- and column-indices, M and N are the grid
# dimensions.
def fix_indices(Is, Js, (M, N)):
Is[Is == M] = 0
Is[Is == -1] = M-1
Js[Js == N] = 0
Js[Js == -1] = N-1
return (Is, Js)
## \brief laplace solves the Laplace equation
# \details laplace solves the Laplace equation using the SOR method with Chebyshev
# acceleration as described in 'Numerical Recipes in Fortran: the art of
# scientific computing' by William H. Press et al -- 2nd edition, section
# 19.5.
#
# data is a 2-d array (computation grid)
#
# mask is a boolean array; setting mask to 'data == 0', for example, results
# in only modifying points where 'data' is zero, all the other points
# are left as is. Intended use: if in an array the value of -9999.0
# signifies a missing value, then setting mask to 'data == -9999.0'
# fills in all the missing values.
#
# eps1 is the first stopping criterion: the iterations stop if the norm of
# residual becomes less than eps1*initial_norm, where 'initial_norm' is
# the initial norm of residual. Setting eps1 to zero or a negative
# number disables this stopping criterion.
#
# eps2 is the second stopping criterion: the iterations stop if the absolute
# value of the maximal change in value between successive iterations is
# less than eps2. Setting eps2 to zero or a negative number disables
# this stopping criterion.
#
# initial_guess is the initial guess used for all the values in the domain;
# the default is 'mean', i.e. use the mean of all the present values as
# the initial guess for missing values. initial_guess has to be 'mean'
# or a number.
#
# max_iter is the maximum number of iterations allowed. The default is 10000.
def laplace(data, mask, eps1, eps2, initial_guess='mean', max_iter=10000):
dimensions = data.shape
rjac = rho_jacobi(dimensions)
i, j = indices(dimensions)
# This splits the grid into 'odd' and 'even' parts, according to the
# checkerboard pattern:
odd = (i % 2 == 1) ^ (j % 2 == 0)
even = (i % 2 == 0) ^ (j % 2 == 0)
# odd and even parts _in_ the domain:
odd_part = zip(i[mask & odd], j[mask & odd])
even_part = zip(i[mask & even], j[mask & even])
# relative indices of the stencil points:
k = array([0, 1, 0, -1])
l = array([-1, 0, 1, 0])
parts = [odd_part, even_part]
try:
initial_guess = float(initial_guess)
except:
if initial_guess == 'mean':
present = array(ones_like(mask) - mask, dtype=bool)
initial_guess = mean(data[present])
else:
print """ERROR: initial_guess of '%s' is not supported (it should be a number or 'mean').
Note: your data was not modified.""" % initial_guess
return
data[mask] = initial_guess
print "Using the initial guess of %10f." % initial_guess
# compute the initial norm of residual
initial_norm = 0.0
for m in [0,1]:
for i,j in parts[m]:
Is, Js = fix_indices(i + k, j + l, dimensions)
xi = sum(data[Is, Js]) - 4 * data[i,j]
initial_norm += abs(xi)
print "Initial norm of residual =", initial_norm
print "Criterion is (change < %f) OR (res norm < %f (initial norm))." % (eps2,eps1)
omega = 1.0
# The main loop:
for n in arange(max_iter):
anorm = 0.0
change = 0.0
for m in [0,1]:
for i,j in parts[m]:
# stencil points:
Is, Js = fix_indices(i + k, j + l, dimensions)
residual = sum(data[Is, Js]) - 4 * data[i,j]
delta = omega * 0.25 * residual
data[i,j] += delta
# record the maximal change and the residual norm:
anorm += abs(residual)
if abs(delta) > change:
change = abs(delta)
# Chebyshev acceleration (see formula 19.5.30):
if n == 1 and m == 1:
omega = 1.0/(1.0 - 0.5 * rjac**2)
else:
omega = 1.0/(1.0 - 0.25 * rjac**2 * omega)
print "max change = %10f, residual norm = %10f" % (change, anorm)
if (anorm < eps1*initial_norm) or (change < eps2):
print "Exiting with change=%f, anorm=%f after %d iteration(s)." % (change,
anorm, n + 1)
return
print "Exceeded the maximum number of iterations."
return
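# A hedged usage sketch of calling laplace() directly from Python, as the
# header comment suggests ("from fill_missing import laplace"). The -9999.0
# sentinel and the tolerances are assumptions for illustration only; this
# helper is not called anywhere in this script.
def _example_fill_sentinel(data, missing=-9999.0):
    # treat every cell equal to the sentinel as a hole and fill it in place
    mask = (data == missing)
    laplace(data, mask, eps1=1.0e-4, eps2=1.0e-4, initial_guess='mean')
    return data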
if __name__ == "__main__":
from optparse import OptionParser
from sys import argv, exit
from shutil import copy, move
from tempfile import mkstemp
from os import close
from time import time, asctime
try:
from netCDF4 import Dataset as NC
except:
from netCDF3 import Dataset as NC
parser = OptionParser()
parser.usage = "%prog [options]"
parser.description = "Fills missing values in variables selected using -v in the file given by -f."
parser.add_option("-f", "--file", dest="input_filename",
help="input file")
parser.add_option("-v", "--vars", dest="variables",
help="comma-separated list of variables to process")
parser.add_option("-o", "--out_file", dest="output_filename",
help="output file")
parser.add_option("-e", "--eps", dest="eps",
help="convergence tolerance",
default="1.0")
parser.add_option("-i", "--initial_guess", dest="initial_guess",
help="initial guess to use; applies to all selected variables",
default="mean")
(options, args) = parser.parse_args()
    if not options.input_filename:
print """Please specify the input file name
(using the -f or --file command line option)."""
exit(-1)
input_filename = options.input_filename
    if not options.variables:
print """Please specify the list of variables to process
(using the -v or --variables command line option)."""
exit(-1)
variables = (options.variables).split(',')
    if not options.output_filename:
print """Please specify the output file name
(using the -o or --out_file command line option)."""
exit(-1)
output_filename = options.output_filename
eps = float(options.eps)
# Done processing command-line options.
print "Creating the temporary file..."
try:
(handle, tmp_filename) = mkstemp()
close(handle) # mkstemp returns a file handle (which we don't need)
copy(input_filename, tmp_filename)
except IOError:
        print "ERROR: Can't create %s. Exiting..." % tmp_filename
        exit(-1)
try:
nc = NC(tmp_filename, 'a')
except Exception, message:
print message
print "Note: %s was not modified." % output_filename
exit(-1)
## add history global attribute (after checking if present)
historysep = ' '
historystr = asctime() + ': ' + historysep.join(argv) + '\n'
if 'history' in nc.ncattrs():
nc.history = historystr + nc.history # prepend to history string
else:
nc.history = historystr
t_zero = time()
for name in variables:
print "Processing %s..." % name
try:
var = nc.variables[name]
data = asarray(squeeze(var[:]))
attributes = ["valid_range", "valid_min", "valid_max",
"_FillValue", "missing_value"]
adict = {}
print "Reading attributes..."
for attribute in attributes:
print "* %15s -- " % attribute,
if attribute in var.ncattrs():
adict[attribute] = getattr(var, attribute)
print "found"
else:
print "not found"
if adict.has_key("valid_range"):
range = adict["valid_range"]
mask = ((data >= range[0]) & (data <= range[1]))
print "Using the valid_range attribute; range = ", range
elif adict.has_key("valid_min") and adict.has_key("valid_max"):
valid_min = adict["valid_min"]
valid_max = adict["valid_max"]
mask = ((data < valid_min) | (data > valid_max))
print """Using valid_min and valid_max attributes.
valid_min = %10f, valid_max = %10f.""" % (valid_min, valid_max)
elif adict.has_key("valid_min"):
valid_min = adict["valid_min"]
mask = data < valid_min
print "Using the valid_min attribute; valid_min = %10f" % valid_min
elif adict.has_key("valid_max"):
valid_max = adict["valid_max"]
mask = data > valid_max
print "Using the valid_max attribute; valid_max = %10f" % valid_max
elif adict.has_key("_FillValue"):
fill_value = adict["_FillValue"]
if fill_value <= 0:
mask = data <= fill_value + 2*finfo(float).eps
else:
mask = data >= fill_value - 2*finfo(float).eps
print "Using the _FillValue attribute; _FillValue = %10f" % fill_value
elif adict.has_key("missing_value"):
missing = adict["missing_value"]
mask = abs(data - missing) < 2*finfo(float).eps
print """Using the missing_value attribute; missing_value = %10f
Warning: this attribute is deprecated by the NUG.""" % missing
else:
print "No missing values found. Skipping this variable..."
continue
count = int(sum(mask))
if count == 0:
print "No missing values found. Skipping this variable..."
continue
print "Filling in %5d missing values..." % count
t0 = time()
laplace(data, mask, -1, eps, initial_guess=options.initial_guess)
var[:] = data
# now REMOVE missing_value and _FillValue attributes
try:
delattr(var, '_FillValue')
except:
pass
try:
delattr(var, 'missing_value')
except:
pass
print "This took %5f seconds." % (time() - t0)
except Exception, message:
print "ERROR:", message
print "Note: %s was not modified." % output_filename
exit(-1)
print "Processing all the variables took %5f seconds." % (time() - t_zero)
nc.close()
try:
move(tmp_filename, output_filename)
except:
print "Error moving %s to %s. Exiting..." % (tmp_filename,
output_filename)
exit(-1)
|
m8se/pism_refined
|
util/fill_missing.py
|
Python
|
gpl-2.0
| 13,675
|
self.description = "Sysupgrade of packages in 'IgnoreGroup'"
sp1 = pmpkg("pkg1", "1.0-2")
sp2 = pmpkg("pkg2", "1.0-2")
sp2.groups = ["grp"]
for p in sp1, sp2:
self.addpkg2db("sync", p)
lp1 = pmpkg("pkg1")
lp2 = pmpkg("pkg2")
for p in lp1, lp2:
self.addpkg2db("local", p)
self.option["IgnoreGroup"] = ["grp"]
self.args = "-Su"
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_VERSION=pkg1|1.0-2")
self.addrule("!PKG_MODIFIED=pkg2")
|
ronin13/Pacman
|
test/pacman/tests/sync138.py
|
Python
|
gpl-2.0
| 442
|
#!/usr/bin/env python
try:
import redis
import datetime
except Exception:
msg= 'WARNING: Please check deps'
ret=1
print msg
exit(ret)
def getTimeOfSettingValue(conn, key, value):
before_set_time = datetime.datetime.now()
conn.set(key,value)
set_time = (datetime.datetime.now() - before_set_time).total_seconds()
return set_time
def getTimeOfGettingValue(conn, key):
before_get_time = datetime.datetime.now()
conn.get(key)
get_time = (datetime.datetime.now() - before_get_time).total_seconds()
return get_time
def getTimeOfDeletingValue(conn, key):
before_delete_time = datetime.datetime.now()
conn.delete(key)
delete_time = (datetime.datetime.now() - before_delete_time).total_seconds()
if conn.get(key):
msg = "CRITICAL: Test object still alive in the database."
ret = 2
print msg
exit(ret)
return delete_time
def printResultMessage(times):
msg = "OK: Conn time: " + str(times[0]) + ". Set time: " + str(times[1]) + ". Get time: " + str(times[2]) + ". Delete time: " + str(times[3])
ret = 0
print msg
exit(ret)
if __name__ == '__main__':
host = "localhost"
password = ""
key = "check_redis_testkey"
value = 1
try:
before_conn_time = datetime.datetime.now()
conn = redis.Redis(host=host,password=password)
conn_time = (datetime.datetime.now() - before_conn_time).total_seconds()
set_time = getTimeOfSettingValue(conn,key,value)
get_time = getTimeOfGettingValue(conn,key)
delete_time = getTimeOfDeletingValue(conn,key)
times = [conn_time,set_time,get_time,delete_time]
printResultMessage(times)
except Exception as exc:
msg = "CRITICAL: " + str(exc)
ret = 2
print msg
exit(ret)
|
jmferrete/nagios-plugins
|
check_redis.py
|
Python
|
gpl-2.0
| 1,647
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Overxfl0w13 #
"""You can define more kernel functions to test its performance whenever it respect the following condition:
http://latex.codecogs.com/gif.latex?\forall i\in K(x,y)\rightarrow i\geq 0
Some examples: http://gyazo.com/3b1d3ae355c2638f5ac4d98c82c31d12 (Theme 4: representation based on kernels.) Perception (PER), DSIC-UPV)
"""
# Kernel test -> hamming distance #
def kernel_sample_d_hamming(x,y):
if len(x)!=len(y):
print "Aborting, not same dimension ",x,y
exit()
else:
s=0
for d in xrange(len(x)): s+=abs(x[d]-y[d])
        return 1.0/(s+1)
def perceptron_train(train_samples,kernel):
alpha,counter_end = [0 for x in xrange(len(train_samples))],0
while counter_end!=len(train_samples):
counterx,counter_end = 0,0
for x in train_samples:
gx,counterxi = 0,0
for xi in train_samples: gx += alpha[counterxi]*xi[1]*kernel(x[0],xi[0]) + alpha[counterxi]*xi[1]; counterxi+=1
if x[1]*gx<=0: alpha[counterx]+=1;
else: counter_end += 1
counterx += 1
return alpha
def perceptron_recog(train_samples,alpha,sample,kernel):
gx,counterxi = 0,0
for xi in train_samples: gx += alpha[counterxi]*xi[1]*kernel(sample,xi[0]) + alpha[counterxi]*xi[1]; counterxi+=1
return 1 if gx>=0 else -1
def classify(train_samples,sample):
alpha = perceptron_train(train_samples,kernel_sample_d_hamming)
cclass = perceptron_recog(train_samples,alpha,sample,kernel_sample_d_hamming)
return cclass
def __str__(): return "KernelPerceptron classifier"
"""if __name__ == '__main__':
train_samples = [([1,1],1),([2,2],-1),([1,3],1),([3,1],-1),([3,3],1)]
sample_test = [3,1]
print classify(train_samples,sample_test)
"""
|
overxfl0w/AdaBoost
|
Classifiers/KernelPerceptron.py
|
Python
|
gpl-2.0
| 1,702
|
from proteibb.core.properties import *
from proteibb.util.factory import *
class PlatformsGroup:
"""
    Example of a platforms list which could be used in some json file:
{
"platforma" : { builder-dependent-platforma-info },
"platformb" : { builder-dependent-platformb-info },
}
"""
def __init__(self, data, platform_factory):
if not isinstance(data, dict):
raise SyntaxError("invalid 'platforms.json' structure")
if not isinstance(platform_factory, FactoryInterface):
raise TypeError("invalid platform factory provided")
self._platforms = {}
for platform_name, platform_data in data.items():
self._platforms[platform_name] = platform_factory.make(platform_data)
def get_platform(self, platform_name):
if platform_name not in self._platforms:
raise ValueError('no platform found with name: ' + platform_name)
return self._platforms[platform_name]
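# A hedged usage sketch (never executed): wiring PlatformsGroup to a factory.
# The caller must supply some FactoryInterface implementation; the dict below
# only mirrors the shape shown in the class docstring, and its contents are
# placeholders.
def _example_platforms_group(platform_factory):
    data = {
        "platforma": {},  # builder-dependent platform info would go here
        "platformb": {},
    }
    group = PlatformsGroup(data, platform_factory)
    return group.get_platform("platforma")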
class PlatformBase(Property.Handler):
def __init__(self, data, additional_properties=None):
properties = [
StringProperty('name', is_optional=False),
PropertyListAdapter('slaves', False, StringProperty),
]
if additional_properties:
properties.extend(additional_properties)
Property.Handler.__init__(self, properties, data)
@Property.Handler.replace
def name(self):
pass
@Property.Handler.replace
def slaves(self):
pass
|
simonenkos/proteibb
|
proteibb/core/platforms.py
|
Python
|
gpl-2.0
| 1,513
|
from django.forms import models, DateTimeField
__author__ = 'chelox'
#from django.db import models
from django.contrib.auth.models import User, Group, Permission
from usuarios.models import Usuario
from proyectos.models import Proyecto
from sprint.models import Estado
from clientes.models import Cliente
from sprint.models import Sprint
from django.utils.datetime_safe import date
from flujos.models import Flujos
from miembros.models import Miembro
#from django.contrib.auth.models import User
from django.utils.datetime_safe import date
from us.models import us, registroTrabajoUs
from flujos.models import Actividad
from roles.models import Rol
Usuario.objects.all().delete()
#Tipo_Item.objects.all().delete()
#Fase.objects.all().delete()
Proyecto.objects.all().delete()
#username='alforro'
#first_name= 'alvaro'
#last_name='test'
#cedula='4617510'
#email='alfa.alvaro.rodriguez@gmail.com'
#password='alforro'
#is_superuser=False
#Usuario.objects.create(username=username ,first_name= first_name, last_name=last_name, cedula=cedula, email=email, password= password, is_superuser=is_superuser)
#created_datetime = models.DateTimeField(2015,6,8,22,30,39,0)
pendiente = Estado(estado='Pendiente')
pendiente.save()
en_ejecucion = Estado(estado= 'En ejecucion')
en_ejecucion.save()
finalizado = Estado(estado='Finalizado')
finalizado.save()
## Permissions #################################################################
permiso=Permission.objects.get(name='Can add us')
permiso2=Permission.objects.get(name='Can add sprint')
permiso3=Permission.objects.get(name='Can change actividad')
permiso4=Permission.objects.get(name='Can add actividad')
permiso.save()
permiso2.save()
permiso3.save()
permiso4.save()
# USERS ################################################################################################
usuario1 = Usuario.objects.create_user(username='alvaro_user', first_name='Alvaro', last_name='Rodriguez',telefono='0961940704',
cedula='4617510',direccion='Calle Rio Negro esq. Rio Jejui 315',
email='alfa.alvaro.rodriguez@gmail.com', password='alvaro_user')
usuario1.save()
usuario2 = Usuario.objects.create_user(username='homero', first_name='Homero', last_name='Simpson',telefono='0961940704',
direccion='Calle Rio Negro esq. Rio Jejui 315',
cedula='123467',email='amantedelacomida53@aol.com', password='homero')
usuario2.save()
usuario3 = Usuario.objects.create_user(username='walter', first_name='Walter', last_name='White',telefono='0961940704',
cedula='8910111', email='walter@gmail.com',direccion='San Lorenzo',
password='walter')
usuario3.save()
usuario4 = Usuario.objects.create_user(username='john', first_name='John', last_name='Snow',telefono='0961940704',
direccion='Fernando',
cedula='2131415', email='john@gmail.com',
password='john')
#usuario4.save()
#password='bruce'
usuario5 = Usuario.objects.create_user(username='bruce', first_name='Bruce', last_name='Banner',telefono='0961940704',
direccion='Capiata',
cedula='1617181', email='banner@gmail.com',
password='1234')
#usuario5.set_password(password)
usuario5.save()
# CLIENTS ##################################################################################
cliente1=Cliente.objects.create_user(username= 'Marcelo', first_name='Marcelo', last_name= 'Vera', cedula=4593718,
direccion='Capiata',email='cheloxtreme@gmail.com',
password='1234')
cliente2=Cliente.objects.create_user(username= 'Gabriel', first_name='Gabriel', last_name= 'Duarte', cedula=4778963, email='chelo.vera@gmail.com',
direccion='San Lorenzo',
password='1234')
cliente3=Cliente.objects.create_user(username= 'Hugo', first_name='Hugo', last_name= 'Bolanhos', cedula=4794123, email='hugo@gmail.com',
direccion='San Lorenzo', password='1234')
cliente1.save()
cliente2.save()
cliente3.save()
## Proyecto Numero 1 "ALPHA" ESTADO PENDIENTE ##############################################################################
proyecto1 = Proyecto(nombre='alpha project', descripcion='este proyecto corresponde a Alvaro Rodriguez',
fecha_inicio= '2015-7-10', fecha_fin='2015-8-10', fecha_creacion=date.today(),
lider_proyecto=usuario1, cliente=cliente1, estado='PEN')
proyecto1.save()
#flujo11 = Flujos(nombre= 'primer flujo del proyecto alpha', descripcion='ninguna', fecha_hora_creacion=date.today(), proyecto=proyecto1)
#flujo11.save()
#acti1=Actividad(nombre='Analisis.',orden=1,flujo=flujo11)
#acti1.save()
#acti2=Actividad(nombre='Disenho.',orden=2,flujo=flujo11)
#acti2.save()
#acti3=Actividad(nombre='Desarrollo.',orden=3,flujo=flujo11)
#acti3.save()
#sprint1 = Sprint(nombre='primer sprint del proyecto alpha', proyecto=proyecto1, descripcion='primer sprint correspondiente al proyecto 1',
# duracion_dias= 7, observaciones='Ninguna', estado=pendiente)
#sprint1.save()
#sprint11 = Sprint(nombre='segundo sprint del proyecto alpha', proyecto=proyecto1, descripcion='segundo sprint correspondiente al proyecto 1',
# duracion_dias= 7, observaciones='Ninguna', estado=pendiente)
#sprint11.save()
#desarrollador = Rol(name='desarrollador', proyecto=proyecto1,)
#desarrollador.save()
#desarrollador.permissions.add(permiso)
#desarrollador.permissions.add(permiso2)
#desarrollador.permissions.add(permiso3)
#desarrollador.permissions.add(permiso4)
#desarrollador.save()
#miembro1 = Miembro(rol=desarrollador,proyecto=proyecto1,usuario=usuario2,horas_por_dia=6)
#miembro1.save()
#miembro2 = Miembro(rol=desarrollador,proyecto=proyecto1,usuario=usuario3,horas_por_dia=5)
#miembro2.save()
#miembro3 = Miembro(rol=desarrollador, proyecto=proyecto1,usuario=usuario4,horas_por_dia=4)
#miembro3.save()
#us1p1 = us(nombre='US1 para el proyecto 1',valor_de_negocio= 5, prioridad= 5, valor_tecnico= 5, descripcion='vacio',
# duracion_horas=10, duracion_horas_en_sprint=10,sprint=sprint1,flujo=flujo11, responsable=miembro1, proyecto=proyecto1,
# estado='TODO', actividad=acti1,estado_de_aprobacion='OK')
#us1p1.save()
#us2p1 = us(nombre='US2 para el proyecto 1',valor_de_negocio= 5, prioridad= 5, valor_tecnico= 5, descripcion='vacio',
# duracion_horas=10, duracion_horas_en_sprint=18, sprint=sprint1,flujo=flujo11, responsable=miembro2,
# proyecto=proyecto1, estado='TODO', actividad=acworti1,estado_de_aprobacion='OK')
#us2p1.save()
#
#us3p1 = us(nombre='US3 para el proyecto 1', proyecto=proyecto1,valor_de_negocio= 5, prioridad= 5, valor_tecnico= 5, descripcion='vacio',
# duracion_horas=10, duracion_horas_en_sprint=30,estado='TODO',sprint=sprint1,flujo=flujo11, responsable=miembro3,
# estado_de_aprobacion='OK',actividad=acti1)
#us3p1.save()
#us4p1 = us(nombre='US4 para el proyecto 1',valor_de_negocio= 5, prioridad= 5, valor_tecnico= 5, descripcion='vacio',
# duracion_horas=10, duracion_horas_en_sprint=25,sprint=sprint11,flujo=flujo11, responsable=miembro1, proyecto=proyecto1,
# estado='TODO', actividad=acti1,estado_de_aprobacion='OK')
#us4p1.save()
#
#us5p1 = us(nombre='US5 para el proyecto 1',valor_de_negocio= 5, prioridad= 5, valor_tecnico= 5, descripcion='vacio',
# duracion_horas=10, duracion_horas_en_sprint=15, sprint=sprint11,flujo=flujo11, responsable=miembro2,
# proyecto=proyecto1, estado='TODO', actividad=acti1,estado_de_aprobacion='OK')
#us5p1.save()
#us6p1 = us(nombre='US6 para el proyecto 1', proyecto=proyecto1,valor_de_negocio= 5, prioridad= 5, valor_tecnico= 5, descripcion='vacio',
# duracion_horas=10, duracion_horas_en_sprint=20,estado='TODO',sprint=sprint11,flujo=flujo11, responsable=miembro3,
# estado_de_aprobacion='OK',actividad=acti1)
#us6p1.save()
## Project number 2 "Betha", status STARTED ##############################################################################
## This project has three sprints and three flows.
proyecto2 = Proyecto(nombre='beta project', descripcion='este proyecto corresponde a Homero Simpson', cliente=cliente2,
fecha_inicio= '2015-5-20', fecha_fin='2015-6-20', fecha_creacion=date.today(),
lider_proyecto=usuario2,estado='INI' )
proyecto2.save()
sprint2 = Sprint(nombre='segundo sprint del proyecto betha', proyecto=proyecto2, descripcion='2do sprint correspondiente al proyecto 2',
duracion_dias= 20, observaciones='Ninguna', estado=en_ejecucion)
sprint2.save()
sprint3 = Sprint(nombre='tercer sprint del proyecto betha', proyecto=proyecto2, descripcion='sprint 3 correspondiente al proyecto 2',
duracion_dias= 15, observaciones='Ninguna', estado=pendiente)
sprint3.save()
sprint35 = Sprint(nombre='cuarto sprint del proyecto betha', proyecto=proyecto2, descripcion='sprint 4 correspondiente al proyecto 2',
duracion_dias= 10, observaciones='Ninguna', estado=pendiente)
sprint35.save()
flujo1 = Flujos(nombre= 'primer flujo del proyecto2', descripcion='ninguna', fecha_hora_creacion=date.today(), proyecto=proyecto2)
flujo1.save()
flujo2 = Flujos(nombre= 'segundo flujo del proyecto2', descripcion='ninguna', fecha_hora_creacion=date.today(), proyecto=proyecto2)
flujo2.save()
flujo35 = Flujos(nombre= 'tercer flujo del proyecto2', descripcion='ninguna', fecha_hora_creacion=date.today(), proyecto=proyecto2)
flujo35.save()
developer = Rol(name='developer', proyecto=proyecto2,)
developer.save()
developer.permissions.add(permiso)
developer.permissions.add(permiso2)
developer.permissions.add(permiso3)
developer.permissions.add(permiso4)
developer.save()
miembro4 =Miembro(rol=developer, proyecto=proyecto2,usuario=usuario5,horas_por_dia=4)
miembro4.save()
miembro22 = Miembro(rol=developer,proyecto=proyecto2,usuario=usuario3,horas_por_dia=5)
miembro22.save()
miembro33 = Miembro(rol=developer, proyecto=proyecto2,usuario=usuario4,horas_por_dia=4)
miembro33.save()
actividad11=Actividad(nombre='Analisiss',orden=1,flujo=flujo1)
actividad11.save()
actividad22=Actividad(nombre='Diseno',orden=2,flujo=flujo1)
actividad22.save()
actividad333=Actividad(nombre='Desarrolo',orden=3,flujo=flujo2)
actividad333.save()
actividad35=Actividad(nombre='Post-Des',orden=3,flujo=flujo35)
actividad35.save()
us1p2 = us(nombre='US1 para el proyecto 2', proyecto=proyecto2,valor_de_negocio= 6, prioridad= 50, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=50,actividad=actividad22,sprint=sprint2,flujo=flujo1,responsable=miembro4,
estado_de_aprobacion='OK',estado='DOING')
us1p2.save()
us2p2 = us(nombre='US2 para el proyecto 2', proyecto=proyecto2,valor_de_negocio= 5, prioridad= 55, valor_tecnico= 6, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=50,actividad=actividad11,sprint=sprint2,flujo=flujo1, responsable=miembro33,
estado_de_aprobacion='OK',estado='DOING')
us2p2.save()
us3p2 = us(nombre='US3 para el proyecto 2', proyecto=proyecto2,valor_de_negocio= 7, prioridad= 75, valor_tecnico= 2, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=40, actividad=actividad22,sprint=sprint2,flujo=flujo1, responsable=miembro22,
estado_de_aprobacion='OK',estado='DONE')
us3p2.save()
us4p2 = us(nombre='US4 para el proyecto 2', proyecto=proyecto2,valor_de_negocio= 2, prioridad= 85, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=10, actividad=actividad333,sprint=sprint2,flujo=flujo2,responsable=miembro4,
estado_de_aprobacion='OK',estado='DOING')
us4p2.save()
us5p2 = us(nombre='US5 para el proyecto 2', proyecto=proyecto2,valor_de_negocio= 6, prioridad= 95, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=10, actividad=actividad22,sprint=sprint2,flujo=flujo2,responsable=miembro22,
estado_de_aprobacion='OK',estado='TODO')
us5p2.save()
us6p2 = us(nombre='US6 para el proyecto 2', proyecto=proyecto2,valor_de_negocio= 5, prioridad= 15, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=10, actividad=actividad333,sprint=sprint2,flujo=flujo2,responsable=miembro33,
estado_de_aprobacion='OK',estado='DOING')
us6p2.save()
us7p2 = us(nombre='US7 para el proyecto 2', proyecto=proyecto2,valor_de_negocio= 5, prioridad= 40, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=10, actividad=actividad333,sprint=sprint2,flujo=flujo2,responsable=miembro33,
estado_de_aprobacion='OK',estado='TODO')
us7p2.save()
us8p2 = us(nombre='US8 para el proyecto 2', proyecto=proyecto2,valor_de_negocio= 5, prioridad= 60, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=None,actividad=None,sprint=None,flujo=None,responsable=None,
estado_de_aprobacion='PEN',estado='TODO')
us8p2.save()
us9p2 = us(nombre='US9 para el proyecto 2', proyecto=proyecto2,valor_de_negocio= 5, prioridad= 80, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=None,actividad=None,sprint=None,flujo=None, responsable=None,
estado_de_aprobacion='OK',estado='TODO')
us9p2.save()
us10p2 = us(nombre='US10 para el proyecto 2', proyecto=proyecto2,valor_de_negocio= 5, prioridad= 99, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=None, actividad=None,sprint=None,flujo=None, responsable=None,
estado_de_aprobacion='PEN',estado='TODO')
us10p2.save()
us11p2 = us(nombre='US11 para el proyecto 2', proyecto=proyecto2,valor_de_negocio= 5, prioridad= 96, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=10, actividad=actividad35,sprint=sprint35,flujo=flujo35,responsable=miembro4,
estado_de_aprobacion='OK',estado='TODO')
us11p2.save()
us12p2 = us(nombre='US12 para el proyecto 2', proyecto=proyecto2,valor_de_negocio= 5, prioridad= 80, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=10, actividad=actividad35,sprint=sprint35,flujo=flujo35,responsable=miembro22,
estado_de_aprobacion='OK',estado='TODO')
us12p2.save()
us13p2 = us(nombre='US13 para el proyecto 2', proyecto=proyecto2,valor_de_negocio= 5, prioridad= 70, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=10, actividad=actividad35,sprint=sprint35,flujo=flujo35,responsable=miembro33,
estado_de_aprobacion='OK',estado='TODO')
us13p2.save()
us14p2 = us(nombre='US14 para el proyecto 2', proyecto=proyecto2,valor_de_negocio= 5, prioridad= 30, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=10, actividad=actividad35,sprint=sprint35,flujo=flujo35,responsable=miembro33,
estado_de_aprobacion='OK',estado='TODO')
us14p2.save()
reg1= registroTrabajoUs(us=us1p2, descripcion='primer registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-20 16:30',archivo_adjunto=None)
reg1.save()
reg2= registroTrabajoUs(us=us1p2, descripcion='segundo registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-21 16:30',archivo_adjunto=None)
reg2.save()
reg3= registroTrabajoUs(us=us1p2, descripcion='tercer registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-22 16:30',archivo_adjunto=None)
reg3.save()
reg4= registroTrabajoUs(us=us1p2, descripcion='cuarto registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-23 16:30',archivo_adjunto=None)
reg4.save()
reg5= registroTrabajoUs(us=us1p2, descripcion='quinto registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-20 16:30',archivo_adjunto=None)
reg5.save()
reg6= registroTrabajoUs(us=us1p2, descripcion='sexto registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-21 16:30',archivo_adjunto=None)
reg6.save()
reg7= registroTrabajoUs(us=us1p2, descripcion='septimo registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-22 16:30',archivo_adjunto=None)
reg7.save()
reg8= registroTrabajoUs(us=us1p2, descripcion='octavo registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-23 16:30',archivo_adjunto=None)
reg8.save()
reg9= registroTrabajoUs(us=us1p2, descripcion='noveno registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-24 16:30',archivo_adjunto=None)
reg9.save()
reg10= registroTrabajoUs(us=us1p2, descripcion='decimo registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-25 16:30',archivo_adjunto=None)
reg10.save()
reg11= registroTrabajoUs(us=us1p2, descripcion='decimo 1er registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-26 16:30',archivo_adjunto=None)
reg11.save()
reg12= registroTrabajoUs(us=us1p2, descripcion='decimo 2do registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-27 16:30',archivo_adjunto=None)
reg12.save()
reg13= registroTrabajoUs(us=us1p2, descripcion='decimo 3er registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-28 16:30',archivo_adjunto=None)
reg13.save()
reg14= registroTrabajoUs(us=us1p2, descripcion='decimo cuarto registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-29 16:30',archivo_adjunto=None)
reg14.save()
reg15= registroTrabajoUs(us=us1p2, descripcion='decimo quinto registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-30 16:30',archivo_adjunto=None)
reg15.save()
reg16= registroTrabajoUs(us=us1p2, descripcion='decimo sexto registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-31 16:30',archivo_adjunto=None)
reg16.save()
reg17= registroTrabajoUs(us=us1p2, descripcion='decimo septimo registro', horas_dedicadas=6, fecha_hora_creacion='2015-6-1 16:30',archivo_adjunto=None)
reg17.save()
reg18= registroTrabajoUs(us=us1p2, descripcion='decimo octavo registro', horas_dedicadas=6, fecha_hora_creacion='2015-6-2 16:30',archivo_adjunto=None)
reg18.save()
reg19= registroTrabajoUs(us=us1p2, descripcion='decimo noveno registro', horas_dedicadas=6, fecha_hora_creacion='2015-6-3 16:30',archivo_adjunto=None)
reg19.save()
reg20= registroTrabajoUs(us=us1p2, descripcion='vigesimo registro', horas_dedicadas=6, fecha_hora_creacion='2015-6-4 16:30',archivo_adjunto=None)
reg20.save()
reg1= registroTrabajoUs(us=us2p2, descripcion='primer registro', horas_dedicadas=8, fecha_hora_creacion='2015-5-20 16:30',archivo_adjunto=None)
reg1.save()
reg2= registroTrabajoUs(us=us2p2, descripcion='segundo registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-21 16:30',archivo_adjunto=None)
reg2.save()
reg3= registroTrabajoUs(us=us2p2, descripcion='tercer registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-22 16:30',archivo_adjunto=None)
reg3.save()
reg4= registroTrabajoUs(us=us2p2, descripcion='cuarto registro', horas_dedicadas=8, fecha_hora_creacion='2015-5-23 16:30',archivo_adjunto=None)
reg4.save()
reg5= registroTrabajoUs(us=us2p2, descripcion='quinto registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-20 16:30',archivo_adjunto=None)
reg5.save()
reg6= registroTrabajoUs(us=us2p2, descripcion='sexto registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-21 16:30',archivo_adjunto=None)
reg6.save()
reg7= registroTrabajoUs(us=us2p2, descripcion='septimo registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-22 16:30',archivo_adjunto=None)
reg7.save()
reg8= registroTrabajoUs(us=us2p2, descripcion='octavo registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-23 16:30',archivo_adjunto=None)
reg8.save()
reg9= registroTrabajoUs(us=us2p2, descripcion='noveno registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-24 16:30',archivo_adjunto=None)
reg9.save()
reg10= registroTrabajoUs(us=us2p2, descripcion='decimo registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-25 16:30',archivo_adjunto=None)
reg10.save()
reg11= registroTrabajoUs(us=us2p2, descripcion='decimo 1er registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-26 16:30',archivo_adjunto=None)
reg11.save()
reg12= registroTrabajoUs(us=us2p2, descripcion='decimo 2do registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-27 16:30',archivo_adjunto=None)
reg12.save()
reg13= registroTrabajoUs(us=us2p2, descripcion='decimo 3er registro', horas_dedicadas=8, fecha_hora_creacion='2015-5-28 16:30',archivo_adjunto=None)
reg13.save()
reg14= registroTrabajoUs(us=us2p2, descripcion='decimo cuarto registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-29 16:30',archivo_adjunto=None)
reg14.save()
reg15= registroTrabajoUs(us=us2p2, descripcion='decimo quinto registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-30 16:30',archivo_adjunto=None)
reg15.save()
reg16= registroTrabajoUs(us=us2p2, descripcion='decimo sexto registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-31 16:30',archivo_adjunto=None)
reg16.save()
reg17= registroTrabajoUs(us=us2p2, descripcion='decimo septimo registro', horas_dedicadas=5, fecha_hora_creacion='2015-6-1 16:30',archivo_adjunto=None)
reg17.save()
reg18= registroTrabajoUs(us=us2p2, descripcion='decimo octavo registro', horas_dedicadas=3, fecha_hora_creacion='2015-6-2 16:30',archivo_adjunto=None)
reg18.save()
reg19= registroTrabajoUs(us=us2p2, descripcion='decimo noveno registro', horas_dedicadas=7, fecha_hora_creacion='2015-6-3 16:30',archivo_adjunto=None)
reg19.save()
reg20= registroTrabajoUs(us=us2p2, descripcion='vigesimo registro', horas_dedicadas=3, fecha_hora_creacion='2015-6-4 16:30',archivo_adjunto=None)
reg20.save()
reg1= registroTrabajoUs(us=us3p2, descripcion='primer registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-20 16:30',archivo_adjunto=None)
reg1.save()
reg2= registroTrabajoUs(us=us3p2, descripcion='segundo registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-21 16:30',archivo_adjunto=None)
reg2.save()
reg3= registroTrabajoUs(us=us3p2, descripcion='tercer registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-22 16:30',archivo_adjunto=None)
reg3.save()
reg4= registroTrabajoUs(us=us3p2, descripcion='cuarto registro', horas_dedicadas=8, fecha_hora_creacion='2015-5-23 16:30',archivo_adjunto=None)
reg4.save()
reg5= registroTrabajoUs(us=us3p2, descripcion='quinto registro', horas_dedicadas=8, fecha_hora_creacion='2015-5-24 16:30',archivo_adjunto=None)
reg5.save()
reg6= registroTrabajoUs(us=us3p2, descripcion='sexto registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-21 16:30',archivo_adjunto=None)
reg6.save()
reg7= registroTrabajoUs(us=us3p2, descripcion='septimo registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-22 16:30',archivo_adjunto=None)
reg7.save()
reg8= registroTrabajoUs(us=us3p2, descripcion='octavo registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-23 16:30',archivo_adjunto=None)
reg8.save()
reg9= registroTrabajoUs(us=us3p2, descripcion='noveno registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-24 16:30',archivo_adjunto=None)
reg9.save()
reg10= registroTrabajoUs(us=us3p2, descripcion='decimo registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-25 16:30',archivo_adjunto=None)
reg10.save()
reg11= registroTrabajoUs(us=us3p2, descripcion='decimo 1er registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-26 16:30',archivo_adjunto=None)
reg11.save()
reg12= registroTrabajoUs(us=us3p2, descripcion='decimo 2do registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-27 16:30',archivo_adjunto=None)
reg12.save()
reg13= registroTrabajoUs(us=us3p2, descripcion='decimo 3er registro', horas_dedicadas=8, fecha_hora_creacion='2015-5-28 16:30',archivo_adjunto=None)
reg13.save()
reg14= registroTrabajoUs(us=us3p2, descripcion='decimo cuarto registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-29 16:30',archivo_adjunto=None)
reg14.save()
reg15= registroTrabajoUs(us=us3p2, descripcion='decimo quinto registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-30 16:30',archivo_adjunto=None)
reg15.save()
reg16= registroTrabajoUs(us=us3p2, descripcion='decimo sexto registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-31 16:30',archivo_adjunto=None)
reg16.save()
reg17= registroTrabajoUs(us=us3p2, descripcion='decimo septimo registro', horas_dedicadas=2, fecha_hora_creacion='2015-6-1 16:30',archivo_adjunto=None)
reg17.save()
reg18= registroTrabajoUs(us=us3p2, descripcion='decimo octavo registro', horas_dedicadas=5, fecha_hora_creacion='2015-6-2 16:30',archivo_adjunto=None)
reg18.save()
reg19= registroTrabajoUs(us=us3p2, descripcion='decimo noveno registro', horas_dedicadas=8, fecha_hora_creacion='2015-6-3 16:30',archivo_adjunto=None)
reg19.save()
reg20= registroTrabajoUs(us=us3p2, descripcion='vigesimo registro', horas_dedicadas=2, fecha_hora_creacion='2015-6-4 16:30',archivo_adjunto=None)
reg20.save()
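# Illustrative sketch (added as an example, not used by the original script): the repetitive
# registroTrabajoUs blocks above could be generated in a loop. 'make_registros' and the
# numbered 'registro %d' labels it emits are assumptions for demonstration only.
import datetime
def make_registros(user_story, horas_list, start='2015-5-20 16:30'):
    start_dt = datetime.datetime.strptime(start, '%Y-%m-%d %H:%M')
    for i, horas in enumerate(horas_list):
        registroTrabajoUs(us=user_story, descripcion='registro %d' % (i + 1),
                          horas_dedicadas=horas,
                          fecha_hora_creacion=start_dt + datetime.timedelta(days=i),
                          archivo_adjunto=None).save()
# Example, similar in spirit to the us1p2 block above:
# make_registros(us1p2, [5, 3, 6, 6, 5, 3, 6, 6, 5, 3, 6, 6, 6, 6, 6, 3, 6, 6, 6, 6])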
## Project number 3 'Gamma', status FINISHED ##############################################################################
proyecto3 = Proyecto(nombre='gamma project', descripcion='este proyecto corresponde a Walter White',
fecha_inicio= '2015-5-10',cliente=cliente3, fecha_fin='2015-6-15',
fecha_creacion=date.today(),lider_proyecto=usuario3,estado='FIN')
#proyecto4 = Proyecto(nombre='delta project', descripcion='este proyecto corresponde a John Snow',
# fecha_inicio= date.today(), fecha_fin=date.today(), fecha_creacion=date.today(),
# lider_proyecto=usuario4, cliente=cliente3,estado='INI')
#proyecto5 = Proyecto(nombre='epsilon project', descripcion='este proyecto corresponde a Bruce Banner',
# fecha_inicio= date.today(), fecha_fin=date.today(), fecha_creacion=date.today(),
# lider_proyecto=usuario5, cliente=cliente2,estado='FIN')
proyecto3.save()
#proyecto4.save()
#proyecto5.save()
#sprint4 = Sprint(nombre='segundo Sprint del proyecto betha', proyecto=proyecto2, descripcion='2do sprint correspondiente al proyecto 2',
# duracion_dias= 15, observaciones='Ninguna', estado=finalizado)
#sprint4.save()
sprint5 = Sprint(nombre='primer sprint del proyecto gamma', proyecto=proyecto3, descripcion='1er sprint correspondiente al proyecto 3',
duracion_dias= 15, observaciones='Ninguna', estado=finalizado)
sprint5.save()
sprint6 = Sprint(nombre='segundo sprint del proyecto gamma', proyecto=proyecto3, descripcion='2do sprint correspondiente al proyecto 3',
duracion_dias= 15, observaciones='Ninguna', estado=finalizado)
sprint6.save()
sprint7 = Sprint(nombre='tercer sprint del proyecto gamma', proyecto=proyecto3, descripcion='3er sprint correspondiente al proyecto 3',
duracion_dias= 4, observaciones='Ninguna', estado=finalizado)
sprint7.save()
#sprint7 = Sprint(nombre='3 Sprint Pro 4', proyecto=proyecto3, descripcion='3er sprint correspondiente al proyecto 3',
# duracion_dias= 15, observaciones='Ninguna', estado=finalizado)
#sprint7.save()
#sprint8 = Sprint(nombre='2SprintPro4', proyecto=proyecto3, descripcion='4to sprint correspondiente al proyecto 3',
# duracion_dias= 15, observaciones='Ninguna', estado=pendiente)
#sprint8.save()
flujo3 = Flujos(nombre= '1er flujo del proyecto gamma', descripcion='ninguna', fecha_hora_creacion=date.today(), proyecto=proyecto3)
flujo3.save()
flujo4 = Flujos(nombre= '2do flujo del proyecto gamma', descripcion='ninguna', fecha_hora_creacion=date.today(), proyecto=proyecto3)
flujo4.save()
flujo5 = Flujos(nombre= '3er flujo del proyecto gamma', descripcion='ninguna', fecha_hora_creacion=date.today(), proyecto=proyecto3)
flujo5.save()
#flujo5 = Flujos(nombre= '3er flujo del proyecto2', descripcion='ninguna', fecha_hora_creacion=date.today(), proyecto=proyecto2)
#flujo5.save()
#desarrollador = Group(name='desarrollador')
#desarrollador.save()
#desarrollador.permissions.add(permiso)
#desarrollador.save()
develop = Rol(name='develop', proyecto=proyecto3,)
develop.save()
develop.permissions.add(permiso)
develop.permissions.add(permiso2)
develop.permissions.add(permiso3)
develop.permissions.add(permiso4)
develop.save()
#rolMiembro2 = Rol(proyecto=proyecto2, Group=desarrollador)
#rolMiembro3 = Rol(proyecto=proyecto3, Group=desarrollador)
#rolMiembro.permissions=[can_add_rol,can_change_rol]
#rolMiembro1.permissions=[Can add us,Can add miembro,Can add rol,Can change rol,Can add sprint, Can add flujo,Can change flujo, Can add actividad,
# Can change actividad, Can add registro trabajo us,]
#rolMiembro1.permissions=[]
#rolMiembro2.permissions=['Can add us','Can add miembro','Can add rol','Can change rol','Can add sprint', 'Can add flujo','Can change flujo', 'Can add actividad',
# 'Can change actividad', 'Can add registro trabajo us',]
#rolMiembro2.save()
#rolMiembro3.permissions=['Can add us','Can add miembro','Can add rol','Can change rol','Can add sprint', 'Can add flujo','Can change flujo', 'Can add actividad',
# 'Can change actividad', 'Can add registro trabajo us',]
#rolMiembro3.save()
miembro5 =Miembro(rol=develop, proyecto=proyecto3,usuario=usuario1,horas_por_dia=4)
miembro5.save()
miembro222 = Miembro(rol=develop,proyecto=proyecto3,usuario=usuario3,horas_por_dia=5)
miembro222.save()
miembro333 = Miembro(rol=develop, proyecto=proyecto3,usuario=usuario4,horas_por_dia=4)
miembro333.save()
actividad1=Actividad(nombre='Analisis',orden=1,flujo=flujo3)
actividad1.save()
actividad2=Actividad(nombre='Disenho',orden=2,flujo=flujo3)
actividad2.save()
actividad3=Actividad(nombre='Desarrollo',orden=3,flujo=flujo3)
actividad3.save()
actividad4=Actividad(nombre='Mantenimiento',orden=1,flujo=flujo4)
actividad4.save()
actividad5=Actividad(nombre='Post-Desarrollo',orden=2,flujo=flujo4)
actividad5.save()
actividad6=Actividad(nombre='Mantenimiento.',orden=2,flujo=flujo5)
actividad6.save()
#us1 = us(nombre='US5 para el proyecto 2',valor_de_negocio= 5, prioridad= 5, valor_tecnico= 5, descripcion='vacio',
# duracion_horas=10, duracion_horas_en_sprint=10,sprint=sprint4, responsable=miembro33, proyecto=proyecto2,
# estado='TODO', actividad=actividad1,flujo=flujo2,estado_de_aprobacion='OK')
#us1.save()
#us2 = us(nombre='US6 para el proyecto 2',valor_de_negocio= 5, prioridad= 5, valor_tecnico= 5, descripcion='vacio',
# duracion_horas=10, duracion_horas_en_sprint=10,sprint=sprint4, responsable=miembro22, proyecto=proyecto2,
# estado='TODO', actividad=actividad4,flujo=flujo5,estado_de_aprobacion='OK')
#us2.save()
# User stories (US) for the GAMMA project
us1p3 = us(nombre='US1 para el proyecto 3', proyecto=proyecto3,valor_de_negocio= 5, prioridad= 10, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=40,estado='DONE',actividad=actividad3,sprint=sprint5,flujo=flujo3, responsable=miembro333,
estado_de_aprobacion='FIN')
us1p3.save()
us2p3 = us(nombre='US2 para el proyecto 3', proyecto=proyecto3,valor_de_negocio= 5, prioridad= 15, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=60,estado='DONE',actividad=actividad5,sprint=sprint5,flujo=flujo4,responsable=miembro5,
estado_de_aprobacion='FIN')
us2p3.save()
us3p3 = us(nombre='US3 para el proyecto 3', proyecto=proyecto3,valor_de_negocio= 5, prioridad= 25, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=45,estado='DONE',actividad=actividad3,sprint=sprint5,flujo=flujo3, responsable=miembro222,
estado_de_aprobacion='FIN')
us3p3.save()
us4p3 = us(nombre='US4 para el proyecto 3', proyecto=proyecto3,valor_de_negocio= 5, prioridad= 35, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=35,estado='DONE',actividad=actividad5,sprint=sprint6,flujo=flujo4, responsable=miembro222,
estado_de_aprobacion='FIN')
us4p3.save()
us5p3 = us(nombre='US5 para el proyecto 3', proyecto=proyecto3,valor_de_negocio= 5, prioridad= 40, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=25,estado='DONE',actividad=actividad3,sprint=sprint5,flujo=flujo3,responsable=miembro333,
estado_de_aprobacion='FIN')
us5p3.save()
us6p3 = us(nombre='US6 para el proyecto 3', proyecto=proyecto3,valor_de_negocio= 5, prioridad= 55, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=28,estado='DONE',actividad=actividad5,sprint=sprint6,flujo=flujo4, responsable=miembro333,
estado_de_aprobacion='FIN')
us6p3.save()
us7p3 = us(nombre='US7 para el proyecto 3', proyecto=proyecto3,valor_de_negocio= 5, prioridad= 75, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=50,estado='DONE',actividad=actividad3,sprint=sprint5,flujo=flujo3, responsable=miembro5,
estado_de_aprobacion='FIN')
us7p3.save()
us8p3 = us(nombre='US8 para el proyecto 3', proyecto=proyecto3,valor_de_negocio= 5, prioridad= 85, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=30,estado='DONE',actividad=actividad5,sprint=sprint6,flujo=flujo4,responsable=miembro5,
estado_de_aprobacion='FIN')
us8p3.save()
us9p3 = us(nombre='US9 para el proyecto 3', proyecto=proyecto3,valor_de_negocio= 5, prioridad= 90, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=35,estado='DONE',actividad=actividad3,sprint=sprint5,flujo=flujo3, responsable=miembro333,
estado_de_aprobacion='FIN')
us9p3.save()
us10p3 = us(nombre='US10 para el proyecto 3', proyecto=proyecto3,valor_de_negocio= 5, prioridad= 80, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=60,estado='DONE',actividad=actividad5,sprint=sprint6,flujo=flujo4, responsable=miembro222,
estado_de_aprobacion='FIN')
us10p3.save()
reg1= registroTrabajoUs(us=us1p3, descripcion='primer registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-10 16:30',archivo_adjunto=None)
reg1.save()
reg2= registroTrabajoUs(us=us1p3, descripcion='segundo registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-11 16:30',archivo_adjunto=None)
reg2.save()
reg3= registroTrabajoUs(us=us1p3, descripcion='tercer registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-12 16:30',archivo_adjunto=None)
reg3.save()
reg4= registroTrabajoUs(us=us1p3, descripcion='cuarto registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-13 16:30',archivo_adjunto=None)
reg4.save()
reg5= registroTrabajoUs(us=us1p3, descripcion='quinto registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-16 16:30',archivo_adjunto=None)
reg5.save()
reg6= registroTrabajoUs(us=us1p3, descripcion='sexto registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-17 16:30',archivo_adjunto=None)
reg6.save()
reg7= registroTrabajoUs(us=us1p3, descripcion='septimo registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-18 16:30',archivo_adjunto=None)
reg7.save()
reg8= registroTrabajoUs(us=us1p3, descripcion='octavo registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-19 16:30',archivo_adjunto=None)
reg8.save()
reg9= registroTrabajoUs(us=us1p3, descripcion='noveno registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-20 16:30',archivo_adjunto=None)
reg9.save()
reg10= registroTrabajoUs(us=us1p3, descripcion='decimo registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-21 16:30',archivo_adjunto=None)
reg10.save()
reg11= registroTrabajoUs(us=us1p3, descripcion='decimo 1er registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-22 16:30',archivo_adjunto=None)
reg11.save()
reg12= registroTrabajoUs(us=us1p3, descripcion='decimo 2do registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-23 16:30',archivo_adjunto=None)
reg12.save()
reg13= registroTrabajoUs(us=us1p3, descripcion='decimo 3er registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-24 16:30',archivo_adjunto=None)
reg13.save()
reg14= registroTrabajoUs(us=us1p3, descripcion='decimo cuarto registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-25 16:30',archivo_adjunto=None)
reg14.save()
reg15= registroTrabajoUs(us=us1p3, descripcion='decimo quinto registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-26 16:30',archivo_adjunto=None)
reg15.save()
reg1= registroTrabajoUs(us=us2p3, descripcion='primer registro', horas_dedicadas=8, fecha_hora_creacion='2015-5-10 16:30',archivo_adjunto=None)
reg1.save()
reg2= registroTrabajoUs(us=us2p3, descripcion='segundo registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-11 16:30',archivo_adjunto=None)
reg2.save()
reg3= registroTrabajoUs(us=us2p3, descripcion='tercer registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-12 16:30',archivo_adjunto=None)
reg3.save()
reg4= registroTrabajoUs(us=us2p3, descripcion='cuarto registro', horas_dedicadas=8, fecha_hora_creacion='2015-5-13 16:30',archivo_adjunto=None)
reg4.save()
reg5= registroTrabajoUs(us=us2p3, descripcion='quinto registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-16 16:30',archivo_adjunto=None)
reg5.save()
reg6= registroTrabajoUs(us=us2p3, descripcion='sexto registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-17 16:30',archivo_adjunto=None)
reg6.save()
reg7= registroTrabajoUs(us=us2p3, descripcion='septimo registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-18 16:30',archivo_adjunto=None)
reg7.save()
reg8= registroTrabajoUs(us=us2p3, descripcion='octavo registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-19 16:30',archivo_adjunto=None)
reg8.save()
reg9= registroTrabajoUs(us=us2p3, descripcion='noveno registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-20 16:30',archivo_adjunto=None)
reg9.save()
reg10= registroTrabajoUs(us=us2p3, descripcion='decimo registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-21 16:30',archivo_adjunto=None)
reg10.save()
reg11= registroTrabajoUs(us=us2p3, descripcion='decimo 1er registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-22 16:30',archivo_adjunto=None)
reg11.save()
reg12= registroTrabajoUs(us=us2p3, descripcion='decimo 2do registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-23 16:30',archivo_adjunto=None)
reg12.save()
reg13= registroTrabajoUs(us=us2p3, descripcion='decimo 3er registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-24 16:30',archivo_adjunto=None)
reg13.save()
reg14= registroTrabajoUs(us=us2p3, descripcion='decimo cuarto registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-25 16:30',archivo_adjunto=None)
reg14.save()
reg15= registroTrabajoUs(us=us2p3, descripcion='decimo quinto registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-26 16:30',archivo_adjunto=None)
reg15.save()
reg1= registroTrabajoUs(us=us3p3, descripcion='primer registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-10 16:30',archivo_adjunto=None)
reg1.save()
reg2= registroTrabajoUs(us=us3p3, descripcion='segundo registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-11 16:30',archivo_adjunto=None)
reg2.save()
reg3= registroTrabajoUs(us=us3p3, descripcion='tercer registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-12 16:30',archivo_adjunto=None)
reg3.save()
reg4= registroTrabajoUs(us=us3p3, descripcion='cuarto registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-13 16:30',archivo_adjunto=None)
reg4.save()
reg5= registroTrabajoUs(us=us3p3, descripcion='quinto registro', horas_dedicadas=7, fecha_hora_creacion='2015-5-16 16:30',archivo_adjunto=None)
reg5.save()
reg6= registroTrabajoUs(us=us3p3, descripcion='sexto registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-17 16:30',archivo_adjunto=None)
reg6.save()
reg7= registroTrabajoUs(us=us3p3, descripcion='septimo registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-18 16:30',archivo_adjunto=None)
reg7.save()
reg8= registroTrabajoUs(us=us3p3, descripcion='octavo registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-19 16:30',archivo_adjunto=None)
reg8.save()
reg9= registroTrabajoUs(us=us3p3, descripcion='noveno registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-20 16:30',archivo_adjunto=None)
reg9.save()
reg10= registroTrabajoUs(us=us3p3, descripcion='decimo registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-21 16:30',archivo_adjunto=None)
reg10.save()
reg11= registroTrabajoUs(us=us3p3, descripcion='decimo 1er registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-22 16:30',archivo_adjunto=None)
reg11.save()
reg12= registroTrabajoUs(us=us3p3, descripcion='decimo 2do registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-23 16:30',archivo_adjunto=None)
reg12.save()
reg13= registroTrabajoUs(us=us3p3, descripcion='decimo 3er registro', horas_dedicadas=0, fecha_hora_creacion='2015-5-24 16:30',archivo_adjunto=None)
reg13.save()
reg14= registroTrabajoUs(us=us3p3, descripcion='decimo cuarto registro', horas_dedicadas=0, fecha_hora_creacion='2015-5-25 16:30',archivo_adjunto=None)
reg14.save()
reg15= registroTrabajoUs(us=us3p3, descripcion='decimo quinto registro', horas_dedicadas=0, fecha_hora_creacion='2015-5-26 16:30',archivo_adjunto=None)
reg15.save()
reg1= registroTrabajoUs(us=us4p3, descripcion='primer registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-10 16:30',archivo_adjunto=None)
reg1.save()
reg2= registroTrabajoUs(us=us4p3, descripcion='segundo registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-11 16:30',archivo_adjunto=None)
reg2.save()
reg3= registroTrabajoUs(us=us4p3, descripcion='tercer registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-12 16:30',archivo_adjunto=None)
reg3.save()
reg4= registroTrabajoUs(us=us4p3, descripcion='cuarto registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-13 16:30',archivo_adjunto=None)
reg4.save()
reg5= registroTrabajoUs(us=us4p3, descripcion='quinto registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-16 16:30',archivo_adjunto=None)
reg5.save()
reg6= registroTrabajoUs(us=us4p3, descripcion='sexto registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-17 16:30',archivo_adjunto=None)
reg6.save()
reg7= registroTrabajoUs(us=us4p3, descripcion='septimo registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-18 16:30',archivo_adjunto=None)
reg7.save()
reg8= registroTrabajoUs(us=us4p3, descripcion='octavo registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-19 16:30',archivo_adjunto=None)
reg8.save()
reg9= registroTrabajoUs(us=us4p3, descripcion='noveno registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-20 16:30',archivo_adjunto=None)
reg9.save()
reg10= registroTrabajoUs(us=us4p3, descripcion='decimo registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-21 16:30',archivo_adjunto=None)
reg10.save()
reg11= registroTrabajoUs(us=us4p3, descripcion='decimo 1er registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-22 16:30',archivo_adjunto=None)
reg11.save()
reg12= registroTrabajoUs(us=us4p3, descripcion='decimo 2do registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-23 16:30',archivo_adjunto=None)
reg12.save()
reg13= registroTrabajoUs(us=us4p3, descripcion='decimo 3er registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-24 16:30',archivo_adjunto=None)
reg13.save()
reg14= registroTrabajoUs(us=us4p3, descripcion='decimo cuarto registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-25 16:30',archivo_adjunto=None)
reg14.save()
reg15= registroTrabajoUs(us=us4p3, descripcion='decimo quinto registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-26 16:30',archivo_adjunto=None)
reg15.save()
reg1= registroTrabajoUs(us=us5p3, descripcion='primer registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-10 16:30',archivo_adjunto=None)
reg1.save()
reg2= registroTrabajoUs(us=us5p3, descripcion='segundo registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-11 16:30',archivo_adjunto=None)
reg2.save()
reg3= registroTrabajoUs(us=us5p3, descripcion='tercer registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-12 16:30',archivo_adjunto=None)
reg3.save()
reg4= registroTrabajoUs(us=us5p3, descripcion='cuarto registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-13 16:30',archivo_adjunto=None)
reg4.save()
reg5= registroTrabajoUs(us=us5p3, descripcion='quinto registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-16 16:30',archivo_adjunto=None)
reg5.save()
reg6= registroTrabajoUs(us=us5p3, descripcion='sexto registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-17 16:30',archivo_adjunto=None)
reg6.save()
reg7= registroTrabajoUs(us=us5p3, descripcion='septimo registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-18 16:30',archivo_adjunto=None)
reg7.save()
reg8= registroTrabajoUs(us=us5p3, descripcion='octavo registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-19 16:30',archivo_adjunto=None)
reg8.save()
reg9= registroTrabajoUs(us=us5p3, descripcion='noveno registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-20 16:30',archivo_adjunto=None)
reg9.save()
reg10= registroTrabajoUs(us=us5p3, descripcion='decimo registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-21 16:30',archivo_adjunto=None)
reg10.save()
reg11= registroTrabajoUs(us=us5p3, descripcion='decimo 1er registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-22 16:30',archivo_adjunto=None)
reg11.save()
reg12= registroTrabajoUs(us=us5p3, descripcion='decimo 2do registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-23 16:30',archivo_adjunto=None)
reg12.save()
reg13= registroTrabajoUs(us=us5p3, descripcion='decimo 3er registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-24 16:30',archivo_adjunto=None)
reg13.save()
reg14= registroTrabajoUs(us=us5p3, descripcion='decimo cuarto registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-25 16:30',archivo_adjunto=None)
reg14.save()
reg15= registroTrabajoUs(us=us5p3, descripcion='decimo quinto registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-26 16:30',archivo_adjunto=None)
reg15.save()
reg1= registroTrabajoUs(us=us6p3, descripcion='primer registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-10 16:30',archivo_adjunto=None)
reg1.save()
reg2= registroTrabajoUs(us=us6p3, descripcion='segundo registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-11 16:30',archivo_adjunto=None)
reg2.save()
reg3= registroTrabajoUs(us=us6p3, descripcion='tercer registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-12 16:30',archivo_adjunto=None)
reg3.save()
reg4= registroTrabajoUs(us=us6p3, descripcion='cuarto registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-13 16:30',archivo_adjunto=None)
reg4.save()
reg5= registroTrabajoUs(us=us6p3, descripcion='quinto registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-16 16:30',archivo_adjunto=None)
reg5.save()
reg6= registroTrabajoUs(us=us6p3, descripcion='sexto registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-17 16:30',archivo_adjunto=None)
reg6.save()
reg7= registroTrabajoUs(us=us6p3, descripcion='septimo registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-18 16:30',archivo_adjunto=None)
reg7.save()
reg8= registroTrabajoUs(us=us6p3, descripcion='octavo registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-19 16:30',archivo_adjunto=None)
reg8.save()
reg9= registroTrabajoUs(us=us6p3, descripcion='noveno registro', horas_dedicadas=0, fecha_hora_creacion='2015-5-20 16:30',archivo_adjunto=None)
reg9.save()
reg10= registroTrabajoUs(us=us6p3, descripcion='decimo registro', horas_dedicadas=0, fecha_hora_creacion='2015-5-21 16:30',archivo_adjunto=None)
reg10.save()
reg11= registroTrabajoUs(us=us6p3, descripcion='decimo 1er registro', horas_dedicadas=0, fecha_hora_creacion='2015-5-22 16:30',archivo_adjunto=None)
reg11.save()
reg12= registroTrabajoUs(us=us6p3, descripcion='decimo 2do registro', horas_dedicadas=0, fecha_hora_creacion='2015-5-23 16:30',archivo_adjunto=None)
reg12.save()
reg13= registroTrabajoUs(us=us6p3, descripcion='decimo 3er registro', horas_dedicadas=0, fecha_hora_creacion='2015-5-24 16:30',archivo_adjunto=None)
reg13.save()
reg14= registroTrabajoUs(us=us6p3, descripcion='decimo cuarto registro', horas_dedicadas=0, fecha_hora_creacion='2015-5-25 16:30',archivo_adjunto=None)
reg14.save()
reg15= registroTrabajoUs(us=us6p3, descripcion='decimo quinto registro', horas_dedicadas=0, fecha_hora_creacion='2015-5-26 16:30',archivo_adjunto=None)
reg15.save()
reg1= registroTrabajoUs(us=us7p3, descripcion='primer registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-10 16:30',archivo_adjunto=None)
reg1.save()
reg2= registroTrabajoUs(us=us7p3, descripcion='segundo registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-11 16:30',archivo_adjunto=None)
reg2.save()
reg3= registroTrabajoUs(us=us7p3, descripcion='tercer registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-12 16:30',archivo_adjunto=None)
reg3.save()
reg4= registroTrabajoUs(us=us7p3, descripcion='cuarto registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-13 16:30',archivo_adjunto=None)
reg4.save()
reg5= registroTrabajoUs(us=us7p3, descripcion='quinto registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-16 16:30',archivo_adjunto=None)
reg5.save()
reg6= registroTrabajoUs(us=us7p3, descripcion='sexto registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-17 16:30',archivo_adjunto=None)
reg6.save()
reg7= registroTrabajoUs(us=us7p3, descripcion='septimo registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-18 16:30',archivo_adjunto=None)
reg7.save()
reg8= registroTrabajoUs(us=us7p3, descripcion='octavo registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-19 16:30',archivo_adjunto=None)
reg8.save()
reg9= registroTrabajoUs(us=us7p3, descripcion='noveno registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-20 16:30',archivo_adjunto=None)
reg9.save()
reg10= registroTrabajoUs(us=us7p3, descripcion='decimo registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-21 16:30',archivo_adjunto=None)
reg10.save()
reg11= registroTrabajoUs(us=us7p3, descripcion='decimo 1er registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-22 16:30',archivo_adjunto=None)
reg11.save()
reg12= registroTrabajoUs(us=us7p3, descripcion='decimo 2do registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-23 16:30',archivo_adjunto=None)
reg12.save()
reg13= registroTrabajoUs(us=us7p3, descripcion='decimo 3er registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-24 16:30',archivo_adjunto=None)
reg13.save()
reg14= registroTrabajoUs(us=us7p3, descripcion='decimo cuarto registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-25 16:30',archivo_adjunto=None)
reg14.save()
reg15= registroTrabajoUs(us=us7p3, descripcion='decimo quinto registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-26 16:30',archivo_adjunto=None)
reg15.save()
reg1= registroTrabajoUs(us=us8p3, descripcion='primer registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-10 16:30',archivo_adjunto=None)
reg1.save()
reg2= registroTrabajoUs(us=us8p3, descripcion='segundo registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-11 16:30',archivo_adjunto=None)
reg2.save()
reg3= registroTrabajoUs(us=us8p3, descripcion='tercer registro', horas_dedicadas=0, fecha_hora_creacion='2015-5-12 16:30',archivo_adjunto=None)
reg3.save()
reg4= registroTrabajoUs(us=us8p3, descripcion='cuarto registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-13 16:30',archivo_adjunto=None)
reg4.save()
reg5= registroTrabajoUs(us=us8p3, descripcion='quinto registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-16 16:30',archivo_adjunto=None)
reg5.save()
reg6= registroTrabajoUs(us=us8p3, descripcion='sexto registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-17 16:30',archivo_adjunto=None)
reg6.save()
reg7= registroTrabajoUs(us=us8p3, descripcion='septimo registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-18 16:30',archivo_adjunto=None)
reg7.save()
reg8= registroTrabajoUs(us=us8p3, descripcion='octavo registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-19 16:30',archivo_adjunto=None)
reg8.save()
reg9= registroTrabajoUs(us=us8p3, descripcion='noveno registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-20 16:30',archivo_adjunto=None)
reg9.save()
reg10= registroTrabajoUs(us=us8p3, descripcion='decimo registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-21 16:30',archivo_adjunto=None)
reg10.save()
reg11= registroTrabajoUs(us=us8p3, descripcion='decimo 1er registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-22 16:30',archivo_adjunto=None)
reg11.save()
reg12= registroTrabajoUs(us=us8p3, descripcion='decimo 2do registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-23 16:30',archivo_adjunto=None)
reg12.save()
reg13= registroTrabajoUs(us=us8p3, descripcion='decimo 3er registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-24 16:30',archivo_adjunto=None)
reg13.save()
reg14= registroTrabajoUs(us=us8p3, descripcion='decimo cuarto registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-25 16:30',archivo_adjunto=None)
reg14.save()
reg15= registroTrabajoUs(us=us8p3, descripcion='decimo quinto registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-26 16:30',archivo_adjunto=None)
reg15.save()
reg1= registroTrabajoUs(us=us9p3, descripcion='primer registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-10 16:30',archivo_adjunto=None)
reg1.save()
reg2= registroTrabajoUs(us=us9p3, descripcion='segundo registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-11 16:30',archivo_adjunto=None)
reg2.save()
reg3= registroTrabajoUs(us=us9p3, descripcion='tercer registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-12 16:30',archivo_adjunto=None)
reg3.save()
reg4= registroTrabajoUs(us=us9p3, descripcion='cuarto registro', horas_dedicadas=8, fecha_hora_creacion='2015-5-13 16:30',archivo_adjunto=None)
reg4.save()
reg5= registroTrabajoUs(us=us9p3, descripcion='quinto registro', horas_dedicadas=8, fecha_hora_creacion='2015-5-16 16:30',archivo_adjunto=None)
reg5.save()
reg6= registroTrabajoUs(us=us9p3, descripcion='sexto registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-17 16:30',archivo_adjunto=None)
reg6.save()
reg7= registroTrabajoUs(us=us9p3, descripcion='septimo registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-18 16:30',archivo_adjunto=None)
reg7.save()
reg8= registroTrabajoUs(us=us9p3, descripcion='octavo registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-19 16:30',archivo_adjunto=None)
reg8.save()
reg9= registroTrabajoUs(us=us9p3, descripcion='noveno registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-20 16:30',archivo_adjunto=None)
reg9.save()
reg10= registroTrabajoUs(us=us9p3, descripcion='decimo registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-21 16:30',archivo_adjunto=None)
reg10.save()
reg11= registroTrabajoUs(us=us9p3, descripcion='decimo 1er registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-22 16:30',archivo_adjunto=None)
reg11.save()
reg12= registroTrabajoUs(us=us9p3, descripcion='decimo 2do registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-23 16:30',archivo_adjunto=None)
reg12.save()
reg13= registroTrabajoUs(us=us9p3, descripcion='decimo 3er registro', horas_dedicadas=8, fecha_hora_creacion='2015-5-24 16:30',archivo_adjunto=None)
reg13.save()
reg14= registroTrabajoUs(us=us9p3, descripcion='decimo cuarto registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-25 16:30',archivo_adjunto=None)
reg14.save()
reg15= registroTrabajoUs(us=us9p3, descripcion='decimo quinto registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-26 16:30',archivo_adjunto=None)
reg15.save()
reg1= registroTrabajoUs(us=us10p3, descripcion='primer registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-10 16:30',archivo_adjunto=None)
reg1.save()
reg2= registroTrabajoUs(us=us10p3, descripcion='segundo registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-11 16:30',archivo_adjunto=None)
reg2.save()
reg3= registroTrabajoUs(us=us10p3, descripcion='tercer registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-12 16:30',archivo_adjunto=None)
reg3.save()
reg4= registroTrabajoUs(us=us10p3, descripcion='cuarto registro', horas_dedicadas=6, fecha_hora_creacion='2015-5-13 16:30',archivo_adjunto=None)
reg4.save()
reg5= registroTrabajoUs(us=us10p3, descripcion='quinto registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-16 16:30',archivo_adjunto=None)
reg5.save()
reg6= registroTrabajoUs(us=us10p3, descripcion='sexto registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-17 16:30',archivo_adjunto=None)
reg6.save()
reg7= registroTrabajoUs(us=us10p3, descripcion='septimo registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-18 16:30',archivo_adjunto=None)
reg7.save()
reg8= registroTrabajoUs(us=us10p3, descripcion='octavo registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-19 16:30',archivo_adjunto=None)
reg8.save()
reg9= registroTrabajoUs(us=us10p3, descripcion='noveno registro', horas_dedicadas=5, fecha_hora_creacion='2015-5-20 16:30',archivo_adjunto=None)
reg9.save()
reg10= registroTrabajoUs(us=us10p3, descripcion='decimo registro', horas_dedicadas=1, fecha_hora_creacion='2015-5-21 16:30',archivo_adjunto=None)
reg10.save()
reg11= registroTrabajoUs(us=us10p3, descripcion='decimo 1er registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-22 16:30',archivo_adjunto=None)
reg11.save()
reg12= registroTrabajoUs(us=us10p3, descripcion='decimo 2do registro', horas_dedicadas=3, fecha_hora_creacion='2015-5-23 16:30',archivo_adjunto=None)
reg12.save()
reg13= registroTrabajoUs(us=us10p3, descripcion='decimo 3er registro', horas_dedicadas=7, fecha_hora_creacion='2015-5-24 16:30',archivo_adjunto=None)
reg13.save()
reg14= registroTrabajoUs(us=us10p3, descripcion='decimo cuarto registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-25 16:30',archivo_adjunto=None)
reg14.save()
reg15= registroTrabajoUs(us=us10p3, descripcion='decimo quinto registro', horas_dedicadas=2, fecha_hora_creacion='2015-5-26 16:30',archivo_adjunto=None)
reg15.save()
#miembro55 =Miembro(rol=develop, proyecto=proyecto3,usuario=usuario1,horas_por_dia=3)
#miembro55.save()
#miembro25 = Miembro(rol=develop,proyecto=proyecto3,usuario=usuario3,horas_por_dia=3)
#miembro25.save()
us11p3 = us(nombre='US11 para el proyecto 3', proyecto=proyecto3,valor_de_negocio= 5, prioridad= 60, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=12,actividad=actividad6,sprint=sprint7,flujo=flujo5,responsable=miembro5,
estado_de_aprobacion='FIN',estado='DONE')
us11p3.save()
us12p3 = us(nombre='US12 para el proyecto 3', proyecto=proyecto3,valor_de_negocio= 5, prioridad= 20, valor_tecnico= 5, descripcion='vacio',
duracion_horas=10, duracion_horas_en_sprint=12,actividad=actividad6,sprint=sprint7,flujo=flujo5,responsable=miembro333,
estado_de_aprobacion='FIN',estado='DONE')
us12p3.save()
reg1= registroTrabajoUs(us=us11p3, descripcion='primer registro', horas_dedicadas=8, fecha_hora_creacion='2015-5-10 16:30',archivo_adjunto=None)
reg1.save()
reg2= registroTrabajoUs(us=us11p3, descripcion='segundo registro', horas_dedicadas=0, fecha_hora_creacion='2015-5-11 16:30',archivo_adjunto=None)
reg2.save()
reg3= registroTrabajoUs(us=us11p3, descripcion='tercer registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-12 16:30',archivo_adjunto=None)
reg3.save()
reg4= registroTrabajoUs(us=us11p3, descripcion='cuarto registro', horas_dedicadas=0, fecha_hora_creacion='2015-5-13 16:30',archivo_adjunto=None)
reg4.save()
reg1= registroTrabajoUs(us=us12p3, descripcion='primer registro', horas_dedicadas=8, fecha_hora_creacion='2015-5-10 16:30',archivo_adjunto=None)
reg1.save()
reg2= registroTrabajoUs(us=us12p3, descripcion='segundo registro', horas_dedicadas=0, fecha_hora_creacion='2015-5-11 16:30',archivo_adjunto=None)
reg2.save()
reg3= registroTrabajoUs(us=us12p3, descripcion='tercer registro', horas_dedicadas=4, fecha_hora_creacion='2015-5-12 16:30',archivo_adjunto=None)
reg3.save()
reg4= registroTrabajoUs(us=us12p3, descripcion='cuarto registro', horas_dedicadas=0, fecha_hora_creacion='2015-5-13 16:30',archivo_adjunto=None)
reg4.save()
| alforro/sgpa2015 | poblacion.py | Python | gpl-2.0 | 60,019 |
import json
from django.db import models
from django.core.exceptions import ValidationError
class Poll(models.Model):
name = models.CharField(max_length=500, unique=True)
slug = models.SlugField(max_length=500, unique=True)
thread_id = models.CharField(max_length=256, unique=True)
status = models.ForeignKey('PollStatus')
description = models.TextField()
open_date = models.DateField()
close_date = models.DateField()
@property
def results(self):
choices = []
for choice in self.choice_set.all():
choices.append({choice: choice.vote_set.count()})
return choices
@property
def json_results(self):
# merge this with above method at some point
choices = []
for choice in self.choice_set.all():
choices.append({'choice': choice.name,
'votes': choice.vote_set.count()})
return json.dumps(choices)
@property
def is_active(self):
return self.status.name == 'Active'
def __unicode__(self):
return self.name
class Choice(models.Model):
poll = models.ForeignKey('Poll')
name = models.CharField(max_length=500)
@property
def vote_count(self):
return self.vote_set.filter(user__blacklisted=False).count()
def __unicode__(self):
return "%s - %s" % (self.poll, self.name)
class KappaUser(models.Model):
username = models.CharField(max_length=128, unique=True)
blacklisted = models.BooleanField(default=False)
def __unicode__(self):
return self.username
class Vote(models.Model):
user = models.ForeignKey('KappaUser')
choice = models.ForeignKey('Choice')
comment_id = models.CharField(max_length=100)
def validate_unique(self, *args, **kwargs):
super(Vote, self).validate_unique(*args, **kwargs)
sibling_choices = self.choice.poll.choice_set
# values_list returns a list like [(1,), (2,)]
# made of user ids
f = lambda x: x[0]
voters = map(f, sibling_choices.values_list('vote__user'))
print voters
if self.user.id in voters:
raise ValidationError({'user':
'This user already voted in this poll'})
def __unicode__(self):
return "%s - %s - %s" % (self.user, self.choice, self.choice.poll)
class PollStatus(models.Model):
name = models.CharField(max_length=128)
def __unicode__(self):
return self.name
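# --- Illustrative sketch, not part of the original kappapolls module ---
# A minimal example of how the one-vote-per-poll guard in Vote.validate_unique
# is expected to behave. It assumes a configured Django project with these
# models migrated; every literal value below is hypothetical.
def _example_duplicate_vote_guard():
    import datetime
    active = PollStatus.objects.create(name='Active')
    poll = Poll.objects.create(name='Example poll', slug='example-poll',
                               thread_id='thread-1', status=active,
                               description='demo',
                               open_date=datetime.date(2015, 1, 1),
                               close_date=datetime.date(2015, 2, 1))
    red = Choice.objects.create(poll=poll, name='Red')
    blue = Choice.objects.create(poll=poll, name='Blue')
    voter = KappaUser.objects.create(username='viewer1')
    Vote.objects.create(user=voter, choice=red, comment_id='c1')
    try:
        # A second vote by the same user in the same poll fails validation.
        Vote(user=voter, choice=blue, comment_id='c2').validate_unique()
    except ValidationError:
        return poll.json_results  # e.g. '[{"choice": "Red", "votes": 1}, ...]'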
| kappapolls/kappapolls | polls/models.py | Python | gpl-2.0 | 2,475 |
#!/usr/bin/env python
#
# Copyright 2011 Mike Wakerly <opensource@hoho.com>
#
# This file is part of the Pykeg package of the Kegbot project.
# For more information on Pykeg or Kegbot, see http://kegbot.org/
#
# Pykeg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Pykeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pykeg. If not, see <http://www.gnu.org/licenses/>.
"""Kegboard tester program.
Currently just cycles relays on and off."""
from pykeg.core import importhacks
import gflags
import serial
import time
from pykeg.core import kb_app
from pykeg.hw.kegboard import kegboard
FLAGS = gflags.FLAGS
class KegboardMonitorApp(kb_app.App):
def _SetupSerial(self):
self._logger.info('Setting up serial port...')
self._serial_fd = serial.Serial(FLAGS.kegboard_device, FLAGS.kegboard_speed)
self._reader = kegboard.KegboardReader(self._serial_fd)
def _MainLoop(self):
self._SetupSerial()
self._logger.info('Starting reader loop...')
ping_message = kegboard.PingCommand()
self._serial_fd.write(ping_message.ToBytes())
while not self._do_quit:
for relay in (0, 1):
for mode in (1, 0):
cmd = kegboard.SetOutputCommand()
cmd.SetValue('output_id', relay)
cmd.SetValue('output_mode', mode)
self._logger.info('Sending relay command: %s' % cmd)
self._reader.WriteMessage(cmd)
time.sleep(1.0)
self._serial_fd.close()
self._logger.info('Reader loop ended.')
if __name__ == '__main__':
KegboardMonitorApp.BuildAndRun()
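# Illustrative invocation (not part of the original file). The flag names below
# are assumed from the FLAGS.kegboard_device / FLAGS.kegboard_speed references
# above (they are defined elsewhere in the kegboard package), and the device
# path is hypothetical:
#   kegboard-tester.py --kegboard_device=/dev/ttyUSB0 --kegboard_speed=115200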
| Alwnikrotikz/kegbot | pykeg/src/pykeg/bin/kegboard-tester.py | Python | gpl-2.0 | 1,995 |
# 2013.05.22 08:34:34 UTC
#Embedded file name: /usr/lib/enigma2/python/Plugins/Extensions/IniAirPlayer/__init__.py
from enigma import getDesktop
from skin import loadSkin
import os
from sys import version_info
from Components.config import config, ConfigSubsection, ConfigSelection
def getSkins():
print '[AirPlayer] search for Skins'
skins = []
skindir = '/usr/lib/enigma2/python/Plugins/Extensions/IniAirPlayer/Skins/'
for o in os.listdir(skindir):
if os.path.isdir(skindir + o):
print '[AirPlayer] found Skin', o
skins.append((o, o))
return skins
currentArch = 'sh4p27'
def getSkinPath(name):
skinName = name
dSize = getDesktop(0).size()
skinpath = '/usr/lib/enigma2/python/Plugins/Extensions/IniAirPlayer/Skins/%s/%sx%s/skin.xml' % (skinName, str(dSize.width()), str(dSize.height()))
if os.path.exists(skinpath):
return skinpath
else:
print '[AirPlayer] skin ', skinpath, 'does not exist'
return None
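# For illustration (not part of the original plugin): on a hypothetical 1280x720
# desktop, getSkinPath('Classic') looks for
#   /usr/lib/enigma2/python/Plugins/Extensions/IniAirPlayer/Skins/Classic/1280x720/skin.xml
# and returns that path if the file exists, or None otherwise.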
def installIpk(link):
cmd = '\nBIN=""\nopkg > /dev/null 2>/dev/null\nif [ $? == "1" ]; then\n BIN="opkg"\nelse\n ipkg > /dev/null 2>/dev/null\n if [ $? == "1" ]; then\n BIN="ipkg"\n fi\nfi\necho "Binary: $BIN"\n\nif [ $BIN != "" ]; then\n $BIN update\n if [ $BIN == "opkg" ]; then\n OPARAM="--force-overwrite --force-downgrade --force-reinstall"\n else\n OPARAM="-force-overwrite -force-downgrade -force-reinstall"\n fi\n ( $BIN install %s $OPARAM; )\nfi' % link
os.system(cmd)
config.plugins.airplayer = ConfigSubsection()
config.plugins.airplayer.skin = ConfigSelection(default='Classic', choices=getSkins())
skinPath = getSkinPath('Classic')
try:
path = getSkinPath(config.plugins.airplayer.skin.value)
if path is not None:
skinPath = path
except Exception as e:
print '[AirPlayer] error reading skin ', e
# newer OE-A images have some kind of issue with libcrypto-compat
from Tools.Directories import fileExists
if not fileExists("/usr/lib/libssl.so.0.9.8"):
os.system("ln -s /usr/lib/libssl.so.1.0.0 /usr/lib/libssl.so.0.9.8")
print '[AirPlayer] using skin ', skinPath
loadSkin(skinPath)
print '[AirPlayer] running python ', version_info
try:
import ctypes
except Exception as e:
print '[AirPlayer] ctypes missing'
print '[AirPlayer] inst python-ctypes 2.7'
installIpk('http://airplayer.googlecode.com/files/python-ctypes_2.7_mips32el.ipk')
try:
import plistlib
except Exception as e:
print '[AirPlayer] python-plistlibb missing'
print '[AirPlayer] install python-plistlibb 2.7'
installIpk('http://airplayer.googlecode.com/files/python-plistlibb_2.7_all.ipk')
try:
import shutil
except Exception as e:
print '[AirPlayer] python-shell missing'
print '[AirPlayer] install python-shell 2.7'
installIpk('http://airplayer.googlecode.com/files/python-shell_2.7_all.ipk')
try:
import subprocess
except Exception as e:
print '[AirPlayer] python-subprocess missing'
print '[AirPlayer] install python-subprocess 2.7'
installIpk('http://airplayer.googlecode.com/files/python-subprocess_2.7_all.ipk')
if currentArch != 'sh4' and currentArch != 'sh4p27':
if not os.path.isfile('/usr/lib/gstreamer-0.10/libgstfragmented.so'):
installIpk('gst-plugins-bad-fragmented')
installIpk('gst-plugins-fragmented')
try:
if os.path.exists('/etc/avahi/services/airplay.service'):
print '[AirPlayer] try to remove avahi service file'
os.remove('/etc/avahi/services/airplay.service')
except Exception:
pass
| popazerty/e2-gui | lib/python/Plugins/Extensions/IniAirPlayer/__init__.py | Python | gpl-2.0 | 3,530 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2004 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
import csv
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from .tabbeddoc import *
class CSVTab(TabbedDoc):
def __init__(self, columns):
TabbedDoc.__init__(self, columns)
self.filename = None
self.f = None
self.dlist = []
self.writer = None
def open(self, filename):
if filename[-4:] != ".csv":
self.filename = filename + ".csv"
else:
self.filename = filename
self.f = open(self.filename, "wb")
self.writer = csv.writer(self.f)
def close(self):
assert(self.f)
self.f.close()
def start_row(self):
self.dlist = []
def end_row(self):
self.writer.writerow(self.dlist)
def write_cell(self, text):
self.dlist.append(text)
def start_page(self):
pass
def end_page(self):
pass
if __name__ == "__main__":
file = CSVTab(3)  # CSVTab takes a single column-count argument; the rows below have three cells
file.open("test.csv")
file.start_page()
for i in [ ('one', 'two', 'three'), ('fo"ur', 'fi,ve', 'six') ]:
file.start_row()
for j in i:
file.write_cell(j)
file.end_row()
file.end_page()
file.close()
| Forage/Gramps | gramps/gen/utils/docgen/csvtab.py | Python | gpl-2.0 | 2,287 |
from django.db import models
from django.core.mail import send_mail
from django.core.validators import RegexValidator
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
class AuthUserManager(BaseUserManager):
def _create_user(self, username, email, password,
is_staff, is_superuser, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
"""
if not username:
raise ValueError('The given username must be set')
username = self.normalize_email(username)
email = self.normalize_email(email)
user = self.model(username=username, email=email,
is_staff=is_staff, is_active=True,
is_superuser=is_superuser, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, username, email=None, password=None, **extra_fields):
return self._create_user(username, email, password, False, False,
**extra_fields)
def create_superuser(self, username, email, password, **extra_fields):
return self._create_user(username, email, password, True, True,
**extra_fields)
class AuthUser(AbstractBaseUser, PermissionsMixin):
# Redefine the basic fields that would normally be defined in User
username = models.EmailField(
verbose_name='Username (email address)',
help_text=_('We use your email address as the username.'),
unique=True,
max_length=254
)
email = models.EmailField(verbose_name='email address', max_length=254, blank=True)
first_name = models.CharField(max_length=30, null=True, blank=True)
last_name = models.CharField(max_length=50, null=True, blank=True)
# last_login = models.DateTimeField(auto_now=True)
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
is_active = models.BooleanField(default=True, null=False)
is_staff = models.BooleanField(default=False, null=False)
# Our own fields
objects = AuthUserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"""
Returns the short name for the user.
"""
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email], **kwargs)
def __unicode__(self):
return self.email
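# --- Illustrative sketch, not part of the original accounts app ---
# How the custom manager above is expected to be used, assuming a configured
# Django project whose AUTH_USER_MODEL points at this AuthUser model; the
# addresses and password below are hypothetical.
def _example_create_users():
    member = AuthUser.objects.create_user(
        username='member@example.com', email='member@example.com',
        password='not-a-real-password', first_name='Jane', last_name='Doe')
    admin = AuthUser.objects.create_superuser(
        username='admin@example.com', email='admin@example.com',
        password='not-a-real-password')
    return member.get_full_name(), admin.is_superuser  # ('Jane Doe', True)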
| cs98jrb/Trinity | mysite/accounts/models.py | Python | gpl-2.0 | 2,968 |
#!/usr/bin/env python3
import sys
import math
def readFile(fileName):
data=(open(fileName).readlines())
keyData=data[0].split()
brutData=[line.split() for line in data[1:]]
res={}
for index in range(len(keyData)):
fileNameKey=fileName.replace("res","")
dataIndex=[line[index] for line in brutData]
res[keyData[index]]=(min(dataIndex),max(dataIndex))
return res
def computeEvalError(dataNative, data):
res={}
for key in dataNative.keys():
resIEEE=float(dataNative[key][0])
evalError= - math.log2(max(abs(float(data[key][1]) - resIEEE),
abs(float(data[key][0]) - resIEEE)) / resIEEE)
res[key]=evalError
return res
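# Illustrative helper, not part of the original script: a worked example of the
# significant-bits estimate used by computeEvalError, on made-up numbers.
def exampleSignificantBits():
    ref = 1.0                      # hypothetical IEEE reference value
    low, up = 0.999999, 1.000001   # hypothetical min/max over perturbed runs
    # -log2(relative error bound) ~= number of correct leading bits (~19.9 here)
    return -math.log2(max(abs(up - ref), abs(low - ref)) / ref)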
def loadRef(fileName, num=2):
res={}
for line in open(fileName):
spline=line.split(":")
typeRealtype=spline[0].split()[0]
correction=spline[0].split()[1]
nbBitStr=spline[1].strip()
if nbBitStr in ["24","53"]:
res[(typeRealtype, correction)]=float(nbBitStr)
continue
[valueLow,valueUp]=nbBitStr[1:-1].split(",")
if(float(valueUp)!=float(valueLow)):
print("Please Increase the mpfi precision")
sys.exit()
value=float(valueUp)
res[(typeRealtype, correction)]=value
return res
def main(reference=None):
output=open("tabAster.tex","w")
outputReg=open("testReg","w")
keys=["Native", "Randominterlibm", "Randomverrou", "Randomverrou+interlibm"]
data={}
strLatex=""
for i in range(len(keys)):
key=keys[i]
data[key]=readFile("res"+key+".dat")
# for key in sorted(keys[1:]):
for i in range(1,len(keys)):
key=keys[i]
outputReg.write(key+"\n")
evalError=computeEvalError(data["Native"], data[key])
for keyCase in sorted(evalError.keys()):
outputReg.write(keyCase +" "+str(evalError[keyCase])+"\n")
output.write(r"\begin{table}" +" \n")
output.write(r"\begin{center}" +" \n")
output.write(r"\begin{tabular}{l@{~}lccccc}\toprule" +" \n")
output.write(r"& & \multicolumn{2}{c}{single precision}& \multicolumn{2}{c}{double precision}\\"+"\n"+
r"&& first & second & first & second \\ \midrule"+"\n")
if reference!=None:
output.write("&IEEE Error & %.2f & %.2f & %.2f & %.2f"%(
reference[("Float","Before")],reference[("Float","After")],
reference[("Double","Before")], reference[("Double","After")])
+ r"\\\midrule"+"\n")
for i in range(1,len(keys)):
key=keys[i]
evalError=computeEvalError(data["Native"], data[key])
keyConvert={"Randominterlibm": r"\textit{(i)}&interlibm",
"Randomverrou": r"\textit{(ii)}&verrou",
"Randomverrou+interlibm":r"\textit{(iii)}&verrou+interlib"}
lineStr=keyConvert[key]+ " "
for typeFP in ["Float","Double"]:
lineStr+=r"&%.2f & %.2f "%(evalError["BeforeCorrection_"+typeFP], evalError["AfterCorrection_"+typeFP])
lineStr+=r"\\"+"\n"
output.write(lineStr)
output.write(r"\bottomrule"+"\n")
output.write(r"\end{tabular}"+"\n")
output.write(r"\end{center}" +" \n")
output.write(r"\caption{Number of significant bits for 4~implementations of function $f(a, a+6.ulp(a))$, as assessed by 3~techniques.}"+"\n")
output.write(r"\label{sdAster}"+"\n")
output.write(r"\end{table}"+"\n")
if __name__=="__main__":
reference=loadRef("reference.dat")
if len(reference)!=4:
reference=None
main(reference)
| edf-hpc/verrou | unitTest/check-libM/genTab.py | Python | gpl-2.0 | 3,734 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This file implements all methods necessary for working with users and
sessions in Invenio. It contains methods for login and registration,
used when a user logs into or registers with the system, and for
checking whether that user is a guest.
It also provides everything needed for session management, working
together with websession.
It also contains Apache-related user authentication stuff.
"""
__revision__ = "$Id$"
import cgi
import urllib
import urlparse
import socket
import smtplib
import re
import random
import datetime
from flask import Request
from six import iteritems
from socket import gaierror
from invenio.base.wrappers import lazy_import
from invenio.config import \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_ACCESS_CONTROL_LEVEL_GUESTS, \
CFG_ACCESS_CONTROL_LEVEL_SITE, \
CFG_ACCESS_CONTROL_LIMIT_REGISTRATION_TO_DOMAIN, \
CFG_ACCESS_CONTROL_NOTIFY_ADMIN_ABOUT_NEW_ACCOUNTS, \
CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT, \
CFG_SITE_ADMIN_EMAIL, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_SITE_NAME_INTL, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_SECURE_URL, \
CFG_SITE_URL, \
CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS, \
CFG_WEBSESSION_ADDRESS_ACTIVATION_EXPIRE_IN_DAYS, \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE, \
CFG_BIBAUTHORID_ENABLED, \
CFG_SITE_RECORD
try:
from flask import session
except ImportError:
pass
from invenio.legacy.dbquery import run_sql, OperationalError
from invenio.utils.serializers import serialize_via_marshal, \
deserialize_via_marshal
from invenio.base.i18n import gettext_set_language, wash_languages, wash_language
from invenio.ext.email import send_email
from invenio.ext.logging import register_exception
from invenio.ext.sqlalchemy import db
from invenio.legacy.websession.dblayer import get_groups
from invenio.legacy.external_authentication import InvenioWebAccessExternalAuthError
from invenio.modules.accounts.models import User
from invenio.legacy.websession.webuser_config import CFG_WEBUSER_USER_TABLES
acc_get_role_id = lazy_import('invenio.modules.access.control:acc_get_role_id')
acc_get_action_roles = lazy_import('invenio.modules.access.control:acc_get_action_roles')
acc_get_action_id = lazy_import('invenio.modules.access.control:acc_get_action_id')
acc_is_user_in_role = lazy_import('invenio.modules.access.control:acc_is_user_in_role')
acc_find_possible_activities = lazy_import('invenio.modules.access.control:acc_find_possible_activities')
mail_cookie_create_mail_activation = lazy_import('invenio.modules.access.mailcookie:mail_cookie_create_mail_activation')
acc_firerole_check_user = lazy_import('invenio.modules.access.firerole:acc_firerole_check_user')
load_role_definition = lazy_import('invenio.modules.access.firerole:load_role_definition')
SUPERADMINROLE = lazy_import('invenio.modules.access.local_config:SUPERADMINROLE')
CFG_EXTERNAL_AUTH_USING_SSO = lazy_import('invenio.modules.access.local_config:CFG_EXTERNAL_AUTH_USING_SSO')
CFG_EXTERNAL_AUTHENTICATION = lazy_import('invenio.modules.access.local_config:CFG_EXTERNAL_AUTHENTICATION')
CFG_WEBACCESS_MSGS = lazy_import('invenio.modules.access.local_config:CFG_WEBACCESS_MSGS')
CFG_WEBACCESS_WARNING_MSGS = lazy_import('invenio.modules.access.local_config:CFG_WEBACCESS_WARNING_MSGS')
CFG_EXTERNAL_AUTH_DEFAULT = lazy_import('invenio.modules.access.local_config:CFG_EXTERNAL_AUTH_DEFAULT')
CFG_TEMP_EMAIL_ADDRESS = lazy_import('invenio.modules.access.local_config:CFG_TEMP_EMAIL_ADDRESS')
# import invenio.legacy.template
# tmpl = invenio.legacy.template.load('websession')
tmpl = lazy_import('invenio.legacy.websession.templates:Template')()
# tmpl = object
re_invalid_nickname = re.compile(""".*[,'@]+.*""")
# pylint: disable=C0301
def createGuestUser():
"""Create a guest user , insert into user null values in all fields
createGuestUser() -> GuestUserID
"""
if CFG_ACCESS_CONTROL_LEVEL_GUESTS == 0:
try:
return run_sql("insert into user (email, note) values ('', '1')")
except OperationalError:
return None
else:
try:
return run_sql("insert into user (email, note) values ('', '0')")
except OperationalError:
return None
def page_not_authorized(req, referer='', uid='', text='', navtrail='', ln=CFG_SITE_LANG,
navmenuid=""):
"""Show error message when user is not authorized to do something.
@param referer: in case the displayed message propose a login link, this
is the url to return to after logging in. If not specified it is guessed
from req.
@param uid: the uid of the user. If not specified it is guessed from req.
@param text: the message to be displayed. If not specified it will be
guessed from the context.
"""
from invenio.legacy.webpage import page
_ = gettext_set_language(ln)
if not referer:
referer = req.unparsed_uri
if not CFG_ACCESS_CONTROL_LEVEL_SITE:
title = CFG_WEBACCESS_MSGS[5]
if not uid:
uid = getUid(req)
try:
res = run_sql("SELECT email FROM user WHERE id=%s AND note=1", (uid,))
if res and res[0][0]:
if text:
body = text
else:
body = "%s %s" % (CFG_WEBACCESS_WARNING_MSGS[9] % cgi.escape(res[0][0]),
("%s %s" % (CFG_WEBACCESS_MSGS[0] % urllib.quote(referer), CFG_WEBACCESS_MSGS[1])))
else:
if text:
body = text
else:
if CFG_ACCESS_CONTROL_LEVEL_GUESTS == 1:
body = CFG_WEBACCESS_MSGS[3]
else:
body = CFG_WEBACCESS_WARNING_MSGS[4] + CFG_WEBACCESS_MSGS[2]
except OperationalError as e:
body = _("Database problem") + ': ' + str(e)
elif CFG_ACCESS_CONTROL_LEVEL_SITE == 1:
title = CFG_WEBACCESS_MSGS[8]
body = "%s %s" % (CFG_WEBACCESS_MSGS[7], CFG_WEBACCESS_MSGS[2])
elif CFG_ACCESS_CONTROL_LEVEL_SITE == 2:
title = CFG_WEBACCESS_MSGS[6]
body = "%s %s" % (CFG_WEBACCESS_MSGS[4], CFG_WEBACCESS_MSGS[2])
return page(title=title,
language=ln,
uid=getUid(req),
body=body,
navtrail=navtrail,
req=req,
navmenuid=navmenuid)
def getUid(req):
"""Return user ID taking it from the cookie of the request.
Includes control mechanism for the guest users, inserting in
the database table when need be, raising the cookie back to the
client.
User ID is set to 0 when client refuses cookie or we are in the
read-only site operation mode.
User ID is set to -1 when we are in the permission denied site
operation mode.
getUid(req) -> userId
"""
#if hasattr(req, '_user_info'):
# return req._user_info['_uid']
if CFG_ACCESS_CONTROL_LEVEL_SITE == 1: return 0
if CFG_ACCESS_CONTROL_LEVEL_SITE == 2: return -1
guest = 0
from flask import session
uid = session.uid
if not session.need_https:
if uid == -1: # first time, so create a guest user
if CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS:
uid = session['uid'] = createGuestUser()
session.set_remember_me(False)
guest = 1
else:
if CFG_ACCESS_CONTROL_LEVEL_GUESTS == 0:
session['uid'] = 0
session.set_remember_me(False)
return 0
else:
return -1
else:
if not hasattr(req, '_user_info') and 'user_info' in session:
req._user_info = session['user_info']
req._user_info = collect_user_info(req, refresh=True)
if guest == 0:
guest = isGuestUser(uid)
if guest:
if CFG_ACCESS_CONTROL_LEVEL_GUESTS == 0:
return uid
elif CFG_ACCESS_CONTROL_LEVEL_GUESTS >= 1:
return -1
else:
res = run_sql("SELECT note FROM user WHERE id=%s", (uid,))
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS == 0:
return uid
elif CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 1 and res and res[0][0] in [1, "1"]:
return uid
else:
return -1
from invenio.ext.login import current_user, login_user, logout_user
getUid = lambda req: current_user.get_id()
def setUid(req, uid, remember_me=False):
"""It sets the userId into the session, and raise the cookie to the client.
"""
if uid > 0:
login_user(uid, remember_me)
else:
logout_user()
return uid
def session_param_del(req, key):
"""
Remove a given key from the session.
"""
del session[key]
def session_param_set(req, key, value):
"""
Set a VALUE for the session param KEY for the current session.
"""
session[key] = value
def session_param_get(req, key, default = None):
"""
Return session parameter value associated with session parameter KEY for the current session.
If the key doesn't exists return the provided default.
"""
return session.get(key, default)
def session_param_list(req):
"""
List all available session parameters.
"""
return session.keys()
def get_last_login(uid):
"""Return the last_login datetime for uid if any, otherwise return the Epoch."""
res = run_sql('SELECT last_login FROM user WHERE id=%s', (uid,), 1)
if res and res[0][0]:
return res[0][0]
else:
return datetime.datetime(1970, 1, 1)
def get_user_info(uid, ln=CFG_SITE_LANG):
"""Get infos for a given user.
@param uid: user id (int)
@return: tuple: (uid, nickname, display_name)
"""
_ = gettext_set_language(ln)
query = """SELECT id, nickname
FROM user
WHERE id=%s"""
res = run_sql(query, (uid,))
if res:
if res[0]:
user = list(res[0])
if user[1]:
user.append(user[1])
else:
user[1] = str(user[0])
user.append(_("user") + ' #' + str(user[0]))
return tuple(user)
return (uid, '', _("N/A"))
def get_uid_from_email(email):
"""Return the uid corresponding to an email.
Return -1 when the email does not exist."""
try:
res = run_sql("SELECT id FROM user WHERE email=%s", (email,))
if res:
return res[0][0]
else:
return -1
except OperationalError:
register_exception()
return -1
def isGuestUser(uid, run_on_slave=True):
"""It Checks if the userId corresponds to a guestUser or not
isGuestUser(uid) -> boolean
"""
out = 1
try:
res = run_sql("SELECT email FROM user WHERE id=%s LIMIT 1", (uid,), 1,
run_on_slave=run_on_slave)
if res:
if res[0][0]:
out = 0
except OperationalError:
register_exception()
return out
def isUserSubmitter(user_info):
"""Return True if the user is a submitter for something; False otherwise."""
u_email = get_email(user_info['uid'])
res = run_sql("SELECT email FROM sbmSUBMISSIONS WHERE email=%s LIMIT 1", (u_email,), 1)
return len(res) > 0
def isUserReferee(user_info):
"""Return True if the user is a referee for something; False otherwise."""
if CFG_CERN_SITE:
return True
else:
for (role_id, role_name, role_description) in acc_get_action_roles(acc_get_action_id('referee')):
if acc_is_user_in_role(user_info, role_id):
return True
return False
def isUserAdmin(user_info):
"""Return True if the user has some admin rights; False otherwise."""
return acc_find_possible_activities(user_info) != {}
def isUserSuperAdmin(user_info):
"""Return True if the user is superadmin; False otherwise."""
if run_sql("""SELECT r.id
FROM accROLE r LEFT JOIN user_accROLE ur
ON r.id = ur.id_accROLE
WHERE r.name = %s AND
ur.id_user = %s AND ur.expiration>=NOW() LIMIT 1""", (SUPERADMINROLE, user_info['uid']), 1, run_on_slave=True):
return True
return acc_firerole_check_user(user_info, load_role_definition(acc_get_role_id(SUPERADMINROLE)))
def nickname_valid_p(nickname):
"""Check whether wanted NICKNAME supplied by the user is valid.
At the moment we just check whether it is not empty, does not
contain blanks or @, is not equal to `guest', etc.
This check relies on re_invalid_nickname regexp (see above)
Return 1 if nickname is okay, return 0 if it is not.
"""
if nickname and \
not(nickname.startswith(' ') or nickname.endswith(' ')) and \
nickname.lower() != 'guest':
if not re_invalid_nickname.match(nickname):
return 1
return 0
def email_valid_p(email):
"""Check whether wanted EMAIL address supplied by the user is valid.
At the moment we just check whether it contains '@' and whether
it doesn't contain blanks. We also check the email domain if
CFG_ACCESS_CONTROL_LIMIT_REGISTRATION_TO_DOMAIN is set.
Return 1 if email is okay, return 0 if it is not.
"""
if (email.find("@") <= 0) or (email.find(" ") > 0):
return 0
elif CFG_ACCESS_CONTROL_LIMIT_REGISTRATION_TO_DOMAIN:
if not email.endswith(CFG_ACCESS_CONTROL_LIMIT_REGISTRATION_TO_DOMAIN):
return 0
return 1
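# Illustrative note (not in the original module): with the checks above,
# email_valid_p("jane.doe@example.org") returns 1, while strings lacking an '@'
# or containing blanks, e.g. "jane.doe" or "jane doe@example.org", return 0;
# when CFG_ACCESS_CONTROL_LIMIT_REGISTRATION_TO_DOMAIN is set, addresses
# outside that domain are rejected as well.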
def confirm_email(email):
"""Confirm the email. It returns None when there are problems, otherwise
it returns the uid involved."""
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS == 0:
activated = 1
elif CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS == 1:
activated = 0
elif CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 2:
return -1
run_sql('UPDATE user SET note=%s where email=%s', (activated, email))
res = run_sql('SELECT id FROM user where email=%s', (email,))
if res:
if CFG_ACCESS_CONTROL_NOTIFY_ADMIN_ABOUT_NEW_ACCOUNTS:
send_new_admin_account_warning(email, CFG_SITE_ADMIN_EMAIL)
return res[0][0]
else:
return None
def registerUser(req, email, passw, nickname, register_without_nickname=False,
login_method=None, ln=CFG_SITE_LANG):
"""Register user with the desired values of NICKNAME, EMAIL and
PASSW.
If REGISTER_WITHOUT_NICKNAME is set to True, then ignore
desired NICKNAME and do not set any. This is suitable for
external authentications so that people can login without
having to register an internal account first.
Return 0 if the registration is successful, 1 if email is not
valid, 2 if nickname is not valid, 3 if email is already in the
database, 4 if nickname is already in the database, 5 when
users cannot register themselves because of the site policy, 6 when the
site is having problem contacting the user.
If login_method is None or is equal to the key corresponding to local
authentication, then CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS is taken
in account for deciding the behaviour about registering.
"""
# is email valid?
email = email.lower()
if not email_valid_p(email):
return 1
_ = gettext_set_language(ln)
# is email already taken?
res = run_sql("SELECT email FROM user WHERE email=%s", (email,))
if len(res) > 0:
return 3
if register_without_nickname:
# ignore desired nick and use default empty string one:
nickname = ""
else:
# is nickname valid?
if not nickname_valid_p(nickname):
return 2
# is nickname already taken?
res = run_sql("SELECT nickname FROM user WHERE nickname=%s", (nickname,))
if len(res) > 0:
return 4
activated = 1 # By default activated
if not login_method or not CFG_EXTERNAL_AUTHENTICATION[login_method]: # local login
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 2:
return 5
elif CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT:
activated = 2 # Email confirmation required
elif CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 1:
activated = 0 # Administrator confirmation required
if CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT:
address_activation_key = mail_cookie_create_mail_activation(
email,
cookie_timeout=datetime.timedelta(
days=CFG_WEBSESSION_ADDRESS_ACTIVATION_EXPIRE_IN_DAYS
)
)
try:
ip_address = req.remote_host or req.remote_ip
except:
ip_address = None
try:
if not send_email(CFG_SITE_SUPPORT_EMAIL, email, _("Account registration at %(sitename)s", sitename=CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME)),
tmpl.tmpl_account_address_activation_email_body(
email, address_activation_key,
ip_address, ln)):
return 1
except (smtplib.SMTPException, socket.error):
return 6
# okay, go on and register the user: FIXME
user = User(nickname=nickname,
email=email,
password=passw,
note=activated)
try:
db.session.add(user)
db.session.commit()
except:
db.session.rollback()
return 7
if activated == 1: # Ok we consider the user as logged in :-)
setUid(req, user.id)  # uid of the freshly created user
return 0
def updateDataUser(uid, email, nickname):
"""
Update user data. Used when a user changed his email or password
or nickname.
"""
email = email.lower()
if email == 'guest':
return 0
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS < 2:
run_sql("update user set email=%s where id=%s", (email, uid))
if nickname and nickname != '':
run_sql("update user set nickname=%s where id=%s", (nickname, uid))
return 1
def updatePasswordUser(uid, password):
"""Update the password of a user."""
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS < 3:
run_sql("update user set password=AES_ENCRYPT(email,%s) where id=%s", (password, uid))
return 1
def merge_usera_into_userb(id_usera, id_userb):
"""
Merges all the information of usera into userb.
Deletes afterwards any reference to usera.
The information about SQL tables is contained in the CFG_WEBUSER_USER_TABLES
variable.
"""
preferencea = get_user_preferences(id_usera)
preferenceb = get_user_preferences(id_userb)
preferencea.update(preferenceb)
set_user_preferences(id_userb, preferencea)
try:
## FIXME: for the time being, let's disable locking
## until we will move to InnoDB and we will have
## real transitions
#for table, dummy in CFG_WEBUSER_USER_TABLES:
#run_sql("LOCK TABLE %s WRITE" % table)
index = 0
table = ''
try:
for index, (table, column) in enumerate(CFG_WEBUSER_USER_TABLES):
run_sql("UPDATE %(table)s SET %(column)s=%%s WHERE %(column)s=%%s; DELETE FROM %(table)s WHERE %(column)s=%%s;" % {
'table': table,
'column': column
}, (id_userb, id_usera, id_usera))
except Exception as err:
msg = "Error when merging id_user=%s into id_userb=%s for table %s: %s\n" % (id_usera, id_userb, table, err)
msg += "users where succesfully already merged for tables: %s\n" % ', '.join([table[0] for table in CFG_WEBUSER_USER_TABLES[:index]])
msg += "users where not succesfully already merged for tables: %s\n" % ', '.join([table[0] for table in CFG_WEBUSER_USER_TABLES[index:]])
register_exception(alert_admin=True, prefix=msg)
raise
finally:
## FIXME: locking disabled
#run_sql("UNLOCK TABLES")
pass
def loginUser(req, p_un, p_pw, login_method):
"""It is a first simple version for the authentication of user. It returns the id of the user,
for checking afterwards if the login is correct
"""
# p_un passed may be an email or a nickname:
p_email = get_email_from_username(p_un)
# go on with the old stuff based on p_email:
if not login_method in CFG_EXTERNAL_AUTHENTICATION:
return (None, p_email, p_pw, 12)
if CFG_EXTERNAL_AUTHENTICATION[login_method]: # External Authentication
try:
result = CFG_EXTERNAL_AUTHENTICATION[login_method].auth_user(p_email, p_pw, req)
if (result == (None, None) or result is None) and not login_method in ['oauth1', 'oauth2', 'openid']:
# There is no need to call auth_user with username for
# OAuth1, OAuth2 and OpenID authentication
result = CFG_EXTERNAL_AUTHENTICATION[login_method].auth_user(p_un, p_pw, req) ## We try to log in with either the email or the nickname
if isinstance(result, (tuple, list)) and len(result) == 2:
p_email, p_extid = result
else:
## For backward compatibility we use the email as external
## identifier if it was not returned already by the plugin
p_email, p_extid = str(result), str(result)
if p_email:
p_email = p_email.lower()
if not p_extid:
p_extid = p_email
elif not p_extid:
try:
# OpenID and OAuth authentications have own error messages
return (None, p_email, p_pw, CFG_EXTERNAL_AUTHENTICATION[login_method].get_msg(req))
except NotImplementedError:
return(None, p_email, p_pw, 15)
else:
# External login was successful, but we couldn't fetch the email
# address.
generate_string = lambda: reduce((lambda x, y: x+y), [random.choice("qwertyuiopasdfghjklzxcvbnm1234567890") for i in range(32)])
random_string = generate_string()
p_email = CFG_TEMP_EMAIL_ADDRESS % random_string
while run_sql("SELECT * FROM user WHERE email=%s", (p_email,)):
random_string = generate_string()
p_email = CFG_TEMP_EMAIL_ADDRESS % random_string
except InvenioWebAccessExternalAuthError:
register_exception(req=req, alert_admin=True)
raise
if p_email: # Authenticated externally
query_result = run_sql("SELECT id_user FROM userEXT WHERE id=%s and method=%s", (p_extid, login_method))
if query_result:
## User was already registered with this external method.
id_user = query_result[0][0]
old_email = run_sql("SELECT email FROM user WHERE id=%s", (id_user,))[0][0]
# Look if the email address matches with the template given.
# If it matches, use the email address saved in the database.
regexp = re.compile(CFG_TEMP_EMAIL_ADDRESS % r"\w*")
if regexp.match(p_email):
p_email = old_email
if old_email != p_email:
## User has changed email of reference.
res = run_sql("SELECT id FROM user WHERE email=%s", (p_email,))
if res:
## User was also registered with the other email.
## We should merge the two!
new_id = res[0][0]
if new_id == id_user:
raise AssertionError("We should not reach this situation: new_id=%s, id_user=%s, old_email=%s, p_email=%s" % (new_id, id_user, old_email, p_email))
merge_usera_into_userb(id_user, new_id)
run_sql("DELETE FROM user WHERE id=%s", (id_user, ))
for row in run_sql("SELECT method FROM userEXT WHERE id_user=%s", (id_user, )):
## For all known accounts of id_user not conflicting with new_id we move them to refer to new_id
if not run_sql("SELECT method FROM userEXT WHERE id_user=%s AND method=%s", (new_id, row[0])):
run_sql("UPDATE userEXT SET id_user=%s WHERE id_user=%s AND method=%s", (new_id, id_user, row[0]))
## And we delete the duplicate remaining ones :-)
run_sql("DELETE FROM userEXT WHERE id_user=%s", (id_user, ))
id_user = new_id
else:
## We just need to rename the email address of the
## corresponding user. Unfortunately the local
## password will then be invalid, but it's unlikely
## the user is using both an external and a local
## account.
run_sql("UPDATE user SET email=%s WHERE id=%s", (p_email, id_user))
else:
## User was not already registered with this external method.
query_result = run_sql("SELECT id FROM user WHERE email=%s", (p_email, ))
if query_result:
## The user was already known with this email
id_user = query_result[0][0]
## We fix the inconsistence in the userEXT table.
run_sql("INSERT INTO userEXT(id, method, id_user) VALUES(%s, %s, %s) ON DUPLICATE KEY UPDATE id=%s, method=%s, id_user=%s", (p_extid, login_method, id_user, p_extid, login_method, id_user))
else:
## First time user
p_pw_local = int(random.random() * 1000000)
p_nickname = ''
if CFG_EXTERNAL_AUTHENTICATION[login_method].enforce_external_nicknames:
try: # Let's discover the external nickname!
p_nickname = CFG_EXTERNAL_AUTHENTICATION[login_method].fetch_user_nickname(p_email, p_pw, req)
except (AttributeError, NotImplementedError):
pass
except:
register_exception(req=req, alert_admin=True)
raise
res = registerUser(req, p_email, p_pw_local, p_nickname,
register_without_nickname=p_nickname == '',
login_method=login_method)
if res == 4 or res == 2: # The nickname was already taken
res = registerUser(req, p_email, p_pw_local, '',
register_without_nickname=True,
login_method=login_method)
query_result = run_sql("SELECT id from user where email=%s", (p_email,))
id_user = query_result[0][0]
elif res == 0: # Everything was ok, with or without nickname.
query_result = run_sql("SELECT id from user where email=%s", (p_email,))
id_user = query_result[0][0]
elif res == 6: # error in contacting the user via email
return (None, p_email, p_pw_local, 19)
else:
return (None, p_email, p_pw_local, 13)
run_sql("INSERT INTO userEXT(id, method, id_user) VALUES(%s, %s, %s)", (p_extid, login_method, id_user))
if CFG_EXTERNAL_AUTHENTICATION[login_method].enforce_external_nicknames:
## Let's still fetch a possibly upgraded nickname.
try: # Let's discover the external nickname!
p_nickname = CFG_EXTERNAL_AUTHENTICATION[login_method].fetch_user_nickname(p_email, p_pw, req)
if nickname_valid_p(p_nickname) and nicknameUnique(p_nickname) == 0:
updateDataUser(id_user, p_email, p_nickname)
except (AttributeError, NotImplementedError):
pass
except:
register_exception(alert_admin=True)
raise
try:
groups = CFG_EXTERNAL_AUTHENTICATION[login_method].fetch_user_groups_membership(p_email, p_pw, req)
# groups is a dictionary {group_name : group_description,}
new_groups = {}
for key, value in groups.items():
new_groups[key + " [" + str(login_method) + "]"] = value
groups = new_groups
except (AttributeError, NotImplementedError):
pass
except:
register_exception(req=req, alert_admin=True)
return (None, p_email, p_pw, 16)
else: # Groups synchronization
if groups:
from invenio.webgroup import synchronize_external_groups
synchronize_external_groups(id_user, groups, login_method)
user_prefs = get_user_preferences(id_user)
if not CFG_EXTERNAL_AUTHENTICATION[login_method]:
## I.e. if the login method is not of robot type:
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 4:
# Let's prevent the user from switching login_method
if "login_method" in user_prefs and \
user_prefs["login_method"] != login_method:
return (None, p_email, p_pw, 11)
user_prefs["login_method"] = login_method
# Cleaning external settings
for key in user_prefs.keys():
if key.startswith('EXTERNAL_'):
del user_prefs[key]
try:
# Importing external settings
new_prefs = CFG_EXTERNAL_AUTHENTICATION[login_method].fetch_user_preferences(p_email, p_pw, req)
for key, value in new_prefs.items():
user_prefs['EXTERNAL_' + key] = value
except (AttributeError, NotImplementedError):
pass
except InvenioWebAccessExternalAuthError:
register_exception(req=req, alert_admin=True)
return (None, p_email, p_pw, 16)
# Storing settings
set_user_preferences(id_user, user_prefs)
else:
return (None, p_un, p_pw, 10)
else: # Internal Authentication
if not p_pw:
p_pw = ''
query_result = run_sql("SELECT id,email,note from user where email=%s and password=AES_ENCRYPT(email,%s)", (p_email, p_pw,))
if query_result:
#FIXME drop external groups and settings
note = query_result[0][2]
id_user = query_result[0][0]
if note == '1': # Good account
preferred_login_method = get_user_preferences(query_result[0][0])['login_method']
p_email = query_result[0][1].lower()
if login_method != preferred_login_method:
if preferred_login_method in CFG_EXTERNAL_AUTHENTICATION:
return (None, p_email, p_pw, 11)
elif note == '2': # Email address need to be confirmed by user
return (None, p_email, p_pw, 17)
elif note == '0': # Account need to be confirmed by administrator
return (None, p_email, p_pw, 18)
else:
return (None, p_email, p_pw, 14)
# Login successful! Updating the last access time
run_sql("UPDATE user SET last_login=NOW() WHERE email=%s", (p_email,))
return (id_user, p_email, p_pw, 0)
def drop_external_settings(userId):
"""Drop the external (EXTERNAL_) settings of userid."""
prefs = get_user_preferences(userId)
for key in prefs.keys():
if key.startswith('EXTERNAL_'):
del prefs[key]
set_user_preferences(userId, prefs)
def logoutUser(req):
"""It logout the user of the system, creating a guest user.
"""
if CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS:
uid = createGuestUser()
session['uid'] = uid
session.set_remember_me(False)
else:
uid = 0
session.invalidate()
if hasattr(req, '_user_info'):
delattr(req, '_user_info')
return uid
def username_exists_p(username):
"""Check if USERNAME exists in the system. Username may be either
nickname or email.
Return 1 if it does exist, 0 if it does not.
"""
if username == "":
# return not exists if asked for guest users
return 0
res = run_sql("SELECT email FROM user WHERE email=%s", (username,)) + \
run_sql("SELECT email FROM user WHERE nickname=%s", (username,))
if len(res) > 0:
return 1
return 0
def emailUnique(p_email):
"""Check if the email address only exists once. If yes, return userid, if not, -1
"""
query_result = run_sql("select id, email from user where email=%s", (p_email,))
if len(query_result) == 1:
return query_result[0][0]
elif len(query_result) == 0:
return 0
return -1
def nicknameUnique(p_nickname):
"""Check if the nickname only exists once. If yes, return userid, if not, -1
"""
query_result = run_sql("select id, nickname from user where nickname=%s", (p_nickname,))
if len(query_result) == 1:
return query_result[0][0]
elif len(query_result) == 0:
return 0
return -1
def update_Uid(req, p_email, remember_me=False):
"""It updates the userId of the session. It is used when a guest user is logged in succesfully in the system with a given email and password.
As a side effect it will discover all the restricted collection to which the user has right to
"""
query_ID = int(run_sql("select id from user where email=%s",
(p_email,))[0][0])
setUid(req, query_ID, remember_me)
return query_ID
def send_new_admin_account_warning(new_account_email, send_to, ln=CFG_SITE_LANG):
"""Send an email to the address given by send_to about the new account new_account_email."""
_ = gettext_set_language(ln)
sub = _("New account on") + " '%s'" % CFG_SITE_NAME
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS == 1:
sub += " - " + _("PLEASE ACTIVATE")
body = _("A new account has been created on") + " '%s'" % CFG_SITE_NAME
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS == 1:
body += _(" and is awaiting activation")
body += ":\n\n"
body += _(" Username/Email") + ": %s\n\n" % new_account_email
body += _("You can approve or reject this account request at") + ": %s/admin/webaccess/webaccessadmin.py/manageaccounts\n" % CFG_SITE_URL
return send_email(CFG_SITE_SUPPORT_EMAIL, send_to, subject=sub, content=body)
def get_email(uid):
"""Return email address of the user uid. Return string 'guest' in case
the user is not found."""
out = "guest"
res = run_sql("SELECT email FROM user WHERE id=%s", (uid,), 1)
if res and res[0][0]:
out = res[0][0].lower()
return out
def get_email_from_username(username):
"""Return email address of the user corresponding to USERNAME.
The username may be either nickname or email. Return USERNAME
untouched if not found in the database or if found several
matching entries.
"""
if username == '':
return ''
out = username
res = run_sql("SELECT email FROM user WHERE email=%s", (username,), 1) + \
run_sql("SELECT email FROM user WHERE nickname=%s", (username,), 1)
if res and len(res) == 1:
out = res[0][0].lower()
return out
#def get_password(uid):
#"""Return password of the user uid. Return None in case
#the user is not found."""
#out = None
#res = run_sql("SELECT password FROM user WHERE id=%s", (uid,), 1)
#if res and res[0][0] != None:
#out = res[0][0]
#return out
def get_nickname(uid):
"""Return nickname of the user uid. Return None in case
the user is not found."""
out = None
res = run_sql("SELECT nickname FROM user WHERE id=%s", (uid,), 1)
if res and res[0][0]:
out = res[0][0]
return out
def get_nickname_or_email(uid):
"""Return nickname (preferred) or the email address of the user uid.
Return string 'guest' in case the user is not found."""
out = "guest"
res = run_sql("SELECT nickname, email FROM user WHERE id=%s", (uid,), 1)
if res and res[0]:
if res[0][0]:
out = res[0][0]
elif res[0][1]:
out = res[0][1].lower()
return out
def create_userinfobox_body(req, uid, language="en"):
"""Create user info box body for user UID in language LANGUAGE."""
if req:
if req.is_https():
url_referer = CFG_SITE_SECURE_URL + req.unparsed_uri
else:
url_referer = CFG_SITE_URL + req.unparsed_uri
if '/youraccount/logout' in url_referer:
url_referer = ''
else:
url_referer = CFG_SITE_URL
user_info = collect_user_info(req)
try:
return tmpl.tmpl_create_userinfobox(ln=language,
url_referer=url_referer,
guest=int(user_info['guest']),
username=get_nickname_or_email(uid),
submitter=user_info['precached_viewsubmissions'],
referee=user_info['precached_useapprove'],
admin=user_info['precached_useadmin'],
usebaskets=user_info['precached_usebaskets'],
usemessages=user_info['precached_usemessages'],
usealerts=user_info['precached_usealerts'],
usegroups=user_info['precached_usegroups'],
useloans=user_info['precached_useloans'],
usestats=user_info['precached_usestats']
)
except OperationalError:
return ""
def create_useractivities_menu(req, uid, navmenuid, ln="en"):
"""Create user activities menu.
@param req: request object
@param uid: user id
@type uid: int
@param navmenuid: the section of the website this page belongs (search, submit, baskets, etc.)
@type navmenuid: string
@param ln: language
@type ln: string
@return: HTML menu of the user activities
@rtype: string
"""
if req:
if req.is_https():
url_referer = CFG_SITE_SECURE_URL + req.unparsed_uri
else:
url_referer = CFG_SITE_URL + req.unparsed_uri
if '/youraccount/logout' in url_referer:
url_referer = ''
else:
url_referer = CFG_SITE_URL
user_info = collect_user_info(req)
is_user_menu_selected = False
if navmenuid == 'personalize' or \
navmenuid.startswith('your') and \
navmenuid != 'youraccount':
is_user_menu_selected = True
try:
return tmpl.tmpl_create_useractivities_menu(
ln=ln,
selected=is_user_menu_selected,
url_referer=url_referer,
guest=int(user_info['guest']),
username=get_nickname_or_email(uid),
submitter=user_info['precached_viewsubmissions'],
referee=user_info['precached_useapprove'],
admin=user_info['precached_useadmin'],
usebaskets=user_info['precached_usebaskets'],
usemessages=user_info['precached_usemessages'],
usealerts=user_info['precached_usealerts'],
usegroups=user_info['precached_usegroups'],
useloans=user_info['precached_useloans'],
usestats=user_info['precached_usestats'],
usecomments=user_info['precached_sendcomments'],
)
except OperationalError:
return ""
def create_adminactivities_menu(req, uid, navmenuid, ln="en"):
"""Create admin activities menu.
@param req: request object
@param uid: user id
@type uid: int
@param navmenuid: the section of the website this page belongs (search, submit, baskets, etc.)
@type navmenuid: string
@param ln: language
@type ln: string
@return: HTML menu of the user activities
@rtype: string
"""
_ = gettext_set_language(ln)
if req:
if req.is_https():
url_referer = CFG_SITE_SECURE_URL + req.unparsed_uri
else:
url_referer = CFG_SITE_URL + req.unparsed_uri
if '/youraccount/logout' in url_referer:
url_referer = ''
else:
url_referer = CFG_SITE_URL
user_info = collect_user_info(req)
activities = acc_find_possible_activities(user_info, ln)
# For BibEdit and BibDocFile menu items, take into consideration
# current record whenever possible
if _("Run Record Editor") in activities or \
_("Run Document File Manager") in activities and \
user_info['uri'].startswith('/' + CFG_SITE_RECORD + '/'):
try:
# Get record ID and try to cast it to an int
current_record_id = int(urlparse.urlparse(user_info['uri'])[2].split('/')[2])
except:
pass
else:
if _("Run Record Editor") in activities:
activities[_("Run Record Editor")] = activities[_("Run Record Editor")] + '&#state=edit&recid=' + str(current_record_id)
if _("Run Document File Manager") in activities:
activities[_("Run Document File Manager")] = activities[_("Run Document File Manager")] + '&recid=' + str(current_record_id)
try:
return tmpl.tmpl_create_adminactivities_menu(
ln=ln,
selected=navmenuid == 'admin',
url_referer=url_referer,
guest=int(user_info['guest']),
username=get_nickname_or_email(uid),
submitter=user_info['precached_viewsubmissions'],
referee=user_info['precached_useapprove'],
admin=user_info['precached_useadmin'],
usebaskets=user_info['precached_usebaskets'],
usemessages=user_info['precached_usemessages'],
usealerts=user_info['precached_usealerts'],
usegroups=user_info['precached_usegroups'],
useloans=user_info['precached_useloans'],
usestats=user_info['precached_usestats'],
activities=activities
)
except OperationalError:
return ""
def list_registered_users():
"""List all registered users."""
return run_sql("SELECT id,email FROM user where email!=''")
def list_users_in_role(role):
"""List all users of a given role (see table accROLE)
@param role: role of user (string)
@return: list of uids
"""
res = run_sql("""SELECT uacc.id_user
FROM user_accROLE uacc JOIN accROLE acc
ON uacc.id_accROLE=acc.id
WHERE acc.name=%s""",
(role,), run_on_slave=True)
if res:
return map(lambda x: int(x[0]), res)
return []
def list_users_in_roles(role_list):
"""List all users of given roles (see table accROLE)
@param role_list: list of roles [string]
@return: list of uids
"""
if not(type(role_list) is list or type(role_list) is tuple):
role_list = [role_list]
query = """SELECT DISTINCT(uacc.id_user)
FROM user_accROLE uacc JOIN accROLE acc
ON uacc.id_accROLE=acc.id
"""
query_addons = ""
query_params = ()
if len(role_list) > 0:
query_params = role_list
query_addons = " WHERE "
for role in role_list[:-1]:
query_addons += "acc.name=%s OR "
query_addons += "acc.name=%s"
res = run_sql(query + query_addons, query_params, run_on_slave=True)
if res:
return map(lambda x: int(x[0]), res)
return []
def get_uid_based_on_pref(prefname, prefvalue):
"""get the user's UID based where his/her preference prefname has value prefvalue in preferences"""
prefs = run_sql("SELECT id, settings FROM user WHERE settings is not NULL")
the_uid = None
for pref in prefs:
try:
settings = deserialize_via_marshal(pref[1])
if (prefname in settings) and (settings[prefname] == prefvalue):
the_uid = pref[0]
except:
pass
return the_uid
def get_user_preferences(uid):
pref = run_sql("SELECT id, settings FROM user WHERE id=%s", (uid,))
if pref:
try:
# FIXME: return User.query.get(uid).settings
return deserialize_via_marshal(pref[0][1])
except:
pass
return get_default_user_preferences() # an empty dict means no preferences
def set_user_preferences(uid, pref):
assert(type(pref) == type({}))
run_sql("UPDATE user SET settings=%s WHERE id=%s",
(serialize_via_marshal(pref), uid))
def get_default_user_preferences():
user_preference = {
'login_method': ''}
if CFG_EXTERNAL_AUTH_DEFAULT in CFG_EXTERNAL_AUTHENTICATION:
user_preference['login_method'] = CFG_EXTERNAL_AUTH_DEFAULT
return user_preference
def get_preferred_user_language(req):
def _get_language_from_req_header(accept_language_header):
"""Extract langs info from req.headers_in['Accept-Language'] which
should be set to something similar to:
'fr,en-us;q=0.7,en;q=0.3'
"""
tmp_langs = {}
for lang in accept_language_header.split(','):
lang = lang.split(';q=')
if len(lang) == 2:
lang[1] = lang[1].replace('"', '') # Hack for Yeti robot
try:
tmp_langs[float(lang[1])] = lang[0]
except ValueError:
pass
else:
tmp_langs[1.0] = lang[0]
ret = []
priorities = tmp_langs.keys()
priorities.sort()
priorities.reverse()
for priority in priorities:
ret.append(tmp_langs[priority])
return ret
uid = getUid(req)
guest = isGuestUser(uid)
new_lang = None
preferred_lang = None
if not guest:
user_preferences = get_user_preferences(uid)
preferred_lang = new_lang = user_preferences.get('language', None)
if not new_lang:
try:
new_lang = wash_languages(cgi.parse_qs(req.args)['ln'])
except (TypeError, AttributeError, KeyError):
pass
if not new_lang:
try:
new_lang = wash_languages(_get_language_from_req_header(req.headers_in['Accept-Language']))
except (TypeError, AttributeError, KeyError):
pass
new_lang = wash_language(new_lang)
if new_lang != preferred_lang and not guest:
user_preferences['language'] = new_lang
set_user_preferences(uid, user_preferences)
return new_lang
def collect_user_info(req, login_time=False, refresh=False):
"""Given the mod_python request object rec or a uid it returns a dictionary
containing at least the keys uid, nickname, email, groups, plus any external keys in
the user preferences (collected at login time and built by the different
external authentication plugins) and if the mod_python request object is
provided, also the remote_ip, remote_host, referer, agent fields.
NOTE: if req is a mod_python request object, the user_info dictionary
is saved into req._user_info (for caching purposes);
setApacheUser & setUid will properly reset it.
"""
if type(req) in [long, int] or req is None:
from invenio.ext.login import UserInfo
return UserInfo(req)
from flask.ext.login import current_user
return current_user._get_current_object()
##
## NOT USED ANYMORE
## please see invenio.ext.login
##
#FIXME move EXTERNAL SSO functionality
from invenio.legacy.search_engine import get_permitted_restricted_collections
user_info = {
'remote_ip' : '',
'remote_host' : '',
'referer' : '',
'uri' : '',
'agent' : '',
'uid' :-1,
'nickname' : '',
'email' : '',
'group' : [],
'guest' : '1',
'session' : None,
'precached_permitted_restricted_collections' : [],
'precached_usebaskets' : False,
'precached_useloans' : False,
'precached_usegroups' : False,
'precached_usealerts' : False,
'precached_usemessages' : False,
'precached_viewsubmissions' : False,
'precached_useapprove' : False,
'precached_useadmin' : False,
'precached_usestats' : False,
'precached_viewclaimlink' : False,
'precached_usepaperclaim' : False,
'precached_usepaperattribution' : False,
'precached_canseehiddenmarctags' : False,
'precached_sendcomments' : False,
}
try:
is_req = False
is_flask = False
session = None
if not req:
uid = -1
elif type(req) in (type(1), type(1L)):
## req is infact a user identification
uid = req
elif type(req) is dict:
## req is by mistake already a user_info
try:
assert('uid' in req)
assert('email' in req)
assert('nickname' in req)
except AssertionError:
## mmh... misuse of collect_user_info. Better warn the admin!
register_exception(alert_admin=True)
user_info.update(req)
return user_info
elif isinstance(req, Request):
is_flask = True
from flask import session
uid = session.uid
if 'user_info' in session:
user_info = session['user_info']
if not login_time and not refresh:
return user_info
user_info['remote_ip'] = req.remote_addr
user_info['session'] = session.sid
user_info['remote_host'] = req.environ.get('REMOTE_HOST', '')
user_info['referer'] = req.referrer
user_info['uri'] = req.url or ''
user_info['agent'] = req.user_agent or 'N/A'
else:
is_req = True
uid = getUid(req)
if hasattr(req, '_user_info') and not login_time:
user_info = req._user_info
if not refresh:
return req._user_info
req._user_info = user_info
try:
user_info['remote_ip'] = req.remote_ip
except gaierror:
#FIXME: we should support IPV6 too. (hint for FireRole)
pass
user_info['session'] = session.sid
user_info['remote_host'] = req.remote_host or ''
user_info['referer'] = req.headers_in.get('Referer', '')
user_info['uri'] = req.unparsed_uri or ''
user_info['agent'] = req.headers_in.get('User-Agent', 'N/A')
user_info['uid'] = uid
user_info['nickname'] = get_nickname(uid) or ''
user_info['email'] = get_email(uid) or ''
user_info['group'] = []
user_info['guest'] = str(isGuestUser(uid))
if user_info['guest'] == '1' and CFG_INSPIRE_SITE:
usepaperattribution = False
viewclaimlink = False
if (CFG_BIBAUTHORID_ENABLED
and acc_is_user_in_role(user_info, acc_get_role_id("paperattributionviewers"))):
usepaperattribution = True
# if (CFG_BIBAUTHORID_ENABLED
# and usepaperattribution
# and acc_is_user_in_role(user_info, acc_get_role_id("paperattributionlinkviewers"))):
# viewclaimlink = True
viewlink = False
if is_req or is_flask:
try:
viewlink = session['personinfo']['claim_in_process']
except (KeyError, TypeError):
pass
if (CFG_BIBAUTHORID_ENABLED
and usepaperattribution
and viewlink):
viewclaimlink = True
user_info['precached_viewclaimlink'] = viewclaimlink
user_info['precached_usepaperattribution'] = usepaperattribution
if user_info['guest'] == '0':
user_info['group'] = [group[1] for group in get_groups(uid)]
prefs = get_user_preferences(uid)
login_method = prefs['login_method']
## NOTE: we fall back to default login_method if the login_method
## specified in the user settings does not exist (e.g. after
## a migration.)
login_object = CFG_EXTERNAL_AUTHENTICATION.get(login_method, CFG_EXTERNAL_AUTHENTICATION[CFG_EXTERNAL_AUTH_DEFAULT])
if login_object and ((datetime.datetime.now() - get_last_login(uid)).seconds > 3600):
                ## The user uses an external authentication method and it has been
                ## a while since her last login
if not CFG_EXTERNAL_AUTH_USING_SSO or (
is_req and login_object.in_shibboleth(req)):
                    ## If we're using SSO we must be sure to be behind HTTPS and the Shibboleth
                    ## handler, otherwise we can't really read anything, hence
                    ## it's better to skip the synchronization
try:
groups = login_object.fetch_user_groups_membership(user_info['email'], req=req)
# groups is a dictionary {group_name : group_description,}
new_groups = {}
for key, value in groups.items():
new_groups[key + " [" + str(login_method) + "]"] = value
groups = new_groups
except (AttributeError, NotImplementedError, TypeError, InvenioWebAccessExternalAuthError):
pass
else: # Groups synchronization
from invenio.webgroup import synchronize_external_groups
synchronize_external_groups(uid, groups, login_method)
user_info['group'] = [group[1] for group in get_groups(uid)]
try:
# Importing external settings
new_prefs = login_object.fetch_user_preferences(user_info['email'], req=req)
for key, value in new_prefs.items():
prefs['EXTERNAL_' + key] = value
except (AttributeError, NotImplementedError, TypeError, InvenioWebAccessExternalAuthError):
pass
else:
set_user_preferences(uid, prefs)
prefs = get_user_preferences(uid)
run_sql('UPDATE user SET last_login=NOW() WHERE id=%s', (uid,))
if prefs:
for key, value in iteritems(prefs):
user_info[key.lower()] = value
if login_time:
## Heavy computational information
from invenio.modules.access.engine import acc_authorize_action
user_info['precached_permitted_restricted_collections'] = get_permitted_restricted_collections(user_info)
user_info['precached_usebaskets'] = acc_authorize_action(user_info, 'usebaskets')[0] == 0
user_info['precached_useloans'] = acc_authorize_action(user_info, 'useloans')[0] == 0
user_info['precached_usegroups'] = acc_authorize_action(user_info, 'usegroups')[0] == 0
user_info['precached_usealerts'] = acc_authorize_action(user_info, 'usealerts')[0] == 0
user_info['precached_usemessages'] = acc_authorize_action(user_info, 'usemessages')[0] == 0
user_info['precached_usestats'] = acc_authorize_action(user_info, 'runwebstatadmin')[0] == 0
user_info['precached_viewsubmissions'] = isUserSubmitter(user_info)
user_info['precached_useapprove'] = isUserReferee(user_info)
user_info['precached_useadmin'] = isUserAdmin(user_info)
user_info['precached_canseehiddenmarctags'] = acc_authorize_action(user_info, 'runbibedit')[0] == 0
user_info['precached_sendcomments'] = acc_authorize_action(user_info, 'sendcomment', '*')[0] == 0
usepaperclaim = False
usepaperattribution = False
viewclaimlink = False
if (CFG_BIBAUTHORID_ENABLED
and acc_is_user_in_role(user_info, acc_get_role_id("paperclaimviewers"))):
usepaperclaim = True
if (CFG_BIBAUTHORID_ENABLED
and acc_is_user_in_role(user_info, acc_get_role_id("paperattributionviewers"))):
usepaperattribution = True
viewlink = False
if is_req or is_flask:
try:
viewlink = session['personinfo']['claim_in_process']
except (KeyError, TypeError):
pass
if (CFG_BIBAUTHORID_ENABLED
and usepaperattribution
and viewlink):
viewclaimlink = True
# if (CFG_BIBAUTHORID_ENABLED
# and ((usepaperclaim or usepaperattribution)
# and acc_is_user_in_role(user_info, acc_get_role_id("paperattributionlinkviewers")))):
# viewclaimlink = True
user_info['precached_viewclaimlink'] = viewclaimlink
user_info['precached_usepaperclaim'] = usepaperclaim
user_info['precached_usepaperattribution'] = usepaperattribution
except Exception as e:
register_exception()
return user_info
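# Illustrative note (not part of the original module): every 'precached_*'
# flag above follows the same convention -- acc_authorize_action() returns a
# (code, message) tuple and a code of 0 means "authorized" -- so callers can
# test the cached booleans instead of re-running the expensive checks, e.g.:
#   user_info = collect_user_info(req, login_time=True)
#   if user_info['precached_usebaskets']:
#       ...   # render the baskets UI (hypothetical caller code)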
|
PXke/invenio
|
invenio/legacy/webuser.py
|
Python
|
gpl-2.0
| 59,307
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Tristan Fischer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from xbmcswift2 import Plugin, xbmc
from resources.lib import scraper
plugin = Plugin()
STRINGS = {
'current': 30000,
'archive': 30002,
'search': 30003,
'network_error': 30020
}
@plugin.route('/')
def show_root():
items = [
{'label': _('current'),
'path': plugin.url_for('show_current')},
{'label': _('archive'),
'path': plugin.url_for('show_archive')},
{'label': _('search'),
'path': plugin.url_for('search')}
]
return plugin.finish(items)
@plugin.route('/current/')
def show_current():
videos = scraper.get_current_videos()
return __add_videos(videos)
@plugin.route('/watch/<video_id>')
def watch_video(video_id):
video_url = scraper.get_video_url(video_id)
return plugin.set_resolved_url(video_url)
@plugin.route('/archive/')
def show_archive():
archive_dates = scraper.get_archive_dates()
items = [{
'label': archive_date['title'],
'path': plugin.url_for(
'show_archived_videos',
archive_id=archive_date['archive_id'],
),
} for archive_date in archive_dates]
items.reverse()
return plugin.finish(items)
@plugin.route('/archive/<archive_id>/')
def show_archived_videos(archive_id):
videos = scraper.get_archived_videos(
archive_id=archive_id
)
return __add_videos(videos)
@plugin.route('/search/')
def search():
search_string = plugin.keyboard(heading=_('search'))
if search_string:
url = plugin.url_for(
'search_result',
search_string=search_string
)
plugin.redirect(url)
@plugin.route('/search/<search_string>/')
def search_result(search_string):
videos = scraper.get_search_result(search_string)
return __add_videos(videos)
def __add_videos(videos):
items = [{
'label': video['title'],
'thumbnail': video['thumb'],
'path': plugin.url_for(
'watch_video',
video_id=video['video_id']
),
'info': {
'date': video['date']
},
'is_playable': True,
} for video in videos]
finish_kwargs = {
'sort_methods': ('DATE', )
}
if plugin.get_setting('force_viewmode', bool):
finish_kwargs['view_mode'] = 'thumbnail'
return plugin.finish(items, **finish_kwargs)
def _(string_id):
if string_id in STRINGS:
return plugin.get_string(STRINGS[string_id])
else:
plugin.log.warning('String is missing: %s' % string_id)
return string_id
if __name__ == '__main__':
try:
plugin.run()
except scraper.NetworkError:
plugin.notify(msg=_('network_error'))
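# Illustrative note (not part of the original add-on): _() resolves an add-on
# string id through STRINGS and falls back to the raw key when the id is
# unknown, so menu code never crashes on a missing translation:
#   _('current')          # -> plugin.get_string(30000)
#   _('no_such_string')   # -> logs a warning, returns 'no_such_string'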
|
dersphere/plugin.video.dump
|
addon.py
|
Python
|
gpl-2.0
| 3,438
|
from builtins import str
from config import *
from util import *
import util
import helper
import dialogprogress
import xbmc
from dialogbase import DialogBase
from artworkupdater import ArtworkUpdater
ACTION_MOVEMENT_UP = (3,)
ACTION_MOVEMENT_DOWN = (4,)
ACTION_MOVEMENT = (3, 4, 5, 6, 159, 160)
ACTION_CANCEL_DIALOG = (9, 10, 51, 92, 110)
CONTROL_BUTTON_EXIT = 5101
CONTROL_BUTTON_OK = 5300
CONTROL_BUTTON_CANCEL = 5310
CONTROL_LIST_ROMCOLLECTIONS = 5210
CONTROL_LIST_ARTWORKTYPES = 5270
CONTROL_BUTTON_RC_DOWN = 5211
CONTROL_BUTTON_RC_UP = 5212
CONTROL_BUTTON_SCRAPER_DOWN = 5271
class UpdateArtworkDialog(DialogBase):
def __init__(self, *args, **kwargs):
# Don't put GUI sensitive stuff here (as the xml hasn't been read yet)
log.info('init ImportOptions')
self.gui = kwargs["gui"]
self.doModal()
def onInit(self):
log.info('onInit ImportOptions')
# 32120 = All
romCollectionList = [util.localize(32120)] + self.gui.config.getRomCollectionNames()
log.debug("Adding list of RC names: {0}".format(romCollectionList))
self.addItemsToList(CONTROL_LIST_ROMCOLLECTIONS, romCollectionList)
artworktypes = []
for filetype in self.gui.config.get_filetypes():
artworktypes.append(filetype.name)
        artworktypes = [util.localize(32120)] + sorted(artworktypes)
self.addItemsToList(CONTROL_LIST_ARTWORKTYPES, sorted(artworktypes))
self.selectItemInList(util.localize(32120), CONTROL_LIST_ARTWORKTYPES)
def onAction(self, action):
if action.getId() in ACTION_CANCEL_DIALOG:
self.close()
def onClick(self, controlID):
if controlID == CONTROL_BUTTON_EXIT: # Close window button
self.close()
elif controlID == CONTROL_BUTTON_OK:
self.close()
self.update_artwork()
elif controlID == CONTROL_BUTTON_CANCEL:
self.close()
elif controlID in (CONTROL_BUTTON_RC_DOWN, CONTROL_BUTTON_RC_UP): # Rom Collection list
# HACK: add a little wait time as XBMC needs some ms to execute the MoveUp/MoveDown actions from the skin
xbmc.sleep(util.WAITTIME_UPDATECONTROLS)
control = self.getControlById(CONTROL_LIST_ROMCOLLECTIONS)
selectedRomCollectionName = str(control.getSelectedItem().getLabel())
#32120 = All
if selectedRomCollectionName == util.localize(32120):
artworktypes = []
for filetype in self.gui.config.get_filetypes():
artworktypes.append(filetype.name)
artworktypes = [util.localize(32120)] + sorted(artworktypes)
self.addItemsToList(CONTROL_LIST_ARTWORKTYPES, sorted(artworktypes))
self.selectItemInList(util.localize(32120), CONTROL_LIST_ARTWORKTYPES)
else:
artworktypes = []
rom_collection = self.gui.config.getRomCollectionByName(selectedRomCollectionName)
for mediaPath in rom_collection.mediaPaths:
artworktypes.append(mediaPath.fileType.name)
artworktypes = [util.localize(32120)] + sorted(artworktypes)
self.addItemsToList(CONTROL_LIST_ARTWORKTYPES, sorted(artworktypes))
self.selectItemInList(util.localize(32120), CONTROL_LIST_ARTWORKTYPES)
def update_artwork(self):
log.info('update_artwork')
control = self.getControlById(CONTROL_LIST_ROMCOLLECTIONS)
selected_romcollection_name = control.getSelectedItem().getLabel()
romcollection_id = 0
#32120 = All
if selected_romcollection_name != util.localize(32120):
romcollection = self.gui.config.getRomCollectionByName(selected_romcollection_name)
romcollection_id = romcollection.id
control = self.getControlById(CONTROL_LIST_ARTWORKTYPES)
selected_filetype_name = control.getSelectedItem().getLabel()
filetype_id = 0
#32120 = All
if selected_filetype_name != util.localize(32120):
filetype, errormessage = self.gui.config.get_filetype_by_name(selected_filetype_name, self.gui.config.tree)
filetype_id = filetype.id
progressDialog = dialogprogress.ProgressDialogGUI()
#32950 = Scan Artwork
progressDialog.create(util.localize(32950))
updater = ArtworkUpdater(progressDialog, self.gui.gdb, self.gui.config)
updater.update_artwork_cache(romcollection_id, filetype_id)
|
maloep/romcollectionbrowser
|
resources/lib/dialogupdateartwork.py
|
Python
|
gpl-2.0
| 4,551
|
# -*- coding: utf-8 -*-
import feedparser,re
from time import mktime
import datetime
def return_rsslist(url):
rssobj = feedparser.parse(url)
return_list = []
now = datetime.datetime.now()
datenow = datetime.datetime(int(now.year), int(now.month), int(now.day))
if rssobj:
rssdict=rssobj["entries"]
for entry in rssdict:
title = entry['title']
content = entry['summary_detail']['value']
dateitem = entry['published_parsed']
#manipulations on time
dt = datetime.datetime.fromtimestamp(mktime(dateitem))
day_difference = (dt - datenow).days
print day_difference
if day_difference == 0: feedday = 'Today ' + dt.strftime("%H:%M")
elif day_difference == -1: feedday = 'Yesterday ' + dt.strftime("%H:%M")
else: feedday = dt.strftime("%d-%m-%Y %H:%M")
links = entry['links']
img = ''
for link in links:
if 'image' in link['type']:
img = link['href'].replace('150x150','768x576')
#fix pt rss
if not img:
if "<div id=" in content:
imgfilter = re.compile('src="(.+?)"').findall(content)
if imgfilter: img = imgfilter[0].replace('143x81','400x225')
contentfilter = re.compile('<div>(.+?)</div>').findall(content)
if contentfilter: content = contentfilter[0]
return_list.append([title,feedday,content,img])
return return_list
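# Illustrative note (not part of the original module): the date handling above
# turns each entry's published_parsed struct into a short relative label.
# Assuming "now" is 2015-06-10, an item published that day at 14:30 gives
# day_difference == 0 and feedday == 'Today 14:30'; one from the previous
# evening gives day_difference == -1 and 'Yesterday 21:05'; anything older
# falls back to the absolute 'dd-mm-YYYY HH:MM' form.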
|
enen92/script.sportscenter
|
resources/lib/centerutils/rssparser.py
|
Python
|
gpl-2.0
| 1,342
|
#!/usr/bin/env python2.7
#coding=utf-8
import logging
import time
import sys
sys.path.append("..")
import conf.conf as conf
import mylog
import MySQLdb
import tushare as ts
from sqlalchemy import create_engine
def getEngine(typeStr='mysql'):
return create_engine('mysql://%s:%s@%s/%s?charset=%s'%(
conf.mysqlConfig['user'],
conf.mysqlConfig['passwd'],
conf.mysqlConfig['host'],
conf.DB_NAME,
conf.mysqlConfig['charset']))
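# Illustrative note (not part of the original module): with hypothetical
# settings such as user='stock', passwd='secret', host='localhost',
# DB_NAME='stockdb' and charset='utf8', getEngine() builds the SQLAlchemy URL
#   'mysql://stock:secret@localhost/stockdb?charset=utf8'
# which create_engine() then turns into a connection pool.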
def getConn():
conf.mysqlConfig["db"] = conf.DB_NAME
try:
conn = MySQLdb.connect(**conf.mysqlConfig)
except Exception,e:
# init the db
if e.args[0] == 1049:
conf.mysqlConfig.pop("db")
conn = MySQLdb.connect(**conf.mysqlConfig)
else:
raise e
return conn
def executeSQL(sql):
'''
    This wrapper cannot return any data; it just executes the given SQL statement.
'''
conn = getConn()
cursor = conn.cursor()
logging.info("Execute sql: %s"%sql)
cursor.execute(sql)
conn.commit()
cursor.close()
conn.close()
def executemany(sql_template, args):
'''
    This wrapper cannot return any data; it just executes the given SQL statement.
'''
conn = getConn()
cursor = conn.cursor()
logging.info("Execute sql: %s to save %d lines of data."%(sql_template, len(args)))
cursor.executemany(sql_template, args)
conn.commit()
cursor.close()
conn.close()
def downloadStockBasics():
stockBasics = ts.get_stock_basics()
#stockBasics.insert(0,"update_date",time.strftime( conf.ISO_DATE_FORMAT, time.localtime()),True)
executeSQL("delete from t_stock_basics")
stockBasics.to_sql(name="t_stock_basics",con=getEngine(),if_exists="append")
return stockBasics
def splitDateRange(startDate, endDate,timeToMarket=None):
"""The (startDate, endDate) may span over a very large range,
and this is not good for performance consideration if the downloader
is based on the tusahre lib which is our primary downloader.
And this method is called to split the range
into several smaller(year) range.
"""
_ranges = []
if timeToMarket is not None:
timeToMarket = str(timeToMarket)
timeToMarket = timeToMarket[:4]+"-"+timeToMarket[4:6]+"-"+timeToMarket[6:]
if timeToMarket > startDate:
startDate = timeToMarket
years = range(int(startDate.split('-')[0]), int(endDate.split('-')[0])+1)
# only one year
if len(years) == 1:
_ranges.append((startDate, endDate))
return _ranges
for idx, y in enumerate(years):
if idx == 0:
_ranges.append((startDate, "%d-12-31"%y))
elif idx == len(years) - 1:
_ranges.append(("%d-01-01"%y, endDate))
else:
_ranges.append(("%d-01-01"%y, "%d-12-31"%y))
return _ranges
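# Illustrative note (not part of the original module): a multi-year range is
# broken at year boundaries, e.g.
#   splitDateRange('2013-05-10', '2015-03-01')
#   # -> [('2013-05-10', '2013-12-31'),
#   #     ('2014-01-01', '2014-12-31'),
#   #     ('2015-01-01', '2015-03-01')]
# Passing timeToMarket=20140220 would first clip the start date to
# '2014-02-20' before splitting.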
# test codes
if "__main__" == __name__:
downloadStockBasics()
|
hisen630/my_stock
|
lib/utils.py
|
Python
|
gpl-2.0
| 2,942
|
# -*- coding: utf-8 -*-
__author__ = 'tobin'
from skimage import morphology
import numpy as np
import cv2
def get_skeleton(region):
result = cv2.threshold(region, 0, 255, cv2.THRESH_OTSU)[1]
result = morphology.skeletonize(result > 0)
result = result.astype(np.uint8) * 255
return result
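# Illustrative usage sketch (not part of the original module), assuming a
# single-channel uint8 image loaded elsewhere; the file name is hypothetical:
#   region = cv2.imread('road.png', cv2.IMREAD_GRAYSCALE)
#   skel = get_skeleton(region)
# Otsu thresholding binarises the region, skeletonize() thins it to
# one-pixel-wide lines, and the result is scaled back to a 0/255 uint8 image.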
|
skyczhao/silver
|
Road/marching/skeleton.py
|
Python
|
gpl-2.0
| 306
|
#!/usr/bin/env python3
import os
import subprocess
import sys
if not os.environ.get('DESTDIR'):
print('Compiling gsettings schemas...')
subprocess.call(['glib-compile-schemas', sys.argv[1]])
|
GNOME/gnome-user-share
|
meson_post_install.py
|
Python
|
gpl-2.0
| 200
|
import pytest
import fauxfactory
from cfme.configure.settings import DefaultView
from cfme.automate.service_dialogs import DialogCollection
from cfme.services.catalogs.catalog_item import CatalogItem
from cfme.services.catalogs.catalog import Catalog
from cfme.services.catalogs.orchestration_template import OrchestrationTemplate
from cfme.services.service_catalogs import ServiceCatalogs
from cfme.services.myservice import MyService
from cfme.cloud.provider import CloudProvider
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.stack import StackCollection
from cfme import test_requirements
from cfme.utils import testgen
from cfme.utils.conf import credentials
from cfme.utils.path import orchestration_path
from cfme.utils.datafile import load_data_file
from cfme.utils.log import logger
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.meta(server_roles="+automate"),
pytest.mark.ignore_stream("upstream"),
test_requirements.stack,
pytest.mark.tier(2)
]
pytest_generate_tests = testgen.generate(
[CloudProvider], required_fields=[
['provisioning', 'stack_provisioning']
],
scope="module")
@pytest.yield_fixture(scope="function")
def template(provider, provisioning, setup_provider):
template_type = provisioning['stack_provisioning']['template_type']
template_name = fauxfactory.gen_alphanumeric()
template = OrchestrationTemplate(template_type=template_type,
template_name=template_name)
if provider.type == "ec2":
data_file = load_data_file(str(orchestration_path.join('aws_vm_template.json')))
elif provider.type == "openstack":
data_file = load_data_file(str(orchestration_path.join('openstack_vm_template.data')))
elif provider.type == "azure":
data_file = load_data_file(str(orchestration_path.join('azure_vm_template.json')))
template.create(data_file.read().replace('CFMETemplateName', template_name))
if provider.type == "azure":
dialog_name = "azure-single-vm-from-user-image"
else:
dialog_name = "dialog_" + fauxfactory.gen_alphanumeric()
if provider.type != "azure":
template.create_service_dialog_from_template(dialog_name, template.template_name)
yield template, dialog_name
@pytest.yield_fixture(scope="function")
def dialog(appliance, provider, template):
template, dialog_name = template
service_name = fauxfactory.gen_alphanumeric()
element_data = dict(
ele_label="ele_" + fauxfactory.gen_alphanumeric(),
ele_name="service_name",
ele_desc="my ele desc",
choose_type="Text Box",
default_text_box=service_name
)
dialog = DialogCollection(appliance)
sd = dialog.instantiate(label=dialog_name)
tab = sd.tabs.instantiate(tab_label="Basic Information")
box = tab.boxes.instantiate(box_label="Options")
element = box.elements.instantiate(element_data=element_data)
element.add_another_element(element_data)
yield template, sd, service_name
@pytest.yield_fixture(scope="function")
def catalog():
cat_name = "cat_" + fauxfactory.gen_alphanumeric()
catalog = Catalog(name=cat_name, description="my catalog")
catalog.create()
yield catalog
@pytest.yield_fixture(scope="function")
def catalog_item(dialog, catalog, template, provider):
template, dialog, service_name = dialog
item_name = service_name
catalog_item = CatalogItem(item_type="Orchestration",
name=item_name,
description="my catalog",
display_in=True,
catalog=catalog,
dialog=dialog,
orch_template=template,
provider=provider)
catalog_item.create()
yield catalog_item, template
def random_desc():
return fauxfactory.gen_alphanumeric()
def prepare_stack_data(provider, provisioning):
random_base = fauxfactory.gen_alphanumeric()
stackname = 'test-stack-{}'.format(random_base)
vm_name = 'test-stk-{}'.format(random_base)
stack_timeout = "20"
if provider.one_of(AzureProvider):
size, resource_group, os_type, mode = map(provisioning.get,
('vm_size', 'resource_group', 'os_type', 'mode'))
try:
template = provider.data.templates.small_template
vm_user = credentials[template.creds].username
vm_password = credentials[template.creds].password
except AttributeError:
pytest.skip('Could not find small_template or credentials for {}'.format(provider.name))
# service order appends a type string to the template name
user_image = 'Windows | {}'.format(template.name)
stack_data = {
'stack_name': stackname,
'vm_name': vm_name,
'resource_group': resource_group,
'mode': mode,
'vm_user': vm_user,
'vm_password': vm_password,
'user_image': user_image,
'os_type': os_type,
'vm_size': size
}
elif provider.type == 'openstack':
stack_prov = provisioning['stack_provisioning']
stack_data = {
'stack_name': stackname,
'key': stack_prov['key_name'],
'flavor': stack_prov['instance_type'],
}
else:
stack_prov = provisioning['stack_provisioning']
stack_data = {
'stack_name': stackname,
'stack_timeout': stack_timeout,
'vm_name': vm_name,
'key_name': stack_prov['key_name'],
'select_instance_type': stack_prov['instance_type'],
'ssh_location': provisioning['ssh_location']
}
return stack_data
def test_provision_stack(appliance, setup_provider, provider, provisioning, catalog, catalog_item,
request):
"""Tests stack provisioning
Metadata:
test_flag: provision
"""
catalog_item, template = catalog_item
stack_data = prepare_stack_data(provider, provisioning)
@request.addfinalizer
def _cleanup_vms():
clean_up(stack_data, provider)
service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog,
catalog_item.name, stack_data)
service_catalogs.order()
logger.info('Waiting for cfme provision request for service {}'.format(catalog_item.name))
request_description = catalog_item.name
provision_request = appliance.collections.requests.instantiate(request_description,
partial_check=True)
provision_request.wait_for_request()
assert provision_request.is_succeeded()
def test_reconfigure_service(appliance, provider, provisioning, catalog, catalog_item, request):
"""Tests stack provisioning
Metadata:
test_flag: provision
"""
catalog_item, template = catalog_item
stack_data = prepare_stack_data(provider, provisioning)
@request.addfinalizer
def _cleanup_vms():
clean_up(stack_data, provider)
service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog,
catalog_item.name, stack_data)
service_catalogs.order()
logger.info('Waiting for cfme provision request for service {}'.format(catalog_item.name))
request_description = catalog_item.name
provision_request = appliance.collections.requests.instantiate(request_description,
partial_check=True)
provision_request.wait_for_request()
assert provision_request.is_succeeded()
myservice = MyService(appliance, catalog_item.name)
myservice.reconfigure_service()
def test_remove_template_provisioning(appliance, provider, provisioning, catalog, catalog_item):
"""Tests stack provisioning
Metadata:
test_flag: provision
"""
catalog_item, template = catalog_item
stack_data = prepare_stack_data(provider, provisioning)
service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog,
catalog_item.name, stack_data)
service_catalogs.order()
    # This is part of the test - remove the template and see if provisioning fails, so it is not added as a finalizer
template.delete()
request_description = 'Provisioning Service [{}] from [{}]'.format(catalog_item.name,
catalog_item.name)
provision_request = appliance.collections.requests.instantiate(request_description)
provision_request.wait_for_request(method='ui')
assert (provision_request.row.last_message.text == 'Service_Template_Provisioning failed' or
provision_request.row.status.text == "Error")
def test_retire_stack(appliance, provider, provisioning, catalog, catalog_item, request):
"""Tests stack provisioning
Metadata:
test_flag: provision
"""
catalog_item, template = catalog_item
DefaultView.set_default_view("Stacks", "Grid View")
stack_data = prepare_stack_data(provider, provisioning)
service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog,
catalog_item.name, stack_data)
service_catalogs.order()
logger.info('Waiting for cfme provision request for service {}'.format(catalog_item.name))
request_description = catalog_item.name
provision_request = appliance.collections.requests.instantiate(request_description,
partial_check=True)
provision_request.wait_for_request()
assert provision_request.is_succeeded()
stack = StackCollection(appliance).instantiate(stack_data['stack_name'], provider=provider)
stack.wait_for_exists()
stack.retire_stack()
@request.addfinalizer
def _cleanup_vms():
clean_up(stack_data, provider)
def clean_up(stack_data, provider):
try:
# stack_exist returns 400 if stack ID not found, which triggers an exception
if provider.mgmt.stack_exist(stack_data['stack_name']):
wait_for(lambda: provider.mgmt.delete_stack(stack_data['stack_name']),
delay=10, num_sec=800, message="wait for stack delete")
if provider.type == 'azure' and provider.mgmt.does_vm_exist(stack_data['vm_name']):
wait_for(lambda: provider.mgmt.delete_vm(stack_data['vm_name']),
delay=10, num_sec=800, message="wait for vm delete")
catalog_item.orch_template.delete()
except Exception as ex:
logger.warning('Exception while checking/deleting stack, continuing: {}'
.format(ex.message))
|
okolisny/integration_tests
|
cfme/tests/services/test_provision_stack.py
|
Python
|
gpl-2.0
| 10,894
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib.request
from urllib.error import HTTPError
import re
import csv
def download_page(url):
    """Downloads the page at the given URL and stores it in the
    folder 'pages'."""
    print(url)
    success = True
    try:
        page = urllib.request.urlopen(url)
    except HTTPError:
        success = False
    if success:
        page = page.read()
        page = page.decode("utf-8")
        filename = re.sub('/', '_', re.findall('/.*?$', url)[0])
        filename = re.sub(':', '-', filename)
        with open('pages/' + filename, 'w', encoding='utf-8') as local_file:
            local_file.write(page)
for i in range(347, 348):
download_page('https://www.hse.ru/edu/vkr/index.html?page=' + str(i) + '&textAvailable=2')
|
school-tagger/school
|
crawler/crawler.py
|
Python
|
gpl-2.0
| 739
|
#! /usr/bin/env python
#
# Copyright (C) 2013-2016 Fabian Gieseke <fabian.gieseke@di.ku.dk>
# License: GPL v2
#
# Inspired by https://github.com/scikit-learn/scikit-learn/blob/master/setup.py
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
#
import os
import sys
import shutil
from distutils.command.clean import clean
# set flag to indicate that the package
# is being set up (similar to the scikit-learn
# installation routine)
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
builtins.__BUFFERKDTREE_SETUP__ = True
DISTNAME = 'bufferkdtree'
DESCRIPTION = 'A Python library for large-scale exact nearest neighbor search using Buffer k-d Trees (bufferkdtree).'
LONG_DESCRIPTION = open('README.rst').read()
MAINTAINER = 'Fabian Gieseke'
MAINTAINER_EMAIL = 'fabian.gieseke@di.ku.dk'
URL = 'https://github.com/gieseke/bufferkdtree'
LICENSE = 'GNU GENERAL PUBLIC LICENSE Version 2'
DOWNLOAD_URL = 'https://github.com/gieseke/bufferkdtree'
# adapted from scikit-learn
if len(set(('develop', 'release')).intersection(sys.argv)) > 0:
import setuptools
extra_setuptools_args = dict(zip_safe=False)
else:
extra_setuptools_args = dict()
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True,
)
config.add_subpackage('bufferkdtree')
return config
class CleanCommand(clean):
description = "Cleaning up code ..."
def run(self):
clean.run(self)
# remove hidden '~' files
for dirpath, dirnames, filenames in os.walk('.'):
for filename in filenames:
if filename.endswith('~'):
os.unlink(os.path.join(dirpath, filename))
# build related files and directories
if os.path.exists('build'):
shutil.rmtree('build')
if os.path.exists('bufferkdtree.egg-info'):
shutil.rmtree('bufferkdtree.egg-info')
if os.path.exists('docs/_build'):
shutil.rmtree('docs/_build')
# remaining files and directories in bufferkdtree dir (recursively)
for dirpath, dirnames, filenames in os.walk('bufferkdtree'):
for filename in filenames:
if (filename.endswith('.so') or \
filename.endswith('.pyd') or \
filename.endswith('.dll') or \
filename.endswith('.pyc') or \
filename.endswith('_wrap.c') or \
filename.startswith('wrapper_') or \
filename.endswith('~')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__' or dirname == 'build' or dirname == '_build':
shutil.rmtree(os.path.join(dirpath, dirname))
try:
shutil.rmtree("dist")
except:
pass
def setup_package():
import bufferkdtree
VERSION = bufferkdtree.__version__
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
cmdclass={'clean': CleanCommand},
setup_requires=["numpy>=1.11.0"],
install_requires=["numpy>=1.11.0"],
include_package_data=True,
package_data={'bufferkdtree': ['src/neighbors/brute/kernels/opencl/*.cl',
'src/neighbors/buffer_kdtree/kernels/*.cl'
]},
**extra_setuptools_args)
if (len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or sys.argv[1] in ('--version', 'clean'))):
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
setup(**metadata)
else:
# if pip is installed, make sure that numpy
# is installed (pip is not a requirement
# for the bufferkdtree package)
try:
import pip
pip.main(["install", "numpy>=1.11.0"])
except:
pass
try:
from numpy.distutils.core import setup as numpy_setup
metadata['configuration'] = configuration
numpy_setup(**metadata)
except Exception as e:
print("Could not install package: %s" % str(e))
sys.exit(0)
if __name__ == "__main__":
setup_package()
|
gieseke/bufferkdtree
|
setup.py
|
Python
|
gpl-2.0
| 5,896
|
'''
Author: Julian van Eyken Date: May 15 2013
Package/class for handling of images created from photon lists that are derotated and
mapped to sky coordinates.
**Temporary version 2 of RADecImage, attempting to reduce memory usage a bit.
This is currently not the main version to use!**
'''
import time
import numpy as np
import tables
import matplotlib.pyplot as mpl
import hotpix.hotPixels as hp
from util import utils
import photonlist.photlist as pl
from photonlist import boxer
from astrometry import CalculateRaDec as crd
from headers import pipelineFlags
class RADecImage(object):
'''
Class to hold derotated, integrated and possibly stacked images, in the sky coordinate
frame.
'''
def __init__(self,photList=None,nPixRA=None,nPixDec=None,cenRA=None,cenDec=None,
vPlateScale=0.1, detPlateScale=None, firstSec=0, integrationTime=-1):
#expWeightTimeStep=1.0):
'''
Initialise a (possibly empty) RA-dec coordinate frame image.
INPUTS:
photList: optionally provide a PhotList object from which to create an
image (see photonlist.photlist)
nPixRA, nPixDec: integers, Number of pixels in RA and Dec directions
for the virtual image.
cenRA, cenDec: floats, location of center of virtual image in RA and
dec (both in radians)
vPlateScale: float, plate scale for virtual image (arcseconds per
virtual image pixel). Note that the attribute, self.vPlateScale
is stored in *radians* per pixel, as is self.detPlateScale (plate
scale for the detector pixels).
detPlateScale: override the assumed detector plate scale (arcseconds
per detector pixel)
firstSec: float, time from beginning of photon-list file at which to
begin integration
integrationTime: float, length of time to integrate for in seconds. If
-1, integrate to end of photon list.
#### DEPRECATED #####
expWeightTimeStep: float, time step to use when calculating exposure
time weights for the virtual pixels (seconds).
#####################
'''
self.nPixRA = nPixRA #No. of virtual pixels in RA direction
self.nPixDec = nPixDec #No. of virtual pixels in dec. direction
self.cenRA = cenRA #RA location of center of field (radians)
self.cenDec = cenDec #Dec location of center of field (rad)
self.vPlateScale = vPlateScale*2*np.pi/1296000 #No. of radians on sky per virtual pixel.
self.imageIsLoaded = False #Flag to indicate whether an actual image has been loaded yet.
if detPlateScale is None:
self.detPlateScale = crd.CalculateRaDec.platescale*2*np.pi/1296000 #Radians per detector pixel. ******For now - but this really needs reading in from the photon list file.
else:
self.detPlateScale = detPlateScale
if nPixRA is not None and nPixDec is not None:
self.image = np.empty((self.nPixDec,self.nPixRA),dtype=float) #To take a (possibly stacked) image in virtual
self.image.fill(np.nan)
self.effIntTimes = np.empty_like(self.image) #Effective integration times for each pixel, in seconds.
self.effIntTimes.fill(np.nan)
self.expTimeWeights = np.empty((self.nPixDec,self.nPixRA),dtype=float) #Weights for each pixel in the virtual image to account for effective integration time on each pixel.
self.expTimeWeights.fill(np.nan)
self.gridRA = np.empty((self.nPixRA),dtype=float) #Virtual pixel boundaries in the RA direction
self.gridRA.fill(np.nan)
self.gridDec = np.empty((self.nPixDec),dtype=float) #Virtual pixel boundaries in the dec. direction.
self.gridDec.fill(np.nan)
self.totExpTime = np.nan #Total exposure time included in current image
#self.expWeightTimeStep = expWeightTimeStep
else:
self.image = None
self.effIntTimes = None
self.expTimeWeights = None
self.gridRA = None
self.gridDec = None
self.totExpTime = None
#self.expWeightTimeStep = expWeightTimeStep
if (cenRA is not None and cenDec is not None and vPlateScale is not None
and nPixRA is not None and nPixDec is not None):
self.setCoordGrid()
if photList is not None:
self.loadImage(photList,firstSec=firstSec,integrationTime=integrationTime)
def setCoordGrid(self):
'''
Establish RA and dec coordinates for pixel boundaries in the virtual pixel grid,
given the number of pixels in each direction (self.nPixRA and self.nPixDec), the
location of the centre of the array (self.cenRA, self.cenDec), and the plate scale
(self.vPlateScale).
'''
#self.gridRA = np.empty((self.nPixDec,self.nPixRA),dtype=float)
#self.gridRA.fill(np.nan)
#self.gridDec = np.empty((self.nPixDec,self.nPixRA),dtype=float)
#self.gridDec.fill(np.nan)
#Note - +1's are because these are pixel *boundaries*, not pixel centers:
self.gridRA = self.cenRA + (self.vPlateScale*(np.arange(self.nPixRA+1) - ((self.nPixRA+1)//2)))
self.gridDec = self.cenDec + (self.vPlateScale*(np.arange(self.nPixDec+1) - ((self.nPixDec+1)//2)))
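    # Illustrative note (not part of the original module): with, say, nPixRA = 4
    # the boundary grid has nPixRA + 1 = 5 entries centred on cenRA,
    #   gridRA = cenRA + vPlateScale * (np.arange(5) - 2)
    #          = cenRA + vPlateScale * [-2, -1, 0, 1, 2]
    # so the virtual pixels span [cenRA - 2*vPlateScale, cenRA + 2*vPlateScale],
    # with vPlateScale already converted in __init__ to radians per pixel
    # (0.1 arcsec/pixel -> 0.1 * 2*pi / 1296000 ~= 4.85e-7 rad/pixel).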
def loadImage(self,photList,firstSec=0,integrationTime=-1,wvlMin=None,wvlMax=None,
doStack=False, #expWeightTimeStep=None,
savePreStackImage=None, doWeighted=True): #savePreStackImage is temporary for test purposes
'''
Build a de-rotated stacked image from a photon list (PhotList) object.
If the RADecImage instance already contains an image, the new image is added to it.
INPUTS:
photList - a PhotList object from which to construct the image.
firstSec - time from start of exposure to start the 'integration' for the image (seconds)
integrationTime - duration of integration time to include in the image (in seconds; -1 or NaN => to end of exposure)
wvlMin, wvlMax - min and max wavelengths of photons to include in the image (Angstroms).
doStack - boolean; if True, then stack the image to be loaded on top of any image data already present.
#### DEPRECATED - NOW GETS TIME STEPS STRAIGHT FROM CENTROID LIST FILES #####
expWeightTimeStep - see __init__. If set here, overrides any value already set in the RADecImage object.
If the new image is being stacked on top of a current image, a new value can be
supplied that is different from the current image's value; but only the last value used
(i.e. the one supplied) will be stored in the class attribute.
################################
wvlMin, wvlMax - set min and max wavelength cutoffs for photons to be loaded in.
savePreStackImage - temporary fudge, set to a file-name to save the image out to a file prior to stacking.
doWeighted - if True, includes flat and flux weighting (i.e. flatfielding and spectral response)factors from photons,
and rejects photons from pixels where the flatfield is bad at any wavelength within the requested
wavelength range (all if wvlMin/wvl Max not specified).
****NOTE - FLUX WEIGHTING NOT FULLY TESTED -- but looks probably okay.****
'''
#posErr = 0.8 #Approx. position error in arcsec (just a fixed estimate for now, will improve later)
#posErr *= 2*np.pi/(60.*60.*360.) #Convert to radians
imLoadTic = time.clock()
photTable = photList.file.root.photons.photons #Shortcut to table
#if expWeightTimeStep is not None:
# self.expWeightTimeStep=expWeightTimeStep
if wvlMin is not None and wvlMax is None: wvlMax = np.inf
if wvlMin is None and wvlMax is not None: wvlMin = 0.0
#Figure out last second of integration
obsFileExpTime = photList.header.cols.exptime[0]
if integrationTime==-1 or firstSec+integrationTime > obsFileExpTime:
lastSec = obsFileExpTime
else:
lastSec = firstSec+integrationTime
#If virtual coordinate grid is not yet defined, figure it out.
if self.gridRA is None or self.gridDec is None:
#Find RA/dec range needed, taking advantage of the fact that the ra/dec columns are (or should be) indexed....
print 'Finding RA/dec ranges'
self.raMin = photTable.cols.ra[photTable.colindexes['ra'][0]]
self.raMax = photTable.cols.ra[photTable.colindexes['ra'][-1]]
self.decMin = photTable.cols.dec[photTable.colindexes['dec'][0]]
self.decMax = photTable.cols.dec[photTable.colindexes['dec'][-1]]
self.cenRA = (self.raMin+self.raMax)/2.0
self.cenDec = (self.decMin+self.decMax)/2.0
#Set size of virtual grid to accommodate.
if self.nPixRA is None:
#+1 for round up; +1 because coordinates are the boundaries of the virtual pixels, not the centers.
self.nPixRA = int((self.raMax-self.raMin)//self.vPlateScale + 2)
if self.nPixDec is None:
self.nPixDec = int((self.decMax-self.decMin)//self.vPlateScale + 2)
self.setCoordGrid()
#Short-hand notations for no. of detector and virtual pixels, just for clarity:
nDPixRow,nDPixCol = photList.nRow,photList.nCol
nVPixRA,nVPixDec = self.nPixRA,self.nPixDec
#Calculate ratio of virtual pixel area to detector pixel area
vdPixAreaRatio = (self.vPlateScale/self.detPlateScale)**2
#Make a boolean mask of dead (non functioning for whatever reason) pixels
#True (1) = good; False (0) = dead
#First on the basis of the wavelength cals:
wvlCalFlagImage = photList.getBadWvlCalFlags()
deadPixMask = np.where(wvlCalFlagImage == pipelineFlags.waveCal['good'], 1, 0) #1.0 where flag is good; 0.0 otherwise. (Straight boolean mask would work, but not guaranteed for Python 4....)
#Next on the basis of the flat cals (or all ones if weighting not requested)
if doWeighted:
flatCalFlagArray = photList.file.root.flatcal.flags.read() # 3D array - nRow * nCol * nWavelength Bins.
flatWvlBinEdges = photList.file.root.flatcal.wavelengthBins.read() # 1D array of wavelength bin edges for the flat cal.
lowerEdges = flatWvlBinEdges[0:-1]
upperEdges = flatWvlBinEdges[1:]
if wvlMin is None and wvlMax is None:
inRange = np.ones(len(lowerEdges),dtype=bool) # (all bins in range implies all True)
else:
inRange = ((lowerEdges >= wvlMin) & (lowerEdges < wvlMax) |
(upperEdges >= wvlMin) & (lowerEdges < wvlMax))
flatCalMask = np.where(np.all(flatCalFlagArray[:,:,inRange]==False, axis=2), 1, 0) # Should be zero where any pixel has a bad flag at any wavelength within the requested range; one otherwise. Spot checked, seems to work.
else:
flatCalMask = np.ones((nDPixRow,nDPixCol))
#If hot pixels time-mask data not already parsed in, then parse it.
if photList.hotPixTimeMask is None:
photList.parseHotPixTimeMask() #Loads time mask dictionary into photList.hotPixTimeMask
#First find start/end times of each timestep ('frame') for calculating effective exp. times
#and for subdividing the image data (the latter is only needed for the purposes of
#splitting the data into small chunks so it'll fit in memory easily).
#Use the same timesteps as used in calculating the astrometry.
tStartFramesAll = np.array(photList.file.root.centroidList.times.read()) #Convert to array, since it's saved as a list.
tEndFramesAll = np.append(tStartFramesAll[1:], np.inf) #Last frame goes on forever as far as we know at the moment
withinIntegration = ((tStartFramesAll < lastSec) & (tEndFramesAll > firstSec))
tStartFrames = tStartFramesAll[withinIntegration].clip(min=firstSec) #Now clip so that everything is within the requested integration time.
tEndFrames = tEndFramesAll[withinIntegration].clip(max=lastSec)
nFrames = len(tStartFrames)
assert nFrames > 0 #Otherwise we have a problem....
assert np.all(tStartFrames <= lastSec) and np.all(tEndFrames >= firstSec)
#Get x,y locations of detector pixel corners (2D array of each x,y value, in detector space)
dPixXmin = np.indices((nDPixRow,nDPixCol))[1] - 0.5
dPixXmax = np.indices((nDPixRow,nDPixCol))[1] + 0.5
dPixYmin = np.indices((nDPixRow,nDPixCol))[0] - 0.5
dPixYmax = np.indices((nDPixRow,nDPixCol))[0] + 0.5
dPixXminFlat = dPixXmin.flatten() #Flattened versions of the same since getRaDec() only works on flat arrays.
dPixXmaxFlat = dPixXmax.flatten()
dPixYminFlat = dPixYmin.flatten()
dPixYmaxFlat = dPixYmax.flatten()
#Create (1D) arrays for normalised center locations of virtual pixel grid (=index numbers, representing location of unit squares)
vPixRANormCen = np.arange(nVPixRA) #np.indices(nVPixDec,nVPixRA)[1]
vPixDecNormCen = np.arange(nVPixDec) #np.indices(nVPixDec,nVPixRA)[0]
#Create 1D arrays marking edges of virtual pixels (in 'normalised' space...)
vPixRANormMin = np.arange(nVPixRA)-0.5
vPixRANormMax = np.arange(nVPixRA)+0.5
vPixDecNormMin = np.arange(nVPixDec)-0.5
vPixDecNormMax = np.arange(nVPixDec)+0.5
#Find origin of virtual array (center of virtual pixel 0,0) in RA/dec space.
vPixOriginRA = np.mean(self.gridRA[0:2])
vPixOriginDec = np.mean(self.gridDec[0:2])
vPixSize = self.vPlateScale #Short hand, Length of side of virtual pixel in radians (assume square pixels)
#Make arrays to take the total exposure times and image data for each virtual pixel at each time step
vExpTimesStack = np.zeros((nVPixDec,nVPixRA,nFrames))
imageStack = np.zeros((nVPixDec,nVPixRA,nFrames))
#And one for the total exposure time at each pixel summed over all time steps
vExpTimes = np.zeros((nVPixDec,nVPixRA))
#Array to hold list of (equal) timestamps for each pixel at each timestep
#(just for calculating the RA/dec coordinates of the pixel corners)
frameTimeFlat = np.zeros((nDPixRow*nDPixCol)) #Also flat array for the purposes of getRaDec()
frameTimeFlat.fill(np.nan)
#Initialise RA/dec calculations of pixel locations for exposure time weighting
raDecCalcObject = crd.CalculateRaDec(photList.file.root.centroidList)
#------------ Loop through the time steps ----------
for iFrame in range(nFrames):
print 'Time slice: ',iFrame+1, '/', nFrames
#-------------Make image for this time step-----------
#Get the photons
print 'Getting photon coords'
print 'wvlMin, wvlMax: ',wvlMin,wvlMax
if wvlMin is None:
assert wvlMin is None and wvlMax is None
print '(getting all wavelengths)'
tic = time.clock()
strt, fin = tStartFrames[iFrame], tEndFrames[iFrame] #Just because Numexpr can't handle indexing, it seems
photons = photTable.readWhere('(arrivalTime>=strt) & (arrivalTime<fin)')
#photons = np.array([row.fetch_all_fields() for row in photTable.where('(arrivalTime>=strt) & (arrivalTime<=fin)')])
#photIndices = photTable.getWhereList('(arrivalTime>=strt) & (arrivalTime<=fin)')
print 'Time taken (s): ',time.clock()-tic
            else:
                assert wvlMin is not None and wvlMax is not None
                print '(trimming wavelength range) '
                strt, fin = tStartFrames[iFrame], tEndFrames[iFrame]  #Numexpr needs plain local variables, as above
                photons = photTable.readWhere('(arrivalTime>=strt) & (arrivalTime<=fin) & (wavelength>=wvlMin) & (wavelength<=wvlMax)')
#Filter out photons to be masked out on the basis of detector pixel
print 'Finding bad detector pixels...'
detPixMask = deadPixMask * flatCalMask #Combine wave cal pixel mask and flat cal mask (should be the same in an ideal world, but not
whereBad = np.where(detPixMask == 0)
badXY = pl.xyPack(whereBad[0],whereBad[1]) #Array of packed x-y values for bad pixels (CHECK X,Y THE RIGHT WAY ROUND!)
allPhotXY = photons['xyPix'] #Array of packed x-y values for all photons
#Get a boolean array indicating photons whose packed x-y coordinate value is in the 'bad' list.
toReject = np.where(np.in1d(allPhotXY,badXY))[0] #Zero to take index array out of the returned 1-element tuple.
#Chuck out the bad photons
print 'Rejecting photons from bad pixels...'
photons = np.delete(photons,toReject)
#Pull out needed information
print 'Pulling out relevant columns'
photRAs = photons['ra'] #Read all photon coords into an RA and a dec array.
photDecs = photons['dec']
photHAs = photons['ha'] #Along with hour angles...
photWeights = photons['flatWeight'] * photons['fluxWeight'] #********EXPERIMENTING WITH ADDING FLUX WEIGHT - NOT FULLY TESTED, BUT SEEMS OKAY....********
print 'INCLUDING FLUX WEIGHTS!'
photWavelengths = photons['wavelength']
del(photons) #Not needed till next iteration, and it takes up a lot of memory....
if wvlMin is not None or wvlMax is not None:
assert all(photWavelengths>=wvlMin) and all(photWavelengths<=wvlMax)
print 'Min, max photon wavelengths found: ', np.min(photWavelengths), np.max(photWavelengths)
nPhot = len(photRAs)
#Add uniform random dither to each photon, distributed over a square
#area of the same size and orientation as the originating pixel at
#the time of observation.
xRand = np.random.rand(nPhot)*self.detPlateScale-self.detPlateScale/2.0
yRand = np.random.rand(nPhot)*self.detPlateScale-self.detPlateScale/2.0 #Not the same array!
ditherRAs = xRand*np.cos(photHAs) - yRand*np.sin(photHAs)
ditherDecs = yRand*np.cos(photHAs) + xRand*np.sin(photHAs)
photRAs=photRAs+ditherRAs
photDecs=photDecs+ditherDecs
#Make the image for this time slice
if doWeighted:
print 'Making weighted image'
imageStack[:,:,iFrame],thisGridDec,thisGridRA = np.histogram2d(photDecs,photRAs,[self.gridDec,self.gridRA],
weights=photWeights)
else:
print 'Making unweighted image'
imageStack[:,:,iFrame],thisGridDec,thisGridRA = np.histogram2d(photDecs,photRAs,[self.gridDec,self.gridRA])
if savePreStackImage is not None:
                saveName = 'det'+str(strt)+'-'+str(fin)+'s-'+savePreStackImage
print 'Making det-frame image for diagnostics: '+saveName
detImSlice = np.histogram2d(photons['yPix'],photons['xPix'])
mpl.imsave(fname=saveName,arr=detImSlice,origin='lower',
cmap=mpl.cm.gray,vmin=np.percentile(detImSlice, 0.5), vmax=np.percentile(detImSlice,99.5))
#----------Now start figuring out effective exposure times for each virtual pixel----------------
#And start figuring out the exposure time weights....
print 'Calculating effective exposure times'
#Calculate detector pixel corner locations in RA/dec space (needs to be clockwise in RA/dec space! (checked, gives +ve answers).
frameTimeFlat.fill(tStartFrames[iFrame])
dPixRA1,dPixDec1,dummy = raDecCalcObject.getRaDec(frameTimeFlat,dPixXminFlat,dPixYminFlat) #dPix* should all be flat
dPixRA2,dPixDec2,dummy = raDecCalcObject.getRaDec(frameTimeFlat,dPixXminFlat,dPixYmaxFlat)
dPixRA3,dPixDec3,dummy = raDecCalcObject.getRaDec(frameTimeFlat,dPixXmaxFlat,dPixYmaxFlat)
dPixRA4,dPixDec4,dummy = raDecCalcObject.getRaDec(frameTimeFlat,dPixXmaxFlat,dPixYminFlat)
#Normalise to scale where virtual pixel size=1 and origin is the origin of the virtual pixel grid
dPixNormRA1 = (dPixRA1 - vPixOriginRA)/vPixSize #dPixNorm* should all be flat.
dPixNormRA2 = (dPixRA2 - vPixOriginRA)/vPixSize
dPixNormRA3 = (dPixRA3 - vPixOriginRA)/vPixSize
dPixNormRA4 = (dPixRA4 - vPixOriginRA)/vPixSize
dPixNormDec1 = (dPixDec1 - vPixOriginDec)/vPixSize
dPixNormDec2 = (dPixDec2 - vPixOriginDec)/vPixSize
dPixNormDec3 = (dPixDec3 - vPixOriginDec)/vPixSize
dPixNormDec4 = (dPixDec4 - vPixOriginDec)/vPixSize
#Get min and max RA/decs for each of the detector pixels
dPixCornersRA = np.array([dPixNormRA1,dPixNormRA2,dPixNormRA3,dPixNormRA4]) #2D array, 4 by nRow*nCol - should be clockwise, I think!
dPixCornersDec = np.array([dPixNormDec1,dPixNormDec2,dPixNormDec3,dPixNormDec4])
#dPixCornersRA = np.array([dPixNormRA4,dPixNormRA3,dPixNormRA2,dPixNormRA1]) #2D array, 4 by nRow*nCol - reversed, but gives -ve results, so prob. anti-clockwise....
#dPixCornersDec = np.array([dPixNormDec4,dPixNormDec3,dPixNormDec2,dPixNormDec1])
dPixRANormMin = dPixCornersRA.min(axis=0) #Flat 1D array, nRow * nCol
dPixRANormMax = dPixCornersRA.max(axis=0)
dPixDecNormMin = dPixCornersDec.min(axis=0)
dPixDecNormMax = dPixCornersDec.max(axis=0)
#Get array of effective exposure times for each detector pixel based on the hot pixel time mask
#Multiply by the bad pixel mask and the flatcal mask so that non-functioning pixels have zero exposure time.
#Flatten the array in the same way as the previous arrays (1D array, nRow*nCol elements).
detExpTimes = (hp.getEffIntTimeImage(photList.hotPixTimeMask, integrationTime=tEndFrames[iFrame]-tStartFrames[iFrame],
firstSec=tStartFrames[iFrame]) * detPixMask).flatten()
#Loop over the detector pixels and accumulate the exposure time that falls in each
#tic = time.clock()
for iDPix in np.arange(nDPixRow * nDPixCol):
#Find the pixels which are likely to be overlapping (note - could do this as a sorted search to make things faster)
maybeOverlappingRA = np.where((dPixRANormMax[iDPix] > vPixRANormMin) & (dPixRANormMin[iDPix] < vPixRANormMax))[0]
maybeOverlappingDec = np.where((dPixDecNormMax[iDPix] > vPixDecNormMin) & (dPixDecNormMin[iDPix] < vPixDecNormMax))[0]
for overlapLocRA in maybeOverlappingRA:
for overlapLocDec in maybeOverlappingDec:
overlapFrac = boxer.boxer(overlapLocDec,overlapLocRA,dPixCornersDec[:,iDPix],dPixCornersRA[:,iDPix])
expTimeToAdd = overlapFrac*detExpTimes[iDPix]
vExpTimesStack[overlapLocDec,overlapLocRA,iFrame] += expTimeToAdd
#print 'Time taken (s): ',time.clock()-tic
#------------ End loop through time steps ----------
#Sum up the exposure times from each frame:
vExpTimes = np.sum(vExpTimesStack,axis=2)
thisImage = np.sum(imageStack,axis=2)
#Check that wherever the exposure time is zero, there are no photons that have not been rejected
#assert np.all(thisImage[vExpTimes==0] == 0)
#assert 1==0
#Temporary for testing-------------
if savePreStackImage is not None:
print 'Saving pre-stacked image to '+savePreStackImage
mpl.imsave(fname=savePreStackImage,arr=thisImage,origin='lower',cmap=mpl.cm.gray,
vmin=np.percentile(thisImage, 0.5), vmax=np.percentile(thisImage,99.5))
#---------------------------------
if self.imageIsLoaded is False or doStack is False:
self.image = thisImage #For now, let's keep it this way.... Since weighting does odd things.
self.effIntTimes = vExpTimes
self.totExpTime = lastSec-firstSec
self.expTimeWeights = self.totExpTime/self.effIntTimes
self.vExpTimesStack = vExpTimesStack #TEMPORARY FOR DEBUGGING PURPOSES
self.imageIsLoaded = True
else:
assert self.imageIsLoaded == True
print 'Stacking'
self.image += thisImage
self.effIntTimes += vExpTimes
self.totExpTime += lastSec-firstSec
self.expTimeWeights = self.totExpTime/self.effIntTimes
print 'Image load done. Time taken (s): ', time.clock()-imLoadTic
def display(self,normMin=None,normMax=None,expWeight=True,pclip=None,colormap=mpl.cm.gnuplot2,
image=None, logScale=False):
'''
Display the current image. Currently just a short-cut to utils.plotArray,
but needs updating to mark RA and Dec on the axes.
'''
if expWeight:
toDisplay = np.copy(self.image*self.expTimeWeights)
else:
toDisplay = np.copy(self.image)
if logScale is True: toDisplay = np.log10(toDisplay)
if image is not None: toDisplay = image
if pclip:
normMin = np.percentile(toDisplay[np.isfinite(toDisplay)],q=pclip)
normMax = np.percentile(toDisplay[np.isfinite(toDisplay)],q=100.0-pclip)
#Display NaNs as zeros so it looks better
toDisplay[np.isnan(toDisplay)] = 0
#Find the coordinates of the centers of the virtual pixels in degrees
#raMin = (self.gridRA[0:-1] + self.gridRA[1:])/2.0 / np.pi * 180.
#dec = (self.gridDec[0:-1] + self.gridDec[1:])/2.0 / np.pi * 180.
utils.plotArray(toDisplay,cbar=True,normMin=normMin,normMax=normMax,colormap=colormap)
#mpl.imshow(toDisplay,vmin=normMin,vmax=normMax, extent=(180./np.pi)*
# np.array([self.gridRA[0],self.gridRA[-1],self.gridDec[0],self.gridDec[-1]])
def test(photListFileName='/Users/vaneyken/Data/UCSB/ARCONS/Palomar2012/corot18/testPhotonList-blosc.h5',
vPlateScale=0.1, integrationTime=-1,firstSec=0):
photList = tables.openFile(photListFileName,mode='r')
try:
im = RADecImage(photList,vPlateScale=vPlateScale,firstSec=firstSec,
integrationTime=integrationTime)
finally:
print 'Closing phot. list file.'
photList.close()
im.display()
return im
|
bmazin/ARCONS-pipeline
|
photonlist/RADecImage2.py
|
Python
|
gpl-2.0
| 28,302
|
player_size = 0.5
cycle_length = 0.1
steps_per_cycle = 100
game_duration = 200
indexi = [0 , 1.0/3 , -1.0/3 , 2.0/3 , -2.0/3]
indexj = [1 , 2.0/3 , 2.0/3 , 1.0/3 , 1.0/3 ]
gwidth = 90
glength = 120
gfriction = 1
nteams = 2
players_per_team = 5
visualizer_path = '../visualizer/a.out'
max_ball_dist = 2
ball_vel_unit = 10
player_vel = 5
max_read = 1024
|
sjazayeri/minifeet
|
engine/config.py
|
Python
|
gpl-2.0
| 358
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
{
"name" : "Sprint Kanban",#Module's name
"version" : "1.1", #Version's number
"depends" : [ 'project',
'web_kanban',
'base_setup',
'base_status',
'product',
'analytic',
'board',
'mail',
'resource',], #Dependent module
"author" : "Vauxoo",
"description" : """
    This module implements the sprint kanban
""", #Description of the module
"website" : "http://vauxoo.com",#Website
"category" : "Project",
"init_xml" : [],
"demo_xml" : [],
"test" : [],
"update_xml" : [
'security/security_sprint_kanban.xml','security/ir.model.access.csv',
"view/sprint_kanban_view.xml",
"view/project_view.xml",
],
"installable" : True,
"active" : False
}
|
3dfxsoftware/cbss-addons
|
sprint_kanban/__openerp__.py
|
Python
|
gpl-2.0
| 759
|
# -*- coding: utf-8 -*-
# This file is part of MyPaint.
# Copyright (C) 2015 by Andrew Chadwick <a.tchadwick@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Common interfaces & routines for surface and surface-like objects"""
import abc
import contextlib
import numpy
import sys
import os
import logging
logger = logging.getLogger(__name__)
import mypaintlib
import lib.helpers
from lib.errors import FileHandlingError
from lib.gettext import C_
N = mypaintlib.TILE_SIZE
# throttle excessive calls to the save/render feedback_cb
TILES_PER_CALLBACK = 256
class Bounded (object):
"""Interface for objects with an inherent size"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_bbox(self):
"""Returns the bounding box of the object, in model coords
:returns: the data bounding box
:rtype: lib.helpers.Rect
"""
class TileAccessible (Bounded):
"""Interface for objects whose memory is accessible by tile"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def tile_request(self, tx, ty, readonly):
"""Access by tile, read-only or read/write
:param int tx: Tile X coord (multiply by TILE_SIZE for pixels)
:param int ty: Tile Y coord (multiply by TILE_SIZE for pixels)
:param bool readonly: get a read-only tile
Implementations must be `@contextlib.contextmanager`s which
yield one tile array (NxNx16, fix15 data). If called in
read/write mode, implementations must either put back changed
data, or alternatively they must allow the underlying data to be
manipulated directly via the yielded object.
See lib.tiledsurface.MyPaintSurface.tile_request() for a fuller
explanation of this interface and its expectations.
"""
class TileBlittable (Bounded):
"""Interface for unconditional copying by tile"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def blit_tile_into(self, dst, dst_has_alpha, tx, ty, *args, **kwargs):
"""Copies one tile from this object into a NumPy array
:param numpy.ndarray dst: destination array
:param bool dst_has_alpha: destination has an alpha channel
:param int tx: Tile X coord (multiply by TILE_SIZE for pixels)
:param int ty: Tile Y coord (multiply by TILE_SIZE for pixels)
:param \*args: Implementation may extend this interface
:param \*\*kwargs: Implementation may extend this interface
The destination is typically of dimensions NxNx4, and is
typically of type uint16 or uint8. Implementations are expected
to check the details, and should raise ValueError if dst doesn't
have a sensible shape or type.
This is an unconditional copy of this object's raw visible data,
ignoring any flags or opacities on the object itself which would
otherwise control what you see.
        If the object consists of multiple child layers with special
rendering flags, they should be composited normally into an
empty tile, and that resultant tile blitted.
"""
class TileCompositable (Bounded):
"""Interface for compositing by tile, with modes/opacities/flags"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def composite_tile(self, dst, dst_has_alpha, tx, ty, mipmap_level=0,
*args, **kwargs):
"""Composites one tile from this object over a NumPy array.
:param dst: target tile array (uint16, NxNx4, 15-bit scaled int)
:param dst_has_alpha: alpha channel in dst should be preserved
:param int tx: Tile X coord (multiply by TILE_SIZE for pixels)
:param int ty: Tile Y coord (multiply by TILE_SIZE for pixels)
:param int mode: mode to use when compositing
:param \*args: Implementation may extend this interface
:param \*\*kwargs: Implementation may extend this interface
Composite one tile of this surface over the array dst, modifying
only dst. Unlike `blit_tile_into()`, this method must respect
any special rendering settings on the object itself.
"""
class TileRequestWrapper (TileAccessible):
"""Adapts a compositable object into one supporting tile_request()
The wrapping is very minimal.
Tiles are composited into empty buffers on demand and cached.
The tile request interface is therefore read only,
and these wrappers should be used only as temporary objects.
"""
def __init__(self, obj, **kwargs):
"""Adapt a compositable object to support `tile_request()`
:param TileCompositable obj: object w/ tile-based compositing
:param **kwargs: Keyword args to pass to `composite_tile()`.
"""
super(TileRequestWrapper, self).__init__()
self._obj = obj
self._cache = {}
self._opts = kwargs
@contextlib.contextmanager
def tile_request(self, tx, ty, readonly):
"""Context manager that fetches a tile as a NumPy array
To be used with the 'with' statement.
"""
if not readonly:
raise ValueError("Only readonly tile requests are supported")
tile = self._cache.get((tx, ty), None)
if tile is None:
tile = numpy.zeros((N, N, 4), 'uint16')
self._cache[(tx, ty)] = tile
self._obj.composite_tile(tile, True, tx, ty, **self._opts)
yield tile
def get_bbox(self):
"""Explicit passthrough of get_bbox"""
return self._obj.get_bbox()
def __getattr__(self, attr):
"""Pass through calls to other methods"""
return getattr(self._obj, attr)
def get_tiles_bbox(tcoords):
"""Convert tile coords to a data bounding box
:param tcoords: iterable of (tx, ty) coordinate pairs
"""
res = lib.helpers.Rect()
for tx, ty in tcoords:
res.expandToIncludeRect(lib.helpers.Rect(N*tx, N*ty, N, N))
return res
def scanline_strips_iter(surface, rect, alpha=False,
single_tile_pattern=False, **kwargs):
"""Generate (render) scanline strips from a tile-blittable object
:param lib.surface.TileBlittable surface: Surface to iterate over
:param bool alpha: If true, write a PNG with alpha
    :param bool single_tile_pattern: True if the surface is a single tile only.
:param tuple \*\*kwargs: Passed to blit_tile_into.
The `alpha` parameter is passed to the surface's `blit_tile_into()`.
Rendering is skipped for all but the first line of single-tile patterns.
The scanline strips yielded by this generator are suitable for
feeding to a mypaintlib.ProgressivePNGWriter.
"""
# Sizes
x, y, w, h = rect
assert w > 0
assert h > 0
# calculate bounding box in full tiles
render_tx = x/N
render_ty = y/N
render_tw = (x+w-1)/N - render_tx + 1
render_th = (y+h-1)/N - render_ty + 1
# buffer for rendering one tile row at a time
arr = numpy.empty((1*N, render_tw*N, 4), 'uint8') # rgba or rgbu
# view into arr without the horizontal padding
arr_xcrop = arr[:, x-render_tx*N:x-render_tx*N+w, :]
first_row = render_ty
last_row = render_ty+render_th-1
for ty in range(render_ty, render_ty+render_th):
skip_rendering = False
if single_tile_pattern:
# optimization for simple background patterns
# e.g. solid color
if ty != first_row:
skip_rendering = True
for tx_rel in xrange(render_tw):
# render one tile
dst = arr[:, tx_rel*N:(tx_rel+1)*N, :]
if not skip_rendering:
tx = render_tx + tx_rel
try:
surface.blit_tile_into(dst, alpha, tx, ty, **kwargs)
except Exception:
logger.exception("Failed to blit tile %r of %r",
(tx, ty), surface)
mypaintlib.tile_clear_rgba8(dst)
# yield a numpy array of the scanline without padding
res = arr_xcrop
if ty == last_row:
res = res[:y+h-ty*N, :, :]
if ty == first_row:
res = res[y-render_ty*N:, :, :]
yield res
def save_as_png(surface, filename, *rect, **kwargs):
"""Saves a tile-blittable surface to a file in PNG format
:param TileBlittable surface: Surface to save
:param unicode filename: The file to write
:param tuple \*rect: Rectangle (x, y, w, h) to save
:param bool alpha: If true, write a PNG with alpha
:param callable feedback_cb: Called every TILES_PER_CALLBACK tiles.
    :param bool single_tile_pattern: True if the surface is a single tile only.
:param bool save_srgb_chunks: Set to False to not save sRGB flags.
:param tuple \*\*kwargs: Passed to blit_tile_into (minus the above)
The `alpha` parameter is passed to the surface's `blit_tile_into()`
method, as well as to the PNG writer. Rendering is
skipped for all but the first line for single-tile patterns.
If `*rect` is left unspecified, the surface's own bounding box will
be used.
If `save_srgb_chunks` is set to False, sRGB (and associated fallback
cHRM and gAMA) will not be saved. MyPaint's default behaviour is
currently to save these chunks.
Raises `lib.errors.FileHandlingError` with a descriptive string if
something went wrong.
"""
# Horrible, dirty argument handling
alpha = kwargs.pop('alpha', False)
feedback_cb = kwargs.pop('feedback_cb', None)
single_tile_pattern = kwargs.pop("single_tile_pattern", False)
save_srgb_chunks = kwargs.pop("save_srgb_chunks", True)
# Sizes. Save at least one tile to allow empty docs to be written
if not rect:
rect = surface.get_bbox()
x, y, w, h = rect
if w == 0 or h == 0:
x, y, w, h = (0, 0, 1, 1)
rect = (x, y, w, h)
writer_fp = None
try:
writer_fp = open(filename, "wb")
logger.debug(
"Writing %r (%dx%d) alpha=%r srgb=%r",
filename,
w, h,
alpha,
save_srgb_chunks,
)
pngsave = mypaintlib.ProgressivePNGWriter(
writer_fp,
w, h,
alpha,
save_srgb_chunks,
)
feedback_counter = 0
scanline_strips = scanline_strips_iter(
surface, rect,
alpha=alpha,
single_tile_pattern=single_tile_pattern,
**kwargs
)
for scanline_strip in scanline_strips:
pngsave.write(scanline_strip)
if feedback_cb and feedback_counter % TILES_PER_CALLBACK == 0:
feedback_cb()
feedback_counter += 1
pngsave.close()
logger.debug("Finished writing %r", filename)
except (IOError, OSError, RuntimeError) as err:
logger.exception(
"Caught %r from C++ png-writer code, re-raising as a "
"FileHandlingError",
err,
)
raise FileHandlingError(C_(
"low-level PNG writer failure report (dialog)",
            u"Failed to write “{basename}”.\n\n"
            u"Reason: {err}\n"
            u"Target folder: “{dirname}”."
).format(
err = err,
basename = os.path.basename(filename),
dirname = os.path.dirname(filename),
))
# Other possible exceptions include TypeError, ValueError, but
# those indicate incorrect coding usually; just raise them
# normally.
finally:
if writer_fp:
writer_fp.close()
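

# Editor's addition: a hedged usage sketch, not part of the original module.
# It assumes `surface` implements the TileBlittable interface and `layer`
# implements TileCompositable as documented above; both objects are hypothetical.
def _example_surface_usage(surface, layer, filename="/tmp/example.png"):
    """Minimal sketch of the two public entry points defined in this module."""
    # Write the blittable surface out over its own bounding box (rect omitted).
    save_as_png(surface, filename, alpha=True)
    # Adapt a compositable layer so callers expecting tile_request() can read it.
    wrapper = TileRequestWrapper(layer)
    with wrapper.tile_request(0, 0, readonly=True) as tile:
        assert tile.shape == (N, N, 4)  # fix15 RGBA tile, rendered on demand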
|
dothiko/mypaint
|
lib/surface.py
|
Python
|
gpl-2.0
| 11,840
|
import sublime
import sublime_plugin
import re
DEFINITION_KEY = 'MarkdownEditing-footnote-definitions'
REFERENCE_KEY = 'MarkdownEditing-footnote-references'
REFERENCE_REGEX = "\[\^([^\]]*)\]"
DEFINITION_REGEX = "^ *\[\^([^\]]*)\]:"
def get_footnote_references(view):
ids = {}
for ref in view.get_regions(REFERENCE_KEY):
if not re.match(DEFINITION_REGEX, view.substr(view.line(ref))):
id = view.substr(ref)[2:-1]
if id in ids:
ids[id].append(ref)
else:
ids[id] = [ref]
return ids
def get_footnote_definition_markers(view):
ids = {}
for defn in view.get_regions(DEFINITION_KEY):
id = view.substr(defn).strip()[2:-2]
ids[id] = defn
return ids
def get_footnote_identifiers(view):
ids = get_footnote_references(view).keys()
ids.sort()
return ids
def get_last_footnote_marker(view):
ids = sorted([int(a) for a in get_footnote_identifiers(view) if a.isdigit()])
if len(ids):
return int(ids[-1])
else:
return 0
def get_next_footnote_marker(view):
return get_last_footnote_marker(view) + 1
def is_footnote_definition(view):
line = view.substr(view.line(view.sel()[-1]))
return re.match(DEFINITION_REGEX, line)
def is_footnote_reference(view):
refs = view.get_regions(REFERENCE_KEY)
for ref in refs:
if ref.contains(view.sel()[0]):
return True
return False
def strip_trailing_whitespace(view, edit):
tws = view.find('\s+\Z', 0)
if tws:
view.erase(edit, tws)
class MarkFootnotes(sublime_plugin.EventListener):
def update_footnote_data(self, view):
view.add_regions(REFERENCE_KEY, view.find_all(REFERENCE_REGEX), '', 'cross', sublime.HIDDEN)
view.add_regions(DEFINITION_KEY, view.find_all(DEFINITION_REGEX), '', 'cross', sublime.HIDDEN)
def on_modified(self, view):
self.update_footnote_data(view)
def on_load(self, view):
self.update_footnote_data(view)
class GatherMissingFootnotesCommand(sublime_plugin.TextCommand):
def run(self, edit):
edit = self.view.begin_edit()
refs = get_footnote_identifiers(self.view)
defs = get_footnote_definition_markers(self.view)
missingnotes = [note_token for note_token in refs if not note_token in defs]
if len(missingnotes):
self.view.insert(edit, self.view.size(), "\n")
for note in missingnotes:
self.view.insert(edit, self.view.size(), '\n [^%s]: ' % note)
self.view.end_edit(edit)
def is_enabled(self):
return True
class InsertFootnoteCommand(sublime_plugin.TextCommand):
def run(self, edit):
edit = self.view.begin_edit()
startloc = self.view.sel()[-1].end()
markernum = get_next_footnote_marker(self.view)
if bool(self.view.size()):
targetloc = self.view.find('(\s|$)', startloc).begin()
else:
targetloc = 0
self.view.insert(edit, targetloc, '[^%s]' % markernum)
self.view.insert(edit, self.view.size(), '\n [^%s]: ' % markernum)
self.view.run_command('set_motion', {"inclusive": True, "motion": "move_to", "motion_args": {"extend": True, "to": "eof"}})
if self.view.settings().get('command_mode'):
self.view.run_command('enter_insert_mode', {"insert_command": "move", "insert_args": {"by": "characters", "forward": True}})
self.view.end_edit(edit)
def is_enabled(self):
return True
class GoToFootnoteDefinitionCommand(sublime_plugin.TextCommand):
def run(self, edit):
defs = get_footnote_definition_markers(self.view)
regions = self.view.get_regions(REFERENCE_KEY)
sel = self.view.sel()
if len(sel) == 1:
target = None
selreg = sel[0]
for region in regions:
if selreg.intersects(region):
target = self.view.substr(region)[2:-1]
if not target:
try:
target = self.view.substr(self.view.find(REFERENCE_REGEX, sel[-1].end()))[2:-1]
except:
pass
if target:
self.view.sel().clear()
self.view.sel().add(defs[target])
self.view.show(defs[target])
def is_enabled(self):
return True
class GoToFootnoteReferenceCommand(sublime_plugin.TextCommand):
def run(self, edit):
refs = get_footnote_references(self.view)
match = is_footnote_definition(self.view)
if match:
target = match.groups()[0]
self.view.sel().clear()
[self.view.sel().add(a) for a in refs[target]]
self.view.show(refs[target][0])
def is_enabled(self):
return True
class MagicFootnotesCommand(sublime_plugin.TextCommand):
def run(self, edit):
if (is_footnote_definition(self.view)):
self.view.run_command('go_to_footnote_reference')
elif (is_footnote_reference(self.view)):
self.view.run_command('go_to_footnote_definition')
else:
self.view.run_command('insert_footnote')
def is_enabled(self):
return True
class SwitchToFromFootnoteCommand(sublime_plugin.TextCommand):
def run(self, edit):
if (is_footnote_definition(self.view)):
self.view.run_command('go_to_footnote_reference')
else:
self.view.run_command('go_to_footnote_definition')
def is_enabled(self):
return True
class SortFootnotesCommand(sublime_plugin.TextCommand):
def run(self, edit):
edit = self.view.begin_edit()
strip_trailing_whitespace(self.view, edit)
self.view.end_edit(edit)
edit = self.view.begin_edit()
defs = get_footnote_definition_markers(self.view)
notes = {}
erase = []
keyorder = map(lambda x: self.view.substr(x)[2:-1], self.view.get_regions(REFERENCE_KEY))
keys = []
[keys.append(r) for r in keyorder if not r in keys]
for (key, item) in defs.items():
fnend = self.view.find('(\s*\Z|\n\s*\n(?!\ {4,}))', item.end())
fnreg = sublime.Region(item.begin(), fnend.end())
notes[key] = self.view.substr(fnreg).strip()
erase.append(fnreg)
erase.sort()
erase.reverse()
[self.view.erase(edit, reg) for reg in erase]
self.view.end_edit(edit)
edit = self.view.begin_edit()
for key in keys:
self.view.insert(edit, self.view.size(), '\n\n ' + notes[key])
self.view.end_edit(edit)
def is_enabled(self):
return True
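

# Editor's addition, not part of the original plugin: a small self-check of the
# two regular expressions above against typical Markdown footnote syntax. It
# uses only the `re` module, so it can run outside Sublime Text; the sample
# strings are invented for illustration.
def _footnote_regex_demo():
    assert re.search(REFERENCE_REGEX, "A cited claim.[^1]").group(1) == "1"
    assert re.match(DEFINITION_REGEX, " [^1]: The supporting source.").group(1) == "1"
    # A definition line also matches the reference pattern, which is why
    # get_footnote_references() explicitly filters definition lines out.
    assert re.search(REFERENCE_REGEX, " [^1]: The supporting source.") is not None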
|
JT5D/Alfred-Popclip-Sublime
|
Sublime Text 2/MarkdownEditing/footnotes.py
|
Python
|
gpl-2.0
| 6,711
|
#!/usr/bin/python
#
# Copyright (c) 2013 Simon Lukasik
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
from sqlalchemy import Column, Integer, Sequence, String
from bronte.model.entities.entities_common import Base
class BrStockMarket(Base):
__tablename__ = 'brstockmarket'
id = Column(Integer, Sequence('br_stockmarker_id_seq'), primary_key=True)
acronym = Column(String)
name = Column(String)
def __init__(self, acronym, name):
self.acronym = acronym
self.name = name
def __repr__(self):
return "<BrStockMarket('%s','%s')>" % (self.acronym, self.name)
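

# Editor's addition: hedged usage sketch, not part of the original module. It
# assumes `Base` (imported above from entities_common) is a standard SQLAlchemy
# declarative base; the in-memory database and the sample row are invented.
def _example_persist_market():
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('sqlite://')       # throw-away in-memory database
    Base.metadata.create_all(engine)          # creates the 'brstockmarket' table
    session = sessionmaker(bind=engine)()
    session.add(BrStockMarket('NYSE', 'New York Stock Exchange'))
    session.commit()
    return session.query(BrStockMarket).filter_by(acronym='NYSE').one()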
|
isimluk/bronte
|
backend/bronte/model/entities/brStockMarket.py
|
Python
|
gpl-2.0
| 948
|
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004-2007 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#
# Serbian version by Vlada Perić <vlada.peric@gmail.com>, 2009.
# Based on the Croatian DateHandler by Josip
"""
Serbian-specific classes for parsing and displaying dates.
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import re
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from ..lib.date import Date
from _dateparser import DateParser
from _datedisplay import DateDisplay
from _datehandler import register_datehandler
#-------------------------------------------------------------------------
#
# Serbian parser
#
#-------------------------------------------------------------------------
class DateParserSR(DateParser):
"""
Converts a text string into a Date object
"""
month_to_int = DateParser.month_to_int
month_to_int[u"januar"] = 1
month_to_int[u"januara"] = 1
month_to_int[u"jan"] = 1
    month_to_int[u"јан"] = 1
    month_to_int[u"јануара"] = 1
    month_to_int[u"јануар"] = 1
month_to_int[u"i"] = 1
month_to_int[u"februar"] = 2
month_to_int[u"februara"] = 2
month_to_int[u"feb"] = 2
    month_to_int[u"феб"] = 2
    month_to_int[u"фебруар"] = 2
    month_to_int[u"фебруара"] = 2
month_to_int[u"ii"] = 2
month_to_int[u"mart"] = 3
month_to_int[u"marta"] = 3
month_to_int[u"mar"] = 3
    month_to_int[u"мар"] = 3
    month_to_int[u"март"] = 3
    month_to_int[u"марта"] = 3
month_to_int[u"iii"] = 3
month_to_int[u"april"] = 4
month_to_int[u"aprila"] = 4
month_to_int[u"apr"] = 4
    month_to_int[u"апр"] = 4
    month_to_int[u"април"] = 4
    month_to_int[u"априла"] = 4
month_to_int[u"iv"] = 4
month_to_int[u"maj"] = 5
month_to_int[u"maja"] = 5
    month_to_int[u"мај"] = 5
    month_to_int[u"маја"] = 5
month_to_int[u"v"] = 5
month_to_int[u"jun"] = 6
month_to_int[u"juna"] = 6
    month_to_int[u"јун"] = 6
    month_to_int[u"јуна"] = 6
month_to_int[u"vi"] = 6
month_to_int[u"jul"] = 7
month_to_int[u"jula"] = 7
    month_to_int[u"јул"] = 7
    month_to_int[u"јула"] = 7
month_to_int[u"vii"] = 7
month_to_int[u"avgust"] = 8
month_to_int[u"avgusta"] = 8
month_to_int[u"avg"] = 8
    month_to_int[u"авг"] = 8
    month_to_int[u"август"] = 8
    month_to_int[u"августа"] = 8
month_to_int[u"viii"] = 8
month_to_int[u"septembar"] = 9
month_to_int[u"septembra"] = 9
month_to_int[u"sep"] = 9
    month_to_int[u"сеп"] = 9
    month_to_int[u"септембар"] = 9
    month_to_int[u"септембра"] = 9
month_to_int[u"ix"] = 9
month_to_int[u"oktobar"] = 10
month_to_int[u"oktobra"] = 10
month_to_int[u"okt"] = 10
    month_to_int[u"окт"] = 10
    month_to_int[u"октобар"] = 10
    month_to_int[u"октобра"] = 10
month_to_int[u"x"] = 10
month_to_int[u"novembar"] = 11
month_to_int[u"novembra"] = 11
month_to_int[u"nov"] = 11
    month_to_int[u"нов"] = 11
    month_to_int[u"новембар"] = 11
    month_to_int[u"новембра"] = 11
month_to_int[u"xi"] = 11
month_to_int[u"decembar"] = 12
month_to_int[u"decembra"] = 12
month_to_int[u"dec"] = 12
    month_to_int[u"дец"] = 12
    month_to_int[u"децембар"] = 12
    month_to_int[u"децембра"] = 12
month_to_int[u"xii"] = 12
modifier_to_int = {
u'pre' : Date.MOD_BEFORE,
u'posle' : Date.MOD_AFTER,
u'oko' : Date.MOD_ABOUT,
u'cca' : Date.MOD_ABOUT,
        u'пре' : Date.MOD_BEFORE,
        u'после' : Date.MOD_AFTER,
        u'око' : Date.MOD_ABOUT,
}
calendar_to_int = {
u'gregorijanski' : Date.CAL_GREGORIAN,
u'greg.' : Date.CAL_GREGORIAN,
u'julijanski' : Date.CAL_JULIAN,
u'jul.' : Date.CAL_JULIAN,
u'hebrejski' : Date.CAL_HEBREW,
u'hebr.' : Date.CAL_HEBREW,
u'islamski' : Date.CAL_ISLAMIC,
u'isl.' : Date.CAL_ISLAMIC,
u'francuski republikanski': Date.CAL_FRENCH,
u'franc.' : Date.CAL_FRENCH,
u'persijski' : Date.CAL_PERSIAN,
u'pers. ' : Date.CAL_PERSIAN,
        u'švedski' : Date.CAL_SWEDISH,
        u'šv.' : Date.CAL_SWEDISH,
        u'грегоријански' : Date.CAL_GREGORIAN,
        u'грег.' : Date.CAL_GREGORIAN,
        u'јулијански' : Date.CAL_JULIAN,
        u'јул.' : Date.CAL_JULIAN,
        u'хебрејски' : Date.CAL_HEBREW,
        u'хебр.' : Date.CAL_HEBREW,
        u'исламски' : Date.CAL_ISLAMIC,
        u'исл.' : Date.CAL_ISLAMIC,
        u'француски републикански': Date.CAL_FRENCH,
        u'франц.' : Date.CAL_FRENCH,
        u'персијски' : Date.CAL_PERSIAN,
        u'перс. ' : Date.CAL_PERSIAN,
        u'шведски' : Date.CAL_SWEDISH,
        u'шв' : Date.CAL_SWEDISH,
}
quality_to_int = {
u'procenjeno' : Date.QUAL_ESTIMATED,
u'pro.' : Date.QUAL_ESTIMATED,
        u'izračunato' : Date.QUAL_CALCULATED,
        u'izr.' : Date.QUAL_CALCULATED,
        u'процењено' : Date.QUAL_ESTIMATED,
        u'про.' : Date.QUAL_ESTIMATED,
        u'израчунато' : Date.QUAL_CALCULATED,
        u'изр.' : Date.QUAL_CALCULATED,
}
    bce = [u"пре нове ере", u"пре Христа", u"п.н.е.",
u"pre nove ere", u"pre Hrista", u"p.n.e."] + DateParser.bce
def init_strings(self):
"""
compiles regular expression strings for matching dates
"""
DateParser.init_strings(self)
# match 'Day. MONTH year.' format with or without dots
self._text2 = re.compile('(\d+)?\.?\s*?%s\s*((\d+)(/\d+)?)?\.?\s*$'
% self._mon_str, re.IGNORECASE)
# match Day.Month.Year.
self._numeric = re.compile("((\d+)[/\. ])?\s*((\d+)[/\.])?\s*(\d+)\.?$")
        _span1 = [u'od', u'од']
        _span2 = [u'do', u'до']
        _range1 = [u'između', u'између']
        _range2 = [u'i', u'и']
self._span = re.compile("(%s)\s+(?P<start>.+)\s+(%s)\s+(?P<stop>.+)" %
('|'.join(_span1),'|'.join(_span2)),
re.IGNORECASE)
self._range = re.compile("(%s)\s+(?P<start>.+)\s+(%s)\s+(?P<stop>.+)" %
('|'.join(_range1),'|'.join(_range2)),
re.IGNORECASE)
#-------------------------------------------------------------------------
#
# Serbian display
#
#-------------------------------------------------------------------------
class DateDisplaySR_latin(DateDisplay):
"""
Serbian (latin) date display class
"""
long_months = ("",
u"januara", u"februara", u"marta", u"aprila",
u"maja", u"juna", u"jula", u"avgusta",
u"septembra", u"oktobra", u"novembra", u"decembra"
)
short_months = ("",
u"jan", u"feb", u"mar", u"apr", u"maj", u"jun",
u"jul", u"avg", u"sep", u"okt", u"nov", u"dec"
)
roman_months = (
"", "I", "II", "III", "IV", "V", "VI",
"VII", "VIII", "IX", "X", "XI", "XII"
)
calendar = (
"", u"julijanski", u"hebrejski",
u"francuski republikanski", u"persijski", u"islamski",
        u"švedski"
)
_mod_str = ("", "pre ", "posle ", "oko ", "", "", "")
    _qual_str = ("", "procenjeno ", "izračunato ")
_bce_str = "%s p.n.e."
    formats = (
        "GGGG-MM-DD (ISO-8601)",
        "Numerički (DD.MM.GGGG.)",
"D. MMM GGGG.",
"D. Mesec GGGG.",
"D. Rb GGGG."
)
def _display_gregorian(self, date_val):
"""
display gregorian calendar date in different format
"""
year = self._slash_year(date_val[2], date_val[3])
if self.format == 0:
return self.display_iso(date_val)
elif self.format == 1:
## DD.MM.YYYY.
if date_val[3]:
return self.display_iso(date_val)
else:
if date_val[0] == 0 and date_val[1] == 0:
value = str(date_val[2])
else:
value = self._tformat.replace('%m', str(date_val[1]))
value = value.replace('%d', str(date_val[0]))
value = value.replace('%Y', str(abs(date_val[2])))
#some locale magic already provides the right separator
#value = value.replace('/', '.')
elif self.format == 2:
# Day. MON Year.
if date_val[0] == 0:
if date_val[1] == 0:
value = u"%s." % year
else:
value = u"%s %s." % (self.short_months[date_val[1]], year)
else:
value = u"%d. %s %s." % (date_val[0],
self.short_months[date_val[1]], year)
elif self.format == 3:
# Day. MONTH Year.
if date_val[0] == 0:
if date_val[1] == 0:
value = u"%s." % year
else:
value = u"%s %s." % (self.long_months[date_val[1]], year)
else:
value = u"%d. %s %s." % (date_val[0],
self.long_months[date_val[1]], year)
else:
# Day RomanMon Year
if date_val[0] == 0:
if date_val[1] == 0:
value = u"%s." % year
else:
value = "%s %s." % (self.roman_months[date_val[1]], year)
else:
value = "%d. %s %s." % (date_val[0],
self.roman_months[date_val[1]], year)
if date_val[2] < 0:
return self._bce_str % value
else:
return value
def display(self, date):
"""
Return a text string representing the date.
"""
mod = date.get_modifier()
cal = date.get_calendar()
qual = date.get_quality()
start = date.get_start_date()
newyear = date.get_new_year()
qual_str = self._qual_str[qual]
if mod == Date.MOD_TEXTONLY:
return date.get_text()
elif start == Date.EMPTY:
return ""
elif mod == Date.MOD_SPAN:
d_1 = self.display_cal[cal](start)
d_2 = self.display_cal[cal](date.get_stop_date())
scal = self.format_extras(cal, newyear)
return "%s%s %s %s %s%s" % (qual_str, u'od', d_1, u'do', d_2,
scal)
elif mod == Date.MOD_RANGE:
d_1 = self.display_cal[cal](start)
d_2 = self.display_cal[cal](date.get_stop_date())
scal = self.format_extras(cal, newyear)
            return "%s%s %s %s %s%s" % (qual_str, u'između', d_1, u'i', d_2,
scal)
else:
text = self.display_cal[date.get_calendar()](start)
scal = self.format_extras(cal, newyear)
return "%s%s%s%s" % (qual_str, self._mod_str[mod], text,
scal)
#-------------------------------------------------------------------------
#
# Register classes
#
#-------------------------------------------------------------------------
register_datehandler(('sr', 'serbian', 'srpski', 'sr_RS'),
DateParserSR, DateDisplaySR_latin)
|
arunkgupta/gramps
|
gramps/gen/datehandler/_date_sr.py
|
Python
|
gpl-2.0
| 13,117
|
import networkx as nx
import itertools as it
import random as rnd
import math
AVG_SPEED_OF_A_PEDESTRIAN = 1.5
STATION_NEVER_VISITED = -1
STATION_NOT_VISITED_AT_TIME = -2
def generateTrace(withConfigurationFile):
# Parse configuration file settings
configurationSettings = initSettings( withConfigurationFile )
# Create files for the output traces
fd_mobilityTraceFile = open(configurationSettings["o_mobilityfile"], 'w')
fd_eventsFile = open(configurationSettings["o_eventsfile"], 'w')
# Dictionary that stores, for each station, all routes passing through that station
# For each 'visit' the user's ID and time of visit are stored
visitingPattern = {}
# Parse a GrapML file using the NetworkX python library
graph = nx.read_graphml( configurationSettings["i_graphfile"], int )
# Get the graph's list of nodes, each one of these corresponding to a public transport station
nodes = nx.nodes( graph )
# Obtain the (source, destination) probability distribution from a file
probDistribution = obtainProbabilityDistributionOfNodes( configurationSettings["probdist"] )
# Distribute all users among the stations following the given probability distribution
userDistribution = distributeUsersAmongNodes( configurationSettings["users"], nodes, probDistribution )
stopTime = configurationSettings["stoptime"]
for user in userDistribution:
source = user[1]
# Generate mobility trace
currentHop = source
currentHop_x = graph.node[currentHop]['latitude']
currentHop_y = graph.node[currentHop]['longitude']
# Each user begins its journey at a given distance from the source station
radius = configurationSettings["radius"]
writeInitialPositionToFile(fd_mobilityTraceFile, user[0], currentHop_x+radius, currentHop_y+radius)
# Wait some time before starting to walk towards the source station
initialDelay = rnd.uniform(0.0, configurationSettings["startdelay"])
writeMovementToFile(fd_mobilityTraceFile, initialDelay, user[0], currentHop_x, currentHop_y, \
AVG_SPEED_OF_A_PEDESTRIAN)
# Calculate the time when the user reaches the source station
atTime = initialDelay + (getDistanceToStation(radius) / AVG_SPEED_OF_A_PEDESTRIAN)
while atTime < stopTime:
# Choose a destination node for each user based on the probability distribution
destination = selectDestination(nodes, probDistribution)
# Calculate the shortest path between source and destination
shortestPath = nx.dijkstra_path(graph, source, destination)
# Add this information to the visiting pattern
if currentHop not in visitingPattern:
visitingPattern[currentHop] = [ (user[0], atTime) ]
else:
visitingPattern[currentHop].append( (user[0], atTime) )
# Evaluate the remaining nodes in the shortest path
for nextHop in shortestPath[1:]:
nextHop_x = graph.node[nextHop]['latitude']
nextHop_y = graph.node[nextHop]['longitude']
# Calculate the speed at which the node will travel
atSpeed = rnd.uniform(configurationSettings["minspeed"], configurationSettings["maxspeed"])
writeMovementToFile(fd_mobilityTraceFile, atTime, user[0], nextHop_x, nextHop_y, atSpeed)
# Calculate the waiting time when reaching a station
pause = rnd.uniform(0.0, configurationSettings["maxpause"])
# Calculate the next time instant of the mobility model
distance = graph[currentHop][nextHop]['weight']
if distance == 0: # We could be reaching a link station between two separate lines
distance = euclideanDistance(\
[graph.node[currentHop]['latitude'], graph.node[currentHop]['longitude']], \
[graph.node[nextHop]['latitude'], graph.node[nextHop]['longitude']]\
)
atTime = atTime + (float(distance) / atSpeed) + pause
if atTime > stopTime:
break
else:
if nextHop not in visitingPattern:
visitingPattern[nextHop] = [ (user[0], atTime) ]
else:
visitingPattern[nextHop].append( (user[0], atTime) )
currentHop = nextHop
source = nextHop
numOfIncidents = generateIncidents(fd_eventsFile, nodes, visitingPattern, configurationSettings["geninterval"], stopTime)
print numOfIncidents
fd_mobilityTraceFile.close()
fd_eventsFile.close()
def initSettings(fromConfigurationFile):
fd_configurationFile = open(fromConfigurationFile, 'r')
settingDict = {}
for configurationDescription in fd_configurationFile:
setting, value = configurationDescription.split('=')
value = value.replace('\n', '') # Remove end of line character
if setting == "i_graphfile":
settingDict[setting] = value
elif setting == "o_mobilityfile":
settingDict[setting] = value
elif setting == "o_eventsfile":
settingDict[setting] = value
elif setting == "probdist":
settingDict[setting] = value
elif setting == "users":
settingDict[setting] = int( value )
elif setting == "minspeed":
settingDict[setting] = float( value )
elif setting == "maxspeed":
settingDict[setting] = float( value )
elif setting == "maxpause":
settingDict[setting] = float( value )
elif setting == "radius":
settingDict[setting] = float( value )
elif setting == "startdelay":
settingDict[setting] = float( value )
elif setting == "geninterval":
settingDict[setting] = float( value )
elif setting == "stoptime":
settingDict[setting] = float( value )
fd_configurationFile.close()
return settingDict
def generateIncidents(fileDescriptor, nodes, visitingPattern, interval, stopTime):
currentTime = 0
incidents = []
incidentsGenerated = 0
while currentTime < stopTime:
# Randomly select a station where a new incident will be generated
station = rnd.choice( nodes )
# Check the number of nodes that passed through that station at any time
if station not in visitingPattern:
incidents.append( (currentTime, STATION_NEVER_VISITED, station) )
incidentsGenerated += 1
else:
eligibleUsers = visitingPattern[station]
# Check how many users have passed through the station when the incident was generated
before = currentTime - 180
after = currentTime + 180
electedUsers = filter( lambda n: before <= n[1] <= after, eligibleUsers )
# If more than one user passed through the station, we chose one at random to
# generate the incident report
if len( electedUsers ) > 0:
user = rnd.choice( electedUsers )
incidents.append( (user[1], user[0], station) )
incidentsGenerated += 1
else:
incidents.append( (currentTime, STATION_NOT_VISITED_AT_TIME, station) )
incidentsGenerated += 1
currentTime += interval
incidents.sort( )
previous = ''
for incident in incidents:
current = '$ns_ at {} "$node_({}) geninc at {}"\n'.format(\
incident[0], incident[1], incident[2])
if current != previous:
fileDescriptor.write( current )
previous = current
return incidentsGenerated
def obtainProbabilityDistributionOfNodes(probDistributionFile):
fd_probDistributionFile = open( probDistributionFile, 'r' )
probDistribution = {}
for line in fd_probDistributionFile:
node, dstProb, srcProb = line.split(',')
srcProb = srcProb.replace('\n', '') # Remove end of line character
probDistribution[int(node)] = (float(dstProb), float(srcProb))
fd_probDistributionFile.close()
return probDistribution
def distributeUsersAmongNodes(numberOfUsers, nodes, probDistribution):
nodesCircular = it.cycle( nodes )
userDistribution = []
currentUser = 0
while currentUser < numberOfUsers:
node = nodesCircular.next()
if rnd.random() < probDistribution[node][0]:
userDistribution.append( (currentUser, node) )
currentUser += 1
return userDistribution
def selectDestination(fromNodes, withProbDistribution):
criteria = rnd.random()
nodesThatMeetCriteria = []
selectedDestination = rnd.choice( fromNodes )
for node in fromNodes:
if criteria < withProbDistribution[node][1]:
nodesThatMeetCriteria.append( node )
if len( nodesThatMeetCriteria ) > 0:
selectedDestination = rnd.choice( nodesThatMeetCriteria )
return selectedDestination
def getDistanceToStation(radius):
return math.sqrt( radius ** 2 + radius ** 2 )
def euclideanDistance(source, destination):
x1, y1 = source
x2, y2 = destination
return math.sqrt( (x2-x1) ** 2 + (y2-y1) ** 2 )
def writeInitialPositionToFile(fileDescriptor, userId, posX, posY):
fileDescriptor.write( '$node_({}) set X_ {}\n'.format(userId, posX) )
fileDescriptor.write( '$node_({}) set Y_ {}\n'.format(userId, posY) )
def writeMovementToFile(fileDescriptor, atTime, userId, posX, posY, atSpeed):
fileDescriptor.write( '$ns_ at {} "$node_({}) setdest {} {} {}"\n'.format(\
atTime, userId, posX, posY, atSpeed) )
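
# Editor's addition, not part of the original script: initSettings() above reads a
# plain "key=value" file, one setting per line. The example below only illustrates
# the expected keys; the file names and numeric values are invented.
EXAMPLE_CONFIGURATION = """\
i_graphfile=metro.graphml
o_mobilityfile=mobility.tcl
o_eventsfile=events.tcl
probdist=station_probabilities.csv
users=100
minspeed=8.0
maxspeed=14.0
maxpause=60.0
radius=50.0
startdelay=300.0
geninterval=30.0
stoptime=3600.0
"""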
|
cristiantanas/MCS-MobilityGenerator
|
graphwalk/GraphWalkMobilityModel.py
|
Python
|
gpl-2.0
| 8,640
|
# -*- coding: utf-8 -*-
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def run_tests():
"""Wrapper for ./setup.py test."""
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "intranet.settings")
django.setup()
test_runner = get_runner(settings)()
failures = test_runner.run_tests([])
sys.exit(failures)
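

# Editor's note, not part of the original file: run_tests() is written to be
# wired into packaging metadata so that `./setup.py test` bootstraps Django
# before running the suite, e.g. something along the lines of the following
# (the exact setup.py wiring is an assumption, not taken from this repository):
#
#     setup(..., test_suite="intranet.test.test_suite.run_tests")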
|
jacobajit/ion
|
intranet/test/test_suite.py
|
Python
|
gpl-2.0
| 389
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division
from math import *
from ngspice.syntax import *
from ngspice.simulator import *
__env__ = {k:v for k,v in globals().iteritems()}
import sys, os
import code
from cStringIO import StringIO
__all__= ['Interpy']
class Interpy(code.InteractiveInterpreter):
__env__ = __env__
def __init__(self, locals={}):
env = Interpy.__env__
if isinstance(locals, dict):
env.update(locals)
code.InteractiveInterpreter.__init__(self, locals=env)
self.stdout = ""
self.stderr = ""
def runcode(self, code):
sys.stdout = stdout = StringIO()
sys.stderr = stderr = StringIO()
try:
exec code in self.locals
except SystemExit:
pass
except:
self.showtraceback()
finally:
sys.stdout, sys.stderr = sys.__stdout__, sys.__stderr__
self.stdout = stdout.getvalue()
self.stderr = stderr.getvalue()
stdout.close()
stderr.close()
def context(self):
return self.__dict__['locals']
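

# Editor's addition: hedged usage sketch, not part of the original module. It
# shows how a host application might execute a snippet and read back the captured
# output; the snippet itself is invented.
def _example_interpy_session():
    interp = Interpy()
    interp.runsource("print 6 * 7")      # Python 2 print statement, like the module
    return interp.stdout, interp.stderr  # expected roughly: ("42\n", "")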
|
raphaelvalentin/QTModel
|
exec_script.py
|
Python
|
gpl-2.0
| 1,171
|
################################################################################
#
# Gene prediction pipeline
#
# $Id$
#
# Copyright (C) 2007 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#################################################################################
"""Subroutines for working on I/O of large genomic files.
"""
USAGE="""python GenomicIO.py [options] name [ files ]
Index fasta formatted files to create a database called "name".
"""
import os, sys, array, string, re, types, optparse, time, struct
import math
import random
import zlib
import gzip
import cStringIO
##------------------------------------------------------------
class SArray(array.array):
"""an array posing as a sequence.
This class conserves memory as it uses only 1 bytes per letter,
while python strings use the machine word size for a letter.
"""
def __init__(self, *args):
self.mType = args[0]
array.array.__init__(self, *args)
def __getslice__( self, *args):
"""return slice as a string."""
return array.array.__getslice__(self, *args).tostring()
def __setslice__( self, start, end, sub):
"""set slice start:end from a string sub."""
return array.array.__setslice__(self, start, end,
array.array( self.mType, sub ))
def __str__(self):
return self.tostring()
##------------------------------------------------------------
class Uncompressor:
def __init__(self, filename, unmangler):
self.mFile = open(filename, "rb" )
self.mUnMangler = unmangler
def read( self, block_size, indices, start, end ):
"""read an uncompressed block from start:end.
The compressed chunk starts at first_pos.
NOTE: This is poorly implemented - do better.
"""
# skip over uncompressed blocks
d = int(math.floor(float(start) / block_size) )
r = start % block_size
assert( d < len(indices) )
self.mFile.seek( indices[d] )
# read x bytes of compressed data, at least one full chunk.
nchunks = int(math.ceil( float((r+end-start)) / block_size) )
fragments = []
for x in range(d, d+nchunks):
s = self.mFile.read( indices[x+1] - indices[x] )
fragments.append(self.mUnMangler( s ))
u = "".join(fragments)
assert(len(u) >= end - start)
return u[r:r+end-start]
##------------------------------------------------------------
def writeFragments( outfile_fasta, outfile_index,
fragments, mangler, size,
write_all = False):
"""write mangled fragments to outfile in chunks of size.
returns remaining chunk.
if write_all is True, all of fragments are written and
the position of the last position is added to the index
as well.
"""
s = "".join(fragments)
if len(s) > size:
for x in range(0, len(s)-1, size):
outfile_index.write( "\t%i" % outfile_fasta.tell() )
outfile_fasta.write( mangler(s[x:x+size]) )
x = len(s) % size
if x:
if write_all:
outfile_index.write("\t%i" % outfile_fasta.tell() )
outfile_fasta.write( mangler( s[-x:] ) )
outfile_index.write("\t%i" % outfile_fasta.tell() )
return ""
else:
return s[-x:]
else:
return ""
def gzip_mangler(s):
xfile = cStringIO.StringIO()
gzipfile = gzip.GzipFile( fileobj = xfile, mode="wb" )
gzipfile.write( s )
gzipfile.close()
m = xfile.getvalue()
xfile.close()
return m
def gzip_demangler(s):
gzipfile = gzip.GzipFile( fileobj = cStringIO.StringIO(s), mode="rb" )
m = gzipfile.readline()
return m
##------------------------------------------------------------
def createDatabase( db, filenames,
force = False,
synonyms = None,
compression = None,
random_access_points = None,
regex_identifier = None):
"""index files in filenames to create database.
Two new files are created - db.fasta and db_name.idx
If compression is enabled, provide random access points
every # bytes.
Dictzip is treated as an uncompressed file.
regex_identifier: pattern to extract identifier from description line.
If None, the part until the first white-space character is used.
"""
if compression:
if compression == "lzo":
import lzo
def lzo_mangler( s ): return lzo.compress(s, 9)
mangler = lzo_mangler
db_name = db + ".lzo"
write_chunks = True
elif compression == "zlib":
def zlib_mangler( s ): return zlib.compress( s, 9)
mangler = zlib_mangler
db_name = db + ".zlib"
write_chunks = True
elif compression == "gzip":
mangler = gzip_mangler
db_name = db + ".gz"
write_chunks = True
elif compression == "dictzip":
import dictzip
mangler = lambda x: x
db_name = db + ".dz"
write_chunks = False
elif compression == "debug":
mangler = lambda x: x
db_name = db + ".debug"
write_chunks = True
else:
            raise ValueError("unknown compression library: %s" % compression)
else:
mangler = lambda x: x
db_name = db + ".fasta"
write_chunks = False
index_name = db + ".idx"
if db in filenames:
raise ValueError( "database (%s) is part of input set." % db_name)
if os.path.exists( db_name ) and not force:
raise ValueError( "database %s already exists." % db_name )
if os.path.exists( index_name ) and not force:
raise ValueError( "database index %s already exists." % index_name )
outfile_index = open( index_name, "w" )
if compression == "dictzip":
if random_access_points == None or random_access_points <= 0:
raise ValueError("specify dictzip chunksize in --random-access-points")
outfile_fasta = dictzip.open( db_name, "wb", buffersize=1000000, chunksize=random_access_points )
compression = None
else:
outfile_fasta = open( db_name, "wb" )
if type(filenames) == types.StringType:
filenames = [filenames]
identifiers = {}
lsequence = 0
identifier_pos, sequence_pos = 0, 0
translation = string.maketrans("xX", "nN")
for filename in filenames:
infile = open( filename, "r")
fragments = []
lfragment = 0
first = True
for line in infile:
if line[0] == "#": continue
if line[0] == ">" :
if not first:
if write_chunks:
writeFragments( outfile_fasta, outfile_index, fragments, mangler,
random_access_points, True )
fragments = []
lfragment = 0
else:
outfile_fasta.write( "\n" )
outfile_index.write("\t%i\n" % lsequence)
first = False
if regex_identifier:
try:
identifier = re.search(regex_identifier, line[1:-1]).groups()[0]
except AttributeError:
                        raise ValueError("could not parse identifier from line %s" % line[1:-1])
else:
identifier = re.split("\s", line[1:-1])[0]
## check for duplicate identifiers
if identifier in identifiers:
raise ValueError, "%s occurs more than once in %s and %s: line=%s" %\
(identifier, identifiers[identifier], filename, line[1:-1])
identifiers[identifier] = filename
# write identifier, the identifier includes a new-line
identifier_pos = outfile_fasta.tell()
outfile_fasta.write( "%s" % mangler(line) )
sequence_pos = outfile_fasta.tell()
outfile_index.write( "%s\t%i" % (identifier,
identifier_pos ) )
if write_chunks:
outfile_index.write( "\t%i" % random_access_points )
else:
outfile_index.write( "\t%i" % sequence_pos )
lsequence = 0
else:
s = re.sub( "\s", "", line.strip() )
if options.clean_sequence:
s = s.translate( translation )
lsequence += len(s)
if write_chunks:
fragments.append(s)
lfragment += len(s)
if lfragment > random_access_points:
rest = writeFragments( outfile_fasta, outfile_index,
fragments, mangler, random_access_points,
False)
fragments = [rest]
lfragment = len(rest)
else:
outfile_fasta.write( mangler(s) )
if write_chunks:
writeFragments( outfile_fasta, outfile_index, fragments, mangler, random_access_points, True )
else:
outfile_fasta.write( "\n" )
outfile_index.write("\t%i\n" % lsequence )
# add synonyms for the table
if synonyms:
for key, val in options.synonymous:
outfile_index.write( "%s\t%s\n" % (key, val) )
# map of names
# order is suffix data, suffix index, noSeek
NAME_MAP={
'uncompressed' : ('fasta', 'idx', False),
'lzo' : ('lzo', 'cdx', True ),
'dictzip' : ('dz', 'idx', False ),
'zlib' : ('zlib', 'cdx', True ),
'gzip' : ('gzip', 'cdx', True ),
'debug' : ('debug', 'cdx', True ),
}
PREFERENCES=('uncompressed', 'lzo', 'dictzip', 'zlib', 'gzip', 'debug')
class IndexedFasta:
def __init__( self, dbname ):
for x in PREFERENCES:
d = "%s.%s" % (dbname, NAME_MAP[x][0] )
i = "%s.%s" % (dbname, NAME_MAP[x][1] )
if os.path.exists( d ) and os.path.exists( i ):
self.mMethod = x
self.mDbname = d
self.mNameIndex = i
self.mNoSeek = NAME_MAP[x][2]
break
else:
raise KeyError, "unknown database %s" % dbname
self.mIsLoaded = False
def __getitem__(self, key ):
"""return full length sequence."""
return self.getSequence( key, "+", 0, 0, as_array = True )
def __loadIndex( self ):
"""load complete index into memory."""
if self.mMethod == "uncompressed":
self.mDatabaseFile = open( self.mDbname, "r" )
elif self.mMethod == "dictzip":
import dictzip
            self.mDatabaseFile = dictzip.GzipFile( self.mDbname )
        elif self.mMethod == "lzo":
            import lzo
            self.mDatabaseFile = Uncompressor( self.mDbname, lzo.decompress )
        elif self.mMethod == "gzip":
            self.mDatabaseFile = Uncompressor( self.mDbname, gzip_demangler )
        elif self.mMethod == "zlib":
            self.mDatabaseFile = Uncompressor( self.mDbname, zlib.decompress )
        elif self.mMethod == "bz2":
            # bz2 is imported lazily here, like lzo and dictzip above
            import bz2
            self.mDatabaseFile = bz2.BZ2File( self.mDbname )
elif self.mMethod == "debug":
self.mDatabaseFile = Uncompressor( self.mDbname + ".debug", lambda x: x )
        self.mIndex = {}
        self.mSynonyms = {}
for line in open(self.mNameIndex, "r"):
data = line[:-1].split("\t")
if len(data) == 2:
self.mSynonyms[data[0]] = data[1]
else:
## index with random access points
if len(data) > 4:
(identifier, pos_id, block_size, lsequence) = data[0], int(data[1]), int(data[2]), int(data[-1])
points = map(int, data[3:-1])
self.mIndex[identifier] = (pos_id, block_size, lsequence, points)
else:
(identifier, pos_id, pos_seq, lsequence) = data[0], int(data[1]), int(data[2]), int(data[-1])
self.mIndex[identifier] = (pos_id, pos_seq, lsequence)
self.mIsLoaded = True
def getDatabaseName( self ):
"""returns the name of the database."""
return self.mDbname
def getLength( self, sbjct_token ):
"""return sequence length for sbjct_token."""
if not self.mIsLoaded: self.__loadIndex()
return self.mIndex[sbjct_token][2]
def getContigSizes( self ):
"""return hash with contig sizes."""
if not self.mIsLoaded: self.__loadIndex()
contig_sizes = {}
for key, val in self.mIndex.items():
contig_sizes[key] = val[2]
return contig_sizes
def getSequence( self,
contig,
strand = "+",
start = 0,
end = 0,
converter = None,
as_array = False):
"""get a genomic fragment.
A genomic fragment is identified by the coordinates
contig, strand, start, end.
The converter function supplied translated these coordinates
into 0-based coordinates.
If as_array is set to true, return the SArray object. This might
be beneficial for large sequence chunks. If as_array is set to False,
return a python string.
"""
if not self.mIsLoaded: self.__loadIndex()
if contig not in self.mIndex:
raise KeyError, "%s not in index" % contig
data = self.mIndex[contig]
# dummy is
# -> pos_seq for seekable streams
# -> block_size for unseekable streams
pos_id, dummy, lsequence = data[:3]
pos_seq = dummy
block_size = dummy
if end == 0: end = lsequence
if end > lsequence:
raise ValueError("3' coordinate on %s out of bounds: %i > %i" % (contig, end, lsequence))
if start < 0:
raise ValueError("5' coordinate on %s out of bounds: %i < 0" % (contig, start))
if converter:
first_pos, last_pos = converter( start, end,
str(strand) in ("+", "1"),
lsequence )
else:
first_pos, last_pos = start, end
if str(strand) in ("-", "0", "-1"):
first_pos, last_pos = lsequence - last_pos, lsequence - first_pos
assert( first_pos < last_pos )
p = SArray( "c" )
if self.mNoSeek:
## read directly from position
p.fromstring( self.mDatabaseFile.read( block_size, data[3], first_pos, last_pos) )
else:
first_pos += pos_seq
last_pos += pos_seq
self.mDatabaseFile.seek( first_pos )
p.fromstring( self.mDatabaseFile.read( last_pos - first_pos ) )
if str(strand) in ("-", "0", "-1"):
p.reverse()
p = SArray("c",
string.translate( p[:],
string.maketrans("ACGTacgt", "TGCAtgca") ) )
if as_array:
return p
else:
# cast to string
return p[:]
def getRandomCoordinates( self, size ):
"""returns coordinates for a random fragment of size #.
        Default sampling mode:
Each residue has the same probability of being
in a fragment. Thus, the fragment can be smaller than
size due to contig boundaries.
"""
if not self.mIsLoaded: self.__loadIndex()
token = random.choice( self.mIndex.keys() )
strand = random.choice( ("+", "-") )
pos_id, pos_seq, lcontig = self.mIndex[token][:3]
rpos = random.randint( 0, lcontig )
if random.choice( ("True", "False") ):
start = rpos
end = min(rpos + size, lcontig)
else:
start = max(0, rpos - size)
end = rpos
return token, strand, start, end
###############################################################################
###############################################################################
###############################################################################
## converter functions. Some code duplication could be avoided but
## I preferred to keep the functions lean.
###############################################################################
def __one_forward_closed(x, y, c, l):
"""convert coordinates to zero-based, both strand, open/closed coordinates.
Parameters are from, to, is_positive_strand, length of contig.
"""
x -= 1
if not c: x, y = l - y, l - x
return x, y
def __zero_forward_closed(x, y, c, l):
"""convert coordinates to zero-based, both strand, open/closed coordinates.
Parameters are from, to, is_positive_strand, length of contig.
"""
y += 1
if not c: x, y = l - y, l - x
return x, y
def __one_both_closed(x, y, c = None, l = None):
"""convert coordinates to zero-based, both strand, open/closed coordinates.
Parameters are from, to, is_positive_strand, length of contig.
"""
return x - 1, y
def __zero_both_closed(x, y, c = None, l = None):
"""convert coordinates to zero-based, both strand, open/closed coordinates.
Parameters are from, to, is_positive_strand, length of contig.
"""
return x, y + 1
def __one_forward_open(x, y, c, l):
"""convert coordinates to zero-based, both strand, open/closed coordinates.
Parameters are from, to, is_positive_strand, length of contig.
"""
x -= 1
y -= 1
if not c: x, y = l - y, l - x
return x, y
def __zero_forward_open(x, y, c, l):
"""convert coordinates to zero-based, both strand, open/closed coordinates.
Parameters are from, to, is_positive_strand, length of contig.
"""
if not c: x, y = l - y, l - x
return x, y
def __one_both_open(x, y, c = None, l = None):
"""convert coordinates to zero-based, both strand, open/closed coordinates.
Parameters are from, to, is_positive_strand, length of contig.
"""
return x - 1, y - 1
def __zero_both_open(x, y, c = None, l = None):
"""convert coordinates to zero-based, both strand, open/closed coordinates.
Parameters are from, to, is_positive_strand, length of contig.
"""
return x, y
def getConverter( format ):
"""return a converter function for converting various
coordinate schemes into 0-based, both strand, closed-open ranges.
converter functions have the parameters
x, y, s, l: with x and y the coordinates of
a sequence fragment, s the strand (True is positive)
and l being the length of the contig.
Format is a "-" separated combination of the keywords
"one", "zero", "forward", "both", "open", "closed"
"""
data = set(format.split("-"))
if "one" in data:
if "forward" in data:
if "closed" in data:
return __one_forward_closed
else:
return __one_forward_open
else:
if "closed" in data:
return __one_both_closed
else:
return __one_both_open
else:
if "forward" in data:
if "closed" in data:
return __zero_forward_closed
else:
return __zero_forward_open
else:
if "closed" in data:
return __zero_both_closed
else:
return __zero_both_open
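
# Editor's illustration, not part of the original module: the converters returned
# by getConverter() map the various one/zero-based, open/closed conventions onto
# zero-based, forward-strand, closed-open ranges. For example, for bases 1..10 of
# a 100 bp contig:
#
#   getConverter("one-forward-closed")(1, 10, True, 100)   # -> (0, 10)
#   getConverter("one-forward-closed")(1, 10, False, 100)  # -> (90, 100)
#   getConverter("zero-both-open")(0, 10)                  # -> (0, 10)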
## Test function for benchmarking purposes
def benchmarkRandomFragment( fasta, size ):
"""returns a random fragment of size."""
contig, strand, start, end = fasta.getRandomCoordinates( size )
s = fasta.getSequence( contig, strand, start, end )
return s
def verify( fasta1, fasta2, num_iterations, fragment_size,
stdout = sys.stdout, quiet = False ):
"""verify two databases.
Get segment from fasta1 and check for presence in fasta2.
"""
if not quiet:
options.stdout.write("verifying %s and %s using %i random segments of length %i\n" %\
(fasta1.getDatabaseName(),
fasta2.getDatabaseName(),
num_iterations,
fragment_size ))
options.stdout.flush()
nerrors = 0
for x in range(num_iterations):
contig, strand, start, end = fasta1.getRandomCoordinates( fragment_size )
s1 = fasta1.getSequence(contig,strand,start,end)
s2 = fasta2.getSequence(contig,strand,start,end)
if s1 != s2:
if not quiet:
options.stdout.write("discordant segment: %s:%s:%i:%i\n%s\n%s\n" %\
(contig, strand, start, end, s1, s2) )
nerrors += 1
return nerrors
if __name__ == "__main__":
import Experiment
parser = optparse.OptionParser( version = "%prog version: $Id$", usage = USAGE)
parser.add_option( "-e", "--extract", dest="extract", type="string",
help="extract region ( for testing purposes. Format is contig:strand:from:to." )
parser.add_option( "-c", "--compression", dest="compression", type="choice",
choices=("lzo", "zlib", "gzip", "dictzip", "bzip2", "debug"),
help="compress database." )
parser.add_option( "--random-access-points", dest="random_access_points", type="int",
help="save random access points every # number of nucleotides." )
parser.add_option( "-f", "--input-format", dest="input_format", type="choice",
choices=("one-forward-open", "zero-both-open" ),
help="coordinate format of input." )
parser.add_option( "-s", "--synonyms", dest="synonyms", type="string",
help="list of synonyms, comma separated with =, for example, chr1=chr1b" )
parser.add_option( "-b", "--benchmark", dest="benchmark", action="store_true",
help="benchmark read access." )
parser.add_option( "--benchmark-num-iterations", dest="benchmark_num_iterations", type="int",
help="number of iterations for benchmark [%DEFAULT%]." )
parser.add_option( "--benchmark-fragment-size", dest="benchmark_fragment_size", type="int",
help="benchmark: fragment size [%DEFAULT%]." )
parser.add_option( "--verify", dest="verify", type="string",
help="verify against other database.")
parser.add_option( "-a", "--clean-sequence", dest="clean_sequence", action="store_true",
help="remove X/x from DNA sequences - they cause errors in exonerate." )
parser.add_option( "--regex-identifier", dest="regex_identifier", type="string",
help="regular expression for extracting the identifier from fasta description line." )
parser.set_defaults(
extract = None,
input_format = "zero-both-open",
benchmark_fragment_size = 1000,
benchmark_num_iterations = 1000000,
benchmark = False,
compression = None,
random_access_points = 0,
synonyms = None,
verify = None,
verify_num_iterations = 100000,
verify_fragment_size = 100,
clean_sequence = False,
regex_identifier = None)
(options, args) = Experiment.Start( parser )
if options.synonyms:
synonyms = {}
for x in options.synonyms.split(","):
a,b = x.split("=")
a = a.strip()
b = b.strip()
if a not in synonyms: synonyms[a] = []
synonyms[a].append( b )
else:
synonyms = None
if options.extract:
fasta = IndexedFasta( args[0] )
converter = getConverter( options.input_format )
contig, strand, start, end = options.extract.split(":")
start, end = map( int, (start, end) )
sequence = fasta.getSequence( contig, strand,
start, end,
converter = converter )
options.stdout.write( ">%s\n%s\n" % \
( options.extract, sequence ) )
elif options.benchmark:
import timeit
timer = timeit.Timer( stmt="benchmarkRandomFragment( fasta = fasta, size = %i)" % (options.benchmark_fragment_size),
setup="""from __main__ import benchmarkRandomFragment,IndexedFasta\nfasta=IndexedFasta( "%s" )""" % (args[0] ) )
t = timer.timeit( number = options.benchmark_num_iterations )
options.stdout.write("iter\tsize\ttime\n" )
options.stdout.write("%i\t%i\t%i\n" % (options.benchmark_num_iterations, options.benchmark_fragment_size, t ) )
elif options.verify:
fasta1 = IndexedFasta( args[0] )
fasta2 = IndexedFasta( options.verify )
nerrors1 = verify( fasta1, fasta2,
options.verify_num_iterations,
options.verify_fragment_size,
stdout=options.stdout )
options.stdout.write("errors=%i\n" % (nerrors1) )
nerrors2 = verify( fasta2, fasta1,
options.verify_num_iterations,
options.verify_fragment_size,
stdout=options.stdout )
options.stdout.write("errors=%i\n" % (nerrors2) )
else:
if options.loglevel >= 1:
options.stdlog.write("# creating database %s\n" % args[0])
options.stdlog.write("# indexing the following files: \n# %s\n" %\
(" \n# ".join( args[1:] ) ))
options.stdlog.flush()
if synonyms:
options.stdlog.write("# Applying the following synonyms:\n" )
for k,v in synonyms.items():
options.stdlog.write( "# %s=%s\n" % (k, ",".join(v) ) )
options.stdlog.flush()
if len(args) < 2:
print USAGE
sys.exit(1)
createDatabase( args[0], args[1:], synonyms = synonyms,
random_access_points = options.random_access_points,
compression = options.compression,
regex_identifier = options.regex_identifier )
Experiment.Stop()
|
AndreasHeger/pairsdb
|
pairsdb/src/IndexedFasta.py
|
Python
|
gpl-2.0
| 28,192
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class MtgPipeline(object):
def process_item(self, item, spider):
return item
|
BLannoo/MTG
|
MTG/pipelines.py
|
Python
|
gpl-2.0
| 283
|
# -*- coding: utf-8 -*-
'''
Bubbles Addon
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import tvmaze
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.genre_filter = ['animation', 'anime']
self.domains = ['animeultima.io']
self.base_link = 'http://www.animeultima.io'
self.search_link = '/search.html?searchquery=%s'
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
tv_maze = tvmaze.tvMaze()
tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
tvshowtitle = tvshowtitle['name']
t = cleantitle.get(tvshowtitle)
q = self.search_link % (urllib.quote_plus(tvshowtitle))
q = urlparse.urljoin(self.base_link, q)
r = client.request(q)
r = client.parseDOM(r, 'ol', attrs = {'id': 'searchresult'})[0]
r = client.parseDOM(r, 'h2')
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in r]
r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [(i[0], re.sub('<.+?>|</.+?>','', i[1])) for i in r]
r = [i for i in r if t == cleantitle.get(i[1])]
r = r[-1][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
tv_maze = tvmaze.tvMaze()
num = tv_maze.episodeAbsoluteNumber(tvdb, int(season), int(episode))
num = str(num)
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
r = client.parseDOM(r, 'tr', attrs = {'class': ''})
r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'td', attrs = {'class': 'epnum'})) for i in r]
r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
r = [i[0] for i in r if num == i[1]][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
hostDict = [(i.rsplit('.', 1)[0], i) for i in hostDict]
locDict = [i[0] for i in hostDict]
result = client.request(url)
links = []
try:
r = client.parseDOM(result, 'div', attrs = {'class': 'player-embed'})[0]
r = client.parseDOM(r, 'iframe', ret='src')[0]
links += [(r, url)]
except:
pass
try:
r = client.parseDOM(result, 'div', attrs = {'class': 'generic-video-item'})
r = [(i.split('</div>', 1)[-1].split()[0], client.parseDOM(i, 'a', ret='href', attrs = {'rel': '.+?'})) for i in r]
links += [(i[0], i[1][0]) for i in r if i[1]]
except:
pass
for i in links:
try:
try: host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(i[0].strip().lower()).netloc)[0]
except: host = i[0].lower()
host = host.rsplit('.', 1)[0]
if not host in locDict: raise Exception()
host = [x[1] for x in hostDict if x[0] == host][0]
host = host.encode('utf-8')
url = i[1]
url = urlparse.urljoin(self.base_link, url)
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
except:
pass
return sources
except:
return sources
def resolve(self, url):
try:
result = client.request(url)
result = result.decode('iso-8859-1').encode('utf-8')
url = client.parseDOM(result, 'div', attrs = {'class': 'player-embed'})[0]
url = client.parseDOM(url, 'iframe', ret='src')[0]
return url
except:
return
|
azumimuo/family-xbmc-addon
|
plugin.video.bubbles/resources/lib/sources/english/hoster/open/animeultima.py
|
Python
|
gpl-2.0
| 5,437
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2017 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Generic module for basic synchronisation with Active Directory.
A synchronisation script must create an instance of such a sync class from this
file, or of one of its subclasses. It should then be fed the configuration
variables before the synchronisation starts. Example::
sync = BaseSync.get_class(sync_type)(db, logger)
sync.configure(config_args)
sync.fullsync() # or: sync.quicksync(change_key)
Subclasses should be made when:
- Active Directory for the instance has extra functionality which requires more
than just new attributes. Examples: Exchange, home directories and maybe
Lync.
- An instance has special needs which the base sync is not flexible enough to
support.
The classes should be designed so that they're easy to subclass and so that
most of their behaviour can be changed.
Some terms:
- entity is an account/group/OU or anything else in Cerebrum - this corresponds
to an object in AD.
- Entities in quarantine are often referred to as deactivated. In AD this is
called disabled.
"""
import time
import uuid
import adconf
from Cerebrum import Entity, Errors
from Cerebrum.modules import CLHandler, Email
from Cerebrum.modules.EntityTrait import EntityTrait
from Cerebrum.Utils import unicode2str, Factory, dyn_import, NotSet
from Cerebrum.utils import json
from Cerebrum.utils.email import sendmail
from Cerebrum.modules.ad2 import ADUtils, ConfigUtils
from Cerebrum.modules.ad2.CerebrumData import CerebrumEntity
from Cerebrum.modules.ad2.ConfigUtils import ConfigError
from Cerebrum.modules.ad2.winrm import CommandTooLongException
from Cerebrum.modules.ad2.winrm import PowershellException
from Cerebrum.modules.gpg.data import GpgData
from Cerebrum.QuarantineHandler import QuarantineHandler
class BaseSync(object):
"""Class for the generic AD synchronisation functionality.
All the AD-synchronisation classes should subclass this one.
The sync's behaviour:
1. Configuration:
- All config is set - subclasses could add more settings.
- The config is checked - subclasses could override this.
2. At fullsync:
- AD is asked to start generating a list of objects
- Data from Cerebrum gets cached:
- All entities in Cerebrum to sync are listed. Each entity is
represented as an instance of L{CerebrumEntity}.
- Quarantined entities get marked as deactivated.
- Attributes as stored in Cerebrum.
- Subclasses could cache more.
- Each entity's AD-attributes get calculated.
- Process each object retrieved from AD:
- Gets ignored if in an OU we should not touch.
- Gets removed/disabled in AD if no entity matches the object.
- If not active in Cerebrum, disable/move object in AD, according
to what the config says.
- Gets moved to correct OU if put somewhere else, but only if
config says so.
- Attributes get compared. Those in AD not equal to Cerebrum get
updated.
- Subclasses could add more functionality.
- Remaining entities that were not found in AD get created in AD.
3. At quicksync:
- Get all unhandled events from the ChangeLog
- Process each unhandled event
- If successfully processed, mark the event as handled
Subclasses could of course make changes to this behaviour.
"""
# What class to make an instance of for talking with the AD server:
server_class = ADUtils.ADclient
# List of messages that should be given to the administrators of the AD
# domain. Such messages are errors that could not be fixed by Cerebrum's
# sysadmins, unless they have admin privileges in the AD domain.
_ad_admin_messages = []
# The required settings. If any of these settings does not exist in the
# config for the given AD-sync, an error will be triggered. Note that
# subclasses must define their own list for their own settings.
settings_required = ('sync_type', 'domain', 'server', 'target_ou',
'search_ou', 'object_classes')
# Settings with default values. If any of these settings are not defined in
# the config for the given AD-sync, they will instead get their default
# value. Note that subclasses must define their own list for their own
# values.
settings_with_default = (('dryrun', False),
('mock', False),
('encrypted', True),
('auth_user', 'cereauth'),
('domain_admin', 'cerebrum_sync'),
('move_objects', False),
('subset', None),
('ad_admin_message', ()),
('name_format', '%s'),
('ignore_ou', ()),
('create_ous', False),
('attributes', {}),
('useraccountcontrol', {}),
('store_sid', False),
('handle_unknown_objects', ('ignore', None)),
('handle_deactivated_objects', ('ignore', None)),
('gpg_recipient_id', None),
('language', ('nb', 'nn', 'en')),
('changes_too_old_seconds', 60*60*24*365),
('group_type', 'security'),
('group_scope', 'global'),
('ou_mappings', []),
('script', {}),
)
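# Purely illustrative: a minimal adconf.SYNCS entry could look roughly like
# the sketch below. The domain, server, OU values and the object_classes path
# are made-up examples; the keys come from settings_required and
# settings_with_default above.
#
#   SYNCS['example-account-sync'] = {
#       'sync_type': 'example-account-sync',
#       'domain': 'example.org',
#       'server': 'ad-ws.example.org',
#       'target_ou': 'OU=Users,DC=example,DC=org',
#       'search_ou': 'DC=example,DC=org',
#       'object_classes': (
#           'Cerebrum.modules.ad2.CerebrumData/CerebrumUser',),
#       'dryrun': True,          # optional, defaults to False
#       'attributes': {},        # optional, defaults to {}
#   }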
# A mapping from the entity_type to the correct externalid_type. Note that
# the mapping gets converted to CerebrumConstants at startup.
sidtype_map = {'account': 'AD_ACCSID',
'group': 'AD_GRPSID'}
def __init__(self, db, logger):
"""Initialize the sync.
A superclass connects to the given AD agent. TODO: or should we
only use ADclient directly in this class instead? Depends on how
complicated things are getting.
:type db: Cerebrum.CLDatabase.CLDatabase
:type logger: Cerebrum.logutils.loggers.CerebrumLogger
"""
super(BaseSync, self).__init__()
self.db = db
self.logger = logger
self.co = Factory.get("Constants")(self.db)
self.clconst = Factory.get("CLConstants")(self.db)
self.ent = Factory.get('Entity')(self.db)
self._ent_extid = Entity.EntityExternalId(self.db)
self._entity_trait = EntityTrait(self.db)
# Where the sync configuration should go:
self.config = dict()
# Where the entities to work on should go. Keys should be entity_names:
self.entities = dict()
# Where entity_ids of entities currently exempt from a sync should go.
self.exempt_entities = list()
# A mapping from entity_id to the entities.
self.id2entity = dict()
# A mapping from AD-id to the entities. AD-id is per default
# SamAccountName, but could be set otherwise in the config.
self.adid2entity = dict()
@classmethod
def get_class(cls, sync_type='', classes=None):
"""Build a synchronisation class out of given class names.
This works like Factory.get(), but you can specify the list of class
names yourself. The point of this is to be able to dynamically create a
synchronisation class with the features that are needed, without having
to hardcode it.
All the given class names get imported before a new class is created
out of them. Note that this class is automatically inherited in the
list.
Note that the order of the classes is important if they are related to
each other by subclassing. You can not list class A before subclasses
of class A, as that would mean the subclass won't override any of
A's methods. The method would then raise an exception.
@type sync_type: string
@param sync_type:
The name of an AD-sync type which should be defined in the AD
configuration. If given, the classes defined for the given type
will be used for setting up the sync class. This parameter gets
ignored if L{classes} is set.
@type classes: list or tuple
@param classes:
The names of all the classes that should be used in the sync class.
If this is specified, the L{sync_type} parameter gets ignored.
Example on classes:
- Cerebrum.modules.ad2.ADSync/UserSync
- Cerebrum.modules.no.uio.ADSync/UiOUserSync
"""
assert classes or sync_type, "Either sync_type or classes needed"
if not classes:
if sync_type not in adconf.SYNCS:
raise Exception('Undefined AD-sync type: %s' % sync_type)
conf = adconf.SYNCS[sync_type]
classes = conf.get('sync_classes')
if not classes:
raise Exception('No sync class defined for sync type %s' %
sync_type)
return cls._generate_dynamic_class(classes,
'_dynamic_adsync_%s' % sync_type)
def _format_name(self, name_to_format):
"""Adjust the name of the object according to the sync's configuration.
The type of adjustment is defined by the type of 'name_format'
configuration parameter.
@type name_to_format: string
@param name_to_format: the name of the object. It can be either
just adjusted according to some string formatting, or become
an input parameter to a function, that performs more complex
transformation.
"""
nformat = self.config.get('name_format', '%s')
if callable(nformat):
# There is a transformation function defined in the config
return nformat(name_to_format)
else:
# This is a string formatting in the configuration
return nformat % name_to_format
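# Hypothetical examples of the two supported 'name_format' forms, only to
# illustrate the method above (the values are not taken from any real config):
#
#   'name_format': 'adm-%s'                  # plain string formatting
#   'name_format': lambda name: name[:20]    # callable transformation
#
# With the first form, _format_name('olan') returns 'adm-olan'; with the
# second, the callable gets the name and its return value is used directly.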
def configure(self, config_args):
"""Read configuration options from given arguments and config file.
The configuration is for how the ADsync should behave and work. Could
be subclassed to support more settings for subclass functionality.
Defined basic configuration settings:
- target_spread: Either a Spread constant or a list of Spread
constants. Used to find what entities from Cerebrum to sync with AD.
- root_ou (string): The root OU that should be searched for objects in
AD.
- target_ou (string): What OU in AD that should be set as the default
OU for the objects.
- handle_unknown_objects: What to do with objects that are not found in
Cerebrum. They could be missing a spread, be deleted, or never have
existed in Cerebrum. Entities in quarantine but with
the correct target_spread are not affected by this.
Values:
('disable', None) # Deactivate object. This is the default.
('move', OU) # Deactivate object and move to a given OU.
('delete', None) # Delete the object. Can't be restored.
('ignore', None) # Do not do anything with these objects.
- move_objects (bool): Whether objects in the wrong OU should be moved to
the target_ou, or be left where they are. Other attributes are still
updated for the object. Defaults to False.
- attributes: The attributes to sync. Must be a dict with the names of
the attributes as keys, where each value is further config for the given
attribute. The configuration is different per attribute.
@type config_args: dict
@param config_args:
Configuration data that should be set. Overrides any settings that
are found in the config file (adconf). Unknown keys in the dict are not
warned about, as they could be targeted at subclass configuration.
"""
# Required settings. Will fail if not defined in the config:
for key in self.settings_required:
try:
self.config[key] = config_args[key]
except KeyError:
raise Exception('Missing required config variable: %s' % key)
# Settings which have default values if not set:
for key, default in self.settings_with_default:
self.config[key] = config_args.get(key, default)
# Set what object class type in AD to use, either the config or what is
# set in any of the subclasses of the ADSync. Most subclasses should
# set a default object class.
self.ad_object_class = config_args.get('ad_object_class',
self.default_ad_object_class)
# The object class is generated dynamically, depending on the given
# list of classes:
self.logger.debug2("Using object classes: %s",
', '.join(config_args['object_classes']))
self._object_class = self._generate_dynamic_class(
config_args['object_classes'],
'_dynamic_adobject_%s' % self.config['sync_type'])
if not issubclass(self._object_class, CerebrumEntity):
raise ConfigError(
'Given object_classes not subclass of %s' % CerebrumEntity)
# Calculate target spread and target entity_type, depending on what
# settings that exists:
if config_args.get('target_spread'):
# Explicitly set target spreads will override the other settings
spread = self.co.Spread(config_args['target_spread'])
self.config['target_spread'] = spread
self.config['target_type'] = spread.entity_type
else:
# Otherwise we use the set 'sync_type' for finding the spread:
spread = self.co.Spread(self.config['sync_type'])
try:
int(spread)
except Errors.NotFoundError:
if 'target_type' in config_args:
self.config['target_type'] = config_args['target_type']
self.config['target_spread'] = None
else:
raise ConfigUtils.ConfigError(
'Either sync name must be a spread, or target_type '
'must be defined')
else:
self.config['target_spread'] = spread
self.config['target_type'] = spread.entity_type
# Convert the entity_type into the type constant
self.config['target_type'] = self.co.EntityType(
self.config['target_type'])
# Languages are changed into the integer of their constants
self.config['language'] = tuple(int(self.co.LanguageCode(l)) for l in
self.config['language'])
# Change-types are changed into their constants
self.config['change_types'] = tuple(self.clconst.ChangeType(*t) for t in
config_args.get('change_types',
()))
# Set the correct port
if 'port' in config_args:
self.config['port'] = config_args['port']
else:
self.config['port'] = 5986
if not self.config['encrypted']:
self.config['port'] = 5985
if config_args.get('dc_server'):
self.config['dc_server'] = config_args['dc_server']
if self.config['subset']:
self.logger.info("Sync will only be run for subset: %s",
self.config['subset'])
# Log if in dryrun
if self.config['dryrun']:
self.logger.info('In dryrun mode, AD will not be updated')
if self.config['mock']:
self.logger.info('In mock mode, AD will not be connected to')
from Cerebrum.modules.ad2 import ADMock
self.server_class = ADMock.ADclientMock
# We'll need to set mock mode for all sync configurations, since
# the sync will instantiate clients with other configurations, in
# order to collect necessary AD attributes.
for key in adconf.SYNCS:
adconf.SYNCS[key]['mock'] = True
# Messages for AD-administrators should be logged if the config says
# so, or if there are no other options set:
self.config['log_ad_admin_messages'] = False
if (not self.config['ad_admin_message'] or
any(o in (None, 'log') for o in
self.config['ad_admin_message'])):
self.config['log_ad_admin_messages'] = True
if self.config['store_sid']:
converted = dict((self.co.EntityType(e_type),
self.co.EntityExternalId(sid_type))
for e_type, sid_type
in self.sidtype_map.iteritems())
self.sidtype_map = converted
# We define the group scope and type for new groups.
# This should probably be moved to UiADistGroupSync, and so should the
# code that touches new_group_scope in the create_object-method (in
# this class).
self.new_group_scope = self.config['group_scope'].lower()
self.new_group_type = self.config['group_type'].lower()
# Check the config
self.config_check()
def config_check(self):
"""Check that the basic configuration is okay."""
if not isinstance(self.config['ignore_ou'], (tuple, list)):
raise Exception("ignore_ou must be list/tuple")
if not self.config['target_ou'].endswith(self.config['search_ou']):
self.logger.warn('target_ou should be under the search_ou')
# Check the attributes:
# Attributes that shouldn't be defined:
for n in ('dn', 'Dn', 'sn', 'Sn'):
if n in self.config['attributes']:
self.logger.warn('Bad attribute defined in config: %s' % n)
# The admin message config:
for opt in self.config['ad_admin_message']:
if opt[0] not in ('mail', 'file', 'log', None):
self.logger.warn("Unknown option in ad_admin_message: %s", opt)
if opt[1] not in ('error', 'warning', 'info', 'debug'):
self.logger.warn("Unknown level in ad_admin_message: %s", opt)
# Some targets ('mail', 'file') require an extra setting, e.g. an address:
if opt[0] in ('mail', 'file'):
if len(opt) <= 2:
self.logger.warn("Missing setting in ad_admin_message: %s",
opt)
# If name_format is string, it should include the '%s' for
# the entity_name to be put in.
nformat = self.config.get('name_format', '%s')
if not callable(nformat) and '%s' not in nformat:
self.logger.warn("Missing '%s' in name_format, name not included")
for handl in ('handle_unknown_objects', 'handle_deactivated_objects'):
var = self.config[handl]
if var[0] not in ('ignore', 'disable', 'move', 'delete'):
raise Exception(
"Bad configuration of %s - set to: %s" % (handl, var))
# Check that all the defined change_types exists:
for change_type in self.config.get('change_types', ()):
int(change_type)
# TODO: move the instantiation of the server to somewhere else!
self.setup_server()
@staticmethod
def _generate_dynamic_class(classes, class_name='_dynamic'):
"""Generate a dynamic class out of the given classes.
This is doing parts of what L{Utils.Factory.get} does, but without the
dependency of cereconf.
@type classes: list of str
@param classes:
The list of classes that should get combined and turned into a
dynamic class. The classes are represented by strings, starting
with the module path, ending with the class name in the module.
Example:
Cerebrum.modules.ad2.ADSync/UserSync
Cerebrum.modules.ad2.ADSync/PosixUserSync
Note that the order in the list is important. The last element is
the superclass, and every class before it is a subclass. This also means
that if you add related classes, subclasses must be added before
the superclasses.
@type class_name: str
@param class_name:
The name of the new class, e.g. represented by
L{__main__._dynamic}. Not used if only one class is given, as that
is then used directly - no need to create a new class that is
exactly the same as input.
@rtype: dynamic class
@return:
A dynamically generated class.
"""
bases = []
for c in classes:
mod_name, cname = c.split("/", 1)
mod = dyn_import(mod_name)
claz = getattr(mod, cname)
for override in bases:
if issubclass(claz, override):
raise Exception(
"Class %r should appear earlier in the list, as "
"it's a subclass of class %r." % (claz, override))
bases.append(claz)
if len(bases) == 1:
return bases[0]
# Dynamically construct the new class that inherits from all the given
# classes:
return type(class_name, tuple(bases), {})
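# Sketch of what the staticmethod above produces, using the class paths from
# its docstring. It assumes PosixUserSync is a subclass of UserSync, so it has
# to be listed first:
#
#   cls = BaseSync._generate_dynamic_class(
#       ('Cerebrum.modules.ad2.ADSync/PosixUserSync',
#        'Cerebrum.modules.ad2.ADSync/UserSync'),
#       class_name='_dynamic_adsync_example')
#
# With two or more classes a new type is created with them as bases; with a
# single class, that class is returned unchanged.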
def setup_server(self):
"""Instantiate the server class to use for WinRM."""
self.server = self.server_class(
logger=self.logger,
host=self.config['server'],
port=self.config.get('port'),
auth_user=self.config.get('auth_user'),
domain_admin=self.config.get('domain_admin'),
domain=self.config.get('domain'),
encrypted=self.config.get('encrypted', True),
ca=self.config.get('ca'),
client_cert=self.config.get('client_cert'),
client_key=self.config.get('client_key'),
dryrun=self.config['dryrun'])
if 'dc_server' in self.config:
self.server.set_domain_controller(self.config['dc_server'])
def add_admin_message(self, level, msg):
"""Add a message to be given to the administrators of the AD domain.
The messages are in the end given to the administrators according
to what the configuration says.
@type level: string
# TODO: make use of log constants instead?
@param level: The level of the given message to log. Used to separate
out which messages should be given to the AD-administrators and
which not.
@type msg: string
@param msg: The message that should be logged. Must not contain
sensitive data, like passwords, as it could be sent by mail.
"""
self.logger.info("AD-message: %s: %s", level, msg)
self._ad_admin_messages.append((level, msg))
if self.config['log_ad_admin_messages']:
func = getattr(self.logger, level)
func(msg)
def send_ad_admin_messages(self):
"""Send the messages for the AD-administrators, if any.
The way the messages should be sent is decided by the configuration.
"""
if not self._ad_admin_messages:
self.logger.debug("No AD-admin messages to send")
return
self.logger.debug('Found %d AD-admin messages to send',
len(self._ad_admin_messages))
txt = '\n'.join('%s: %s' % (x[0].upper(), unicode2str(x[1])) for x in
self._ad_admin_messages)
for opt in self.config['ad_admin_message']:
if opt[0] in (None, 'log'):
# Messages already logged when added.
pass
elif opt[0] == 'mail':
for address in opt[2:]:
self.logger.info("Sending %d messages to %s",
len(self._ad_admin_messages), address)
try:
sendmail(address, 'cerebrum@usit.uio.no',
'AD-sync messages for %s at %s' % (
self.config['sync_type'],
self.config['domain']),
txt, charset='utf-8',
debug=self.config['dryrun'])
except Exception as e:
self.logger.warn("Error sending AD-messages to %s: %s",
address, e)
elif opt[0] == 'file':
self.logger.warn(
"Sending AD-admin messages to file not implemented")
# TODO
else:
self.logger.warn("Unknown way to send AD-messages: %s" % opt)
self._ad_admin_messages = []
self.logger.debug('Sending AD-admin messages done...')
def fullsync(self):
"""Do the fullsync by comparing AD with Cerebrum and then update AD.
In subclasses, you should rather override the methods that this method
calls instead of overriding all of this method, unless you of course
want to do the fullsync completely different.
"""
self.logger.info("Fullsync started")
self.logger.debug("Pre-sync processing...")
self.pre_process()
ad_cmdid = self.start_fetch_ad_data()
self.logger.debug("Fetching cerebrum data...")
self.fetch_cerebrum_data()
self.logger.debug("Calculate AD values...")
self.calculate_ad_values()
self.logger.debug("Process AD data...")
self.process_ad_data(ad_cmdid)
self.logger.debug("Process entities not in AD...")
self.process_entities_not_in_ad()
self.logger.debug("Post-sync processing...")
self.post_process()
self.logger.info('Fullsync done')
self.send_ad_admin_messages()
# TODO: not sure if this is the place to put this, but we must close
# down connections on the server side:
self.server.close()
def quicksync(self, changekey=None, change_ids=None):
"""Do a quicksync, by sending the latest changes to AD.
All events of the given change_types are processed generically, and in
chronologically order.
Subclasses should rather override the methods that this method calls
instead of overriding all of this method, as that is easier. Unless, of
course, you want to completely rewrite the behaviour of the quicksync.
The quicksync goes through L{change_log} for new events that have
not been marked as committed by the given L{changekey}. The list is
processed in reverse, so that equal events are only processed once.
:type changekey: string
:param changekey:
The change-log key to mark the events as committed or not. Must be
unique per job, unless you're in for race conditions and skipped
events.
:type change_ids: list or None
:param change_ids:
If specified, only the given change ids will be attempted executed.
The given IDs will be run no matter if they are considered finished
by the L{CLHandler}.
"""
self.logger.info("Quicksync started")
cl = CLHandler.CLHandler(self.db)
changetypes = self.config['change_types']
already_handled = set() # Changes that has been processed
# Avoid changes that are too old:
too_old = time.time() - int(self.config['changes_too_old_seconds'])
# generator to get events from changekey in correct order
def _events_for_change_key(k, t):
for e in reversed(cl.get_events(k, t)):
yield e
# generator to get events from change_ids in correct order
def _events_for_change_ids(ids):
for i in reversed(sorted(ids)):
rows = self.db.get_log_events(start_id=i, max_id=i)
try:
yield rows.next()
except StopIteration:
self.logger.warn("No change_id %s", i)
# Re-writeable functions for cl.confirm and cl.commit
confirm = lambda e: cl.confirm_event(e)
commit = lambda dryrun: None if dryrun else cl.commit_confirmations()
if changekey:
self.logger.debug("Processing changekey: %s", changekey)
events = _events_for_change_key(changekey, changetypes)
elif change_ids:
self.logger.debug("Processing given change_ids: %s", change_ids)
events = _events_for_change_ids(change_ids)
# Do not commit to CLHandler -- that won't work if cl is not set up
# with a changekey
commit = lambda r: None
confirm = lambda d: None
else:
raise Exception("Missing changekey or change_ids")
stats = dict(seen=0, processed=0, skipped=0, failed=0)
for row in events:
timestamp = int(row['tstamp'])
handle_key = tuple((int(row['change_type_id']),
row['subject_entity'],
row['dest_entity']))
change_type = self.clconst.ChangeType(int(row['change_type_id']))
stats['seen'] += 1
# Ignore too old changes:
if timestamp < too_old:
stats['skipped'] += 1
self.logger.info("Skipping too old change_id: %s",
row['change_id'])
confirm(row)
continue
# Ignore seen (change_type, subject, dest) tuples
if handle_key in already_handled:
stats['skipped'] += 1
self.logger.info(
"Skipping change_id %s: Already handled change type %s"
" for subject=%s, dest=%s", row['change_id'],
str(change_type), row['subject_entity'],
row['dest_entity'])
confirm(row)
continue
self.logger.debug(
"Processing change_id %s (%s), from %s subject_entity: %s",
row['change_id'], change_type, timestamp,
row['subject_entity'])
already_handled.add(handle_key)
try:
if self.process_cl_event(row):
stats['processed'] += 1
confirm(row)
else:
stats['skipped'] += 1
self.logger.debug(
"Unable to process %s for subject=%s dest=%s",
change_type, row['subject_entity'], row['dest_entity'])
except Exception:
stats['failed'] += 1
self.logger.error(
"Failed to process cl_event %s (%s) for %s",
row['change_id'], change_type, row['subject_entity'],
exc_info=1)
else:
commit(self.config['dryrun'])
commit(self.config['dryrun'])
self.logger.info("Handled %(seen)d events, processed: %(processed)d,"
" skipped: %(skipped)d, failed: %(failed)d", stats)
self.logger.info("Quicksync done")
self.send_ad_admin_messages()
def process_cl_event(self, row):
"""Process a given ChangeLog event.
This is normally called by the L{quicksync} method. Log changes of types
that are not set in L{adconf.SYNCS[<sync_type>][change_types]} will not be
processed.
Subclasses should override this for handling their own change types. The
BaseSync only handles quite generic change types, and not e.g. account
specific changes.
@type row: dict of db-row
@param row:
A db-row, as returned from L{changelog.get_events()}. This is the
row that should be processed.
@rtype: bool
@return:
The result from the handler. Should be True if the sync succeeded
or there was no need for the change to be synced, i.e. the log
change could be confirmed. Should only return False if the change
needs to be redone.
@raise UnhandledChangeTypeError?
TODO: Should we have our own exception class that is used if the
method does not know what to do with a given change type? Could
then be used by subclasses.
@raise TODO:
TODO: What exceptions is expected here?
"""
# TODO: Add functionality for generic changes here!
self.logger.warn("Change type not handled: %s",
self.clconst.ChangeType(row['change_type_id']))
# TODO: Or rather raise an UnhandledChangeTypeError?
return False
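# A subclass override could look roughly like the sketch below. The change
# type constant and the helper method are assumptions, only meant to show the
# expected return-value contract (True when the event is handled or needs no
# sync, False when it must be retried):
#
#   def process_cl_event(self, row):
#       if int(row['change_type_id']) == int(self.clconst.spread_add):
#           return self._handle_spread_add(row)
#       return super(MyAccountSync, self).process_cl_event(row)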
def fetch_cerebrum_data(self):
"""Get basic data from Cerebrum.
Subclasses could extend this by getting more information from Cerebrum.
Subclasses should first populate L{self.entities} with the entity
data, before calling this method in the superclass. This is because
this class does not populate the dict, but only updates the existing
entities with basic data, like quarantines.
"""
self.fetch_cerebrum_entities()
self.logger.debug("Fetched %d cerebrum entities" % len(self.entities))
# Make a mapping from entity_id to the entity:
self.id2entity = dict((self.entities[e].entity_id, self.entities[e])
for e in self.entities)
# Make a mapping from entity_name to the entity:
self.name2entity = dict((self.entities[e].entity_name,
self.entities[e]) for e in self.entities)
# Make a mapping from ad_id to the entity:
self.adid2entity = dict((self.entities[e].ad_id.lower(),
self.entities[e]) for e in self.entities)
if len(self.entities) != len(self.adid2entity):
self.logger.warn("Mismatch in mapping of ad_id -> entity_id")
self.fetch_quarantines()
self.fetch_spreads()
self.fetch_attributes()
if self.config['store_sid']:
self.fetch_sids()
def fetch_cerebrum_entities(self):
"""Get and cache all the entities from Cerebrum.
This method MUST be created by the subclasses, to get the proper
entities to synchronize with AD.
"""
raise Exception('Must be defined in the proper subclass')
def fetch_quarantines(self):
"""Get all quarantines from Cerebrum and update L{self.entities} with
this. Called after the entities have been retrieved from
Cerebrum, so that all in quarantine get tagged as deactivated.
"""
self.logger.debug("Fetch quarantines...")
# Limit the search to the entity_type the target_spread is meant for:
target_type = self.config['target_type']
ids = None
if self.config['subset']:
ids = self.id2entity.keys()
quarantined_accounts = QuarantineHandler.get_locked_entities(
self.db,
entity_types=target_type,
entity_ids=ids)
for entity_id in quarantined_accounts:
found = self.id2entity.get(entity_id)
if found:
found.active = False
self.logger.debug("Flagged %d entities as deactivated",
len(quarantined_accounts))
def fetch_spreads(self):
"""Get all spreads from Cerebrum and update L{self.entities} with this.
The spreads _could_ be used for updating various attributes or not
depending on if an entity should be available in different AD systems,
e.g. Exchange, Lync and Sharepoint.
"""
self.logger.debug("Fetch spreads for target type %s...",
self.config['target_type'])
if not self.config['target_type']:
# Don't know what spreads to fetch if we don't know the entity type
return
# TODO: Need to check what spreads we really need - slow to fetch all
# spreads for an entity type...
i = 0
es = Entity.EntitySpread(self.db)
for row in es.list_entity_spreads(self.config['target_type']):
ent = self.id2entity.get(int(row['entity_id']))
if ent:
ent.spreads.append(row['spread'])
i += 1
self.logger.debug("Fetched %d entity spreads", i)
def fetch_attributes(self):
"""Get all AD attributes stored in Cerebrum and add them to the cached
entities.
"""
# Check if data from the attribute table is needed:
attrtypes = set()
for c in ConfigUtils.get_config_by_type(self.config['attributes'],
ConfigUtils.ADAttributeAttr):
attrtypes.update(c.attributes)
if not attrtypes:
return
self.logger.debug("Fetch from attribute table: %s",
', '.join(str(a) for a in attrtypes))
ids = None
if self.config['subset']:
ids = self.id2entity.keys()
# Handle empty lists:
if not ids:
return
i = 0
for row in self.ent.list_ad_attributes(
entity_id=ids,
spread=self.config['target_spread'],
attribute=attrtypes):
e = self.id2entity.get(row['entity_id'], None)
if e:
attr = int(row['attr_code'])
attrcode = self.co.ADAttribute(attr)
if attrcode.multivalued:
e.cere_attributes.setdefault(attr, []).append(row['value'])
else:
e.cere_attributes[attr] = row['value']
i += 1
self.logger.debug("Fetched %d AD attributes from Cerebrum" % i)
def fetch_sids(self):
"""Get all SIDs stored in Cerebrum and add them to the cached entities.
Security ID, or SID, is the identifier for objects in AD with
privileges. Privileges could be set for Users, Groups, Computers and
probably other object types. The SID is readonly, and is automatically
set when the object is created. In some instances, we want to store the
SID for security reasons (auditing).
A SID can not be reused, so when an object is deleted and recreated, it
gets a new SID and thus loses all its previously set privileges.
TODO: how should SID be stored? We should connect it to spreads, as the
object could have a different SID in the different AD domains, so we
can't just store one. It looks like we have to store it in the
table with other AD attributes and don't write it back to AD, as it's
readonly.
"""
self.logger.debug("Fetch SIDs...")
en = Entity.EntityExternalId(self.db)
id_type = self.co.EntityExternalId(
self.sidtype_map[self.config['target_type']])
i = 0
for row in en.search_external_ids(source_system=self.co.system_ad,
id_type=id_type, fetchall=False):
# TODO: how should we get it per spread?
e = self.id2entity.get(row['entity_id'], None)
if e:
e.sid = row['external_id']
i += 1
self.logger.debug("Fetched %d SIDs from Cerebrum" % i)
def fetch_names(self):
"""Get all the entity names for the entities from Cerebrum.
"""
self.logger.debug("Fetch name information...")
variants = set()
systems = set()
languages = set()
all_systems = False
# Go through config and see what info needs to be fetched:
for atr in ConfigUtils.get_config_by_type(self.config['attributes'],
ConfigUtils.NameAttr):
variants.update(atr.name_variants)
if atr.source_systems is None:
all_systems = True
else:
systems.update(atr.source_systems)
if atr.languages:
languages.update(atr.languages)
if not variants:
return
self.logger.debug("Fetch names of the types: %s", variants)
if all_systems or not systems:
# By setting to None we fetch from all source_systems.
systems = None
if not languages:
# By setting to None we fetch all languages:
languages = None
ids = None
if self.config['subset']:
ids = self.owner2ent.keys()
i = 0
for row in self.ent.search_name_with_language(
name_variant=variants, entity_id=ids,
entity_type=self.config['entity_type'],
name_language=languages):
for ent in self.owner2ent.get(row['entity_id'], ()):
vari = str(self.co.EntityNameCode(row['name_variant']))
lang = str(self.co.LanguageCode(row['name_language']))
ent.entity_name_with_language.setdefault(
vari, {})[lang] = row['name']
i += 1
self.logger.debug("Found %d names" % i)
def calculate_ad_values(self):
"""Use Cerebrum data to calculate the needed attributes.
"""
for ent in self.entities.itervalues():
ent.calculate_ad_values()
def cache_entity(self, entity_id, entity_name, *args, **kwargs):
"""Wrapper method for creating a cache object for an entity.
The object class is created dynamically, depending on the config and
what subclasses of the sync is in use. This method returns an object
out of the correct classes.
You should call this method for new cache objects instead of creating
it directly, for easier subclassing.
@type entity_id: int
@param entity_id: The entity's entity_id
@type entity_name: str or unicode
@param entity_name: The entity's name, normally the entity_name.
@type *args: mixed
@param *args: More arguments that should be passed on to the object at
instantiation.
@type *kwargs: mixed
@param *kwargs:
More arguments that should be passed on to the object at
instantiation.
@rtype: Cerebrum.modules.ad2.CerebrumData.CerebrumEntity
@return: A proper instantiated subclass for L{CerebrumEntity}.
"""
return self._object_class(self.logger, self.config, entity_id,
entity_name, *args, **kwargs)
def start_fetch_ad_data(self, object_class=None, attributes=dict()):
"""Send request(s) to AD to start generating the data we need.
Could be subclassed to get more/other data.
@type object_class: str
@param object_class:
What object class to get from AD, e.g. 'user' or 'group'. If not
set, use what is defined in config or object.
@type attributes: dict
@param attributes: Extra attributes that should be retrieved from AD.
The attributes defined in the config is already set.
@rtype: string
@return:
A CommandId that is the server's reference to later get the data
that has been generated.
"""
if not object_class:
object_class = self.ad_object_class
attrs = self.config['attributes'].copy()
if attributes:
attrs.update(attributes)
self.logger.debug2("Try to fetch %d attributes: %s", len(attrs),
', '.join(sorted(attrs)))
# Some attributes are readonly, so they shouldn't be put in the list,
# but we still need to receive them if they are used, like the SID.
if self.config['store_sid'] and 'SID' not in attrs:
attrs['SID'] = None
return self.server.start_list_objects(ou=self.config['search_ou'],
attributes=attrs,
object_class=object_class)
def process_ad_data(self, commandid):
"""Start processing the data from AD. Each object from AD is sent
through L{process_ad_object} for further processing.
:type commandid: tuple
:param commandid:
The CommandId for the command that has been executed on the server
to generate a list of objects.
:raise PowershellException:
For instance OUUnknownException if the given OU to search in does
not exist.
"""
i = 0
for ad_object in self.server.get_list_objects(commandid):
if i == 0:
self.logger.debug2("Retrieved %d attributes: %s",
len(ad_object),
', '.join(sorted(ad_object.keys())))
try:
self.process_ad_object(ad_object)
except ADUtils.NoAccessException as e:
# Access errors could be given to the AD administrators, as
# Cerebrum is not allowed to fix such issues.
self.add_admin_message(
'warning', 'Missing access rights for %s: %s' % (
ad_object['DistinguishedName'], e))
# TODO: do we need to strip out data from the exceptions? Could
# it for instance contain passwords?
except PowershellException as e:
self.logger.warn("PowershellException for %s: %s" %
(ad_object['DistinguishedName'], e))
else:
i += 1
self.logger.debug("Received and processed %d objects from AD" % i)
return i
def process_ad_object(self, ad_object):
"""Compare an AD-object with Cerebrum and update AD with differences.
Basic functionality for what to do with an object, compared to what is
stored in Cerebrum. Could be subclassed to add more functionality. This
command is called both when updating existing objects, but also if an
entity didn't exist in AD and just got created.
:type ad_object: dict
:param ad_object:
A dict with information about the AD object from AD. The dict
contains mostly the object's attributes.
:rtype bool:
True if the AD object is processed and can be processed further.
False is returned if it should not be processed further, either
because it is in an OU we shouldn't touch, or because it doesn't exist in
Cerebrum. Subclasses might still want to process the object in some
way, but in most cases these are the regular situations where the
object should not be processed further.
"""
name = ad_object['Name']
dn = ad_object['DistinguishedName']
if 'UserAccountControl' in ad_object:
self.logger.debug3("For %s UAC: %s" % (name,
ad_object['UserAccountControl']))
ent = self.adid2entity.get(name.lower())
if ent:
ent.in_ad = True
if ent.entity_id in self.exempt_entities:
self.logger.debug3(
'Entity {0} marked as exempt, ignoring'.format(
ent.entity_id))
return False
ent.ad_data['dn'] = dn
# Don't touch others than from the subset, if set:
if self.config.get('subset'):
# Convert names to comply with 'name_format':
subset_names = (self._format_name(s) for s in
self.config['subset'])
if name not in subset_names:
self.logger.debug3("Ignoring due to subset: %s", name)
return False
# Don't touch those in OUs we should ignore:
if any(dn.endswith(ou) for ou in self.config.get('ignore_ou', ())):
self.logger.debug('Object in ignore_ou: %s' % dn)
return False
# If not found in Cerebrum, remove the object (according to config):
if not ent:
self.logger.debug2("Unknown object %s - %s" % (name, ad_object))
self.downgrade_object(ad_object,
self.config.get('handle_unknown_objects',
('disable', None)))
return False
# If not active in Cerebrum, do something (according to config).
# TODO: If downgrade is set to 'move', it conflicts with moving
# objects. How to solve this?
if not ent.active:
self.downgrade_object(ad_object,
self.config['handle_deactivated_objects'])
if self.config['move_objects']:
# Do not move if downgrade is set to move objects:
if (ent.active or
self.config['handle_deactivated_objects'][0] != 'move'):
self.move_object(ad_object, ent.ou)
# Updating the DN, for later updates in the process:
dn = ','.join((ad_object['DistinguishedName'].split(',')[0],
ent.ou))
ad_object['DistinguishedName'] = dn
# Compare attributes:
changes = self.get_mismatch_attributes(ent, ad_object)
if changes:
# Save the list of changes for possible future use
ent.changes = changes
self.server.update_attributes(dn, changes, ad_object)
self.script('modify_object', ad_object, changes=changes.keys())
# Store SID in Cerebrum
self.store_sid(ent, ad_object.get('SID'))
return True
def get_mismatch_attributes(self, ent, ad_object):
"""Compare an entity's attributes between Cerebrum and AD.
If an attribute exists in both places, it should be updated if the
values don't match. If it only exists in one of them, that is also
treated as a mismatch.
The changes get appended to the entity's change list for further
processing.
:type ent: CerebrumEntity
:param ent:
The given entity from Cerebrum, with calculated attributes.
:type ad_object: dict
:param ad_object:
The given attributes from AD for the target object.
:rtype: dict
:return:
The list of attributes that doesn't match and should be updated.
The key is the name of the attribute, and the value is a dict with
the elements:
- *add*: For elements that should be added to the attribute in AD.
- *remove*: For elements that should be removed from the attribute.
- *fullupdate*: For attributes that should be fully replaced.
The result could be something like::
{'Member': {
'add': ('userX', 'userY',),
'remove': ('userZ',),
},
'Description': {
'fullupdate': 'New description',
},
}
"""
ret = {}
for atr, atrconfig in self.config['attributes'].iteritems():
value = ent.attributes.get(atr, None)
ad_value = ad_object.get(atr, None)
# Filter/convert the value from AD before getting compared:
if ad_value and isinstance(atrconfig, ConfigUtils.AttrConfig):
if atrconfig.ad_transform:
ad_value = atrconfig.ad_transform(ad_value)
mismatch, add_elements, remove_elements = \
self.attribute_mismatch(ent, atr, value, ad_value)
if mismatch:
ret[atr] = dict()
if add_elements or remove_elements:
self.logger.debug("Mismatch attr for %s: %s.",
ent.entity_name, atr)
if add_elements:
self.logger.debug(
" - adding: %s",
'; '.join('%s (%s)' % (m, type(m)) for m in
add_elements))
ret[atr]['add'] = add_elements
if remove_elements:
self.logger.debug(
" - removing: %s",
'; '.join('%s (%s)' % (m, type(m)) for m in
remove_elements))
ret[atr]['remove'] = remove_elements
else:
self.logger.debug(
"Mismatch attr %s for %s: '%s' (%s) -> '%s' (%s)",
atr, ent.entity_name, ad_value, type(ad_value),
value, type(value))
ret[atr]['fullupdate'] = value
return ret
def attribute_mismatch(self, ent, atr, c, a):
"""Compare an attribute between Cerebrum and AD.
This is a generic method. Specific attributes should not be hardcoded
in this method, but should rather be configurable, or might be
subclassed even though that should be avoided (try to generalize).
The attributes are matched in different ways. The order does for
example not matter for multivalued attributes, i.e. lists.
:type ent: CerebrumEntity
:param ent:
The given entity from Cerebrum, with calculated attributes.
:type atr: str
:param atr: The name of the attribute to compare
:type c: mixed
:param c: The value from Cerebrum for the given attribute
:type a: mixed
:param a: The value from AD for the given attribute
:rtype: tuple(bool, list, list)
:return:
A tuple with three elements::
(<bool:is_mismatching>, <set:to_add>, <set:to_remove>)
The first value is True if the attribute from Cerebrum and AD does
not match and should be updated in AD. If the attribute is a list
and only some of its elements should be updated, the second and the
third values list the elements that should be respectively added or
removed.
"""
# TODO: Should we care about case sensitivity?
# Ignore the cases where an attribute is None in Cerebrum and an empty
# string in AD:
if c is None and a == '':
return (False, None, None)
# TODO: Should we ignore attributes with extra spaces? AD converts
# double spaces into single spaces, e.g. GivenName='First Last'
# becomes in AD 'First Last'. This is issues that should be fixed in
# the source system, but the error will make the sync update the
# attribute constantly and make the sync slower.
# SAMAccountName must be matched case insensitively. TODO: Case
# sensitivity should rather be configurable.
if atr.lower() == 'samaccountname':
if a is None or c.lower() != a.lower():
return (True, None, None)
# Order does not matter in multivalued attributes
seq = (list, tuple, set)
if isinstance(c, seq) and (isinstance(a, seq) or a is None):
a = a or list()
to_add = set(c).difference(a)
to_remove = set(a).difference(c)
# Search for objects that might have a similar DN to what Cerebrum
# expects. We need to handle this, since people tend to move stuff
# about in AD :/ Only the objects that have a unique RDN are
# collected.
do_not_remove = {}
for e in to_remove:
if 'cn' in e:
rdn = e[3:e.find(',')]
objects = self.server.find_object(
attributes={'CN': rdn})
if len(objects) == 1:
do_not_remove[rdn] = e
# Remove the objects that have an alternate unique RDN, from the
# list of attributes to remove.
c = map(lambda x: do_not_remove.get(x[3:x.find(',')], x), c)
# Re-calculate the set-difference
to_remove = set(a).difference(c)
return (to_add or to_remove, list(to_add), list(to_remove))
return (c != a, None, None)
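# Illustration of the comparison rules above (the values are made up):
#
#   attribute_mismatch(ent, 'Description', None, '')
#       -> no mismatch: None in Cerebrum vs '' in AD is ignored
#   attribute_mismatch(ent, 'Member', ['a', 'b'], ['b', 'c'])
#       -> mismatch, with to_add == ['a'] and to_remove == ['c']
#   attribute_mismatch(ent, 'GivenName', 'Kari', 'Kari Nordmann')
#       -> mismatch, full update: scalar values are compared directly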
def process_entities_not_in_ad(self):
"""Go through entities that wasn't processed while going through AD.
This could mean that either the entity doesn't exist in AD and should
be created, or that the object is in an OU that we are not processing.
The entities should probably be created in AD, but that is up to a
subclass to decide.
"""
# Do a count of how many there are, for debugging:
self.logger.debug("Found %d entities not found in AD",
len(filter(lambda x: not x.in_ad,
self.entities.itervalues())))
i = 0
for ent in self.entities.itervalues():
if ent.in_ad:
continue
if ent.entity_id in self.exempt_entities:
self.logger.debug3(
'Entity {0} marked as exempt, ignoring'.format(
ent.entity_id))
i += 1
continue
try:
self.process_entity_not_in_ad(ent)
except ADUtils.NoAccessException as e:
# Access errors should be sent to the AD administrators, as
# Cerebrum can not fix this.
self.add_admin_message('warning',
'Missing access rights for %s: %s' % (
ent.ad_id, e))
except PowershellException as e:
self.logger.warn("PowershellException for %s: %s" %
(ent.entity_name, e))
else:
i += 1
self.logger.debug('Successfully processed %d entities not in AD' % i)
def process_entity_not_in_ad(self, ent):
"""Process an entity that doesn't exist in AD, yet.
The entity should be created in AD if active, and should then be
updated like other, already existing objects.
@type ent: CerebrumEntity
@param ent: An object representing an entity in Cerebrum.
"""
if not ent.active:
if self.config['handle_deactivated_objects'][0] == 'delete':
self.logger.debug("Inactive entity ignored: %s",
ent.entity_name)
return
else:
self.logger.debug("Not in AD, and also not active: %s",
ent.entity_name)
try:
obj = self.create_object(ent)
except ADUtils.ObjectAlreadyExistsException as e:
# It exists in AD, but is probably somewhere out of our
# search_base. Will try to get it, so we could still update it, and
# maybe even move it to the correct OU.
self.logger.debug("Entity already exists: %s", ent.entity_name)
ent.in_ad = True
attrs = self.config['attributes'].copy()
if self.config['store_sid'] and 'SID' not in attrs:
attrs['SID'] = None
# TODO! Are there more unique attributes that can be used to
# search? For user objects it seems it is enough with
# 'SamAccountName' only. See
# http://blogs.msdn.com/b/openspecification/archive/2009/07/10/\
# understanding-unique-attributes-in-active-directory.aspx
search_attributes = dict((u, ent.attributes[u]) for u
in ['SamAccountName']
if ent.attributes.get(u))
objects = self.server.find_object(
name=ent.entity_name,
attributes=search_attributes,
object_class=self.ad_object_class)
if len(objects) == 1:
# Found only one object, and it is most likely the one we need
obj = objects[0]
self.logger.debug("Found entity %s (%s)", ent.entity_name,
obj['DistinguishedName'])
elif len(objects) == 0:
# Strange, we can't find the object though AD says it exists!
self.logger.error("Cannot find %s, though AD says it exists",
ent.ad_id)
return False
else:
# Found several objects that satisfy the search criteria.
# Unfortunately, in this case we can't determine which one
# we actually need.
self.logger.error("Ambiguous object %s. Found several with "
"the same name. Cannot determine which "
"one is the right one.", ent.ad_id)
return False
except (ADUtils.SetAttributeException,
CommandTooLongException) as e:
# The creation of the object may have failed because of the entity's
# attributes. There may have been too many of them, making the command
# too long, or they contained (yet) invalid paths in AD.
# In many cases the update_attributes function for existing objects
# can fix the attribute problem. So it's good to try to create an
# object without attributes now and wait until the next round for
# its attributes to be updated.
self.logger.warning("""Failed creating %s. """
"""Trying to create it without attributes"""
% ent.ad_id)
# SamAccountName needs to be present when the object is created.
# It will default to the name if it is not given, but if it is given
# it has to be preserved.
original_samaccountname = ent.attributes.get('SamAccountName')
if original_samaccountname:
ent.attributes = {'SamAccountName': original_samaccountname}
else:
ent.attributes = {}
try:
obj = self.create_object(ent)
except Exception:
# Really failed
self.logger.exception("Failed creating %s." % ent.ad_id)
return False
else:
ent.ad_new = True
except Exception:
# Unforeseen exception; traceback will be logged
self.logger.exception("Failed creating %s." % ent.ad_id)
return False
else:
ent.ad_new = True
ent.in_ad = True
ent.ad_data['dn'] = obj['DistinguishedName']
if not ent.ad_new:
# It is an existing object, but under the wrong OU (otherwise it would
# have been fetched earlier). It should therefore be passed to
# process_ad_object, like was done before for all found objects.
# NB! For some subclasses process_ad_object is overridden and
# performs extra actions. In this case they will not be performed,
# but the next iteration of sync should fix this.
self.process_ad_object(obj)
return obj
def create_ou(self, dn):
"""Helper method for creating an OU recursively.
The OUs will only be created if the config says so. TODO: Might want to
change where this is checked.
@type dn: str
@param dn:
The DistinguishedName of the OU that should be created.
"""
if not self.config['create_ous']:
return
self.logger.info("Creating OU: %s" % dn)
name, path = dn.split(',', 1)
if name.lower().startswith('ou='):
name = name[3:]
try:
ou = self.server.create_object(name, path, 'organizationalunit')
except ADUtils.OUUnknownException:
self.logger.info("OU was not found: %s", path)
self.create_ou(path)
# Then retry creating the original OU:
ou = self.server.create_object(name, path, 'organizationalunit')
self.script('new_object', ou)
return ou
def create_object(self, ent, **parameters):
"""Create a given entity in AD.
This is talking with the AD client to create the object properly. You
should subclass this to e.g. add extra parameters to the creation.
@type ent: CerebrumEntity
@param ent: The entity that should be created in AD.
@type **parameters: mixed
@param **parameters: Extra data that should be sent to AD when creating
the object.
@raise ObjectAlreadyExistsException: If an object with the same name or
id existed in AD already.
"""
try:
if self.ad_object_class == 'group':
parameters['GroupScope'] = self.new_group_scope
parameters['GroupCategory'] = self.new_group_type
new_object = self.server.create_object(
ent.ad_id, ent.ou, self.ad_object_class,
attributes=ent.attributes, parameters=parameters)
except ADUtils.OUUnknownException:
self.logger.info("OU was not found: %s", ent.ou)
if not self.config['create_ous']:
raise
self.create_ou(ent.ou)
# Then retry creating the object:
new_object = self.create_object(ent, **parameters)
self.script('new_object', new_object)
return new_object
def downgrade_object(self, ad_object, action):
"""Do a downgrade of an object in AD.
The object could for instance be unknown in Cerebrum, or be inactive.
The AD-object could then be disabled, moved and/or deleted, depending
on the setting. The configuration says what should be done with such
objects, as it could be disabled, moved, deleted or something else.
@type ad_object: dict
@param ad_object: The data about the AD-object to downgrade.
@type action: tuple
@param action:
A two-element tuple, where the first element is a string, e.g.
'ignore', 'delete', 'move' or 'disable'. The second element
contains extra information, e.g. to what OU the object should be
moved to.
"""
dn = ad_object['DistinguishedName']
# conf = self.config.get('handle_unknown_objects', ('disable', None))
if action[0] == 'ignore':
self.logger.debug2("Downgrade: ignoring AD object: %s", dn)
return
elif action[0] == 'disable':
if not ad_object.get('Enabled'):
return
self.disable_object(ad_object)
elif action[0] == 'move':
if ad_object.get('Enabled'):
self.disable_object(ad_object)
if not dn.lower().endswith(action[1].lower()):
self.logger.debug("Downgrade: moving from '%s' to '%s'", dn,
action[1])
# TODO: test if this works as expected!
self.move_object(ad_object, action[1])
return True
elif action[0] == 'delete':
self.delete_object(ad_object)
else:
raise Exception("Unknown config for downgrading object %s: %s" %
(ad_object.get('Name'), action))
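# Examples of the 'action' tuples handled above, as they could appear in the
# config (the OU value is made up):
#
#   ('ignore', None)                            # leave the object alone
#   ('disable', None)                           # disable the object in AD
#   ('move', 'OU=Disabled,DC=example,DC=org')   # disable and move the object
#   ('delete', None)                            # delete the object permanently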
def disable_object(self, ad_object):
""" Disable the given object.
:param dict ad_object: The object as retrieved from AD.
"""
self.server.disable_object(ad_object['DistinguishedName'])
self.script('disable_object', ad_object)
def enable_object(self, ad_object):
""" Enable the given object.
:param dict ad_object: The object as retrieved from AD.
"""
self.server.enable_object(ad_object['DistinguishedName'])
# TODO: If we run scripts here, we'll also have to consider
# - set_password + enable_object
# - quicksync + quarantines
# self.script('enable_object', ad_object)
def delete_object(self, ad_object):
""" Delete the given object.
:param dict ad_object: The object as retrieved from AD.
"""
self.server.delete_object(ad_object['DistinguishedName'])
# TODO: If we run scripts here, we'll also have to consider
# - quicksync + quarantines
# self.script('delete_object', ad_object)
def move_object(self, ad_object, ou):
"""Move a given object to the given OU.
        The object is first checked to see if it is already in the correct OU.
@type ad_object: dict
@param ad_object: The object as retrieved from AD.
@type ou: string
@param ou: The full DN of the OU the object should be moved to.
"""
dn = ad_object['DistinguishedName']
self.logger.debug3("Trying to move %s to %s", dn, ou)
if ou == dn.split(',', 1)[1]:
# Already in the correct location
return
try:
self.server.move_object(dn, ou)
except ADUtils.OUUnknownException:
self.logger.info("OU was not found: %s", ou)
if not self.config['create_ous']:
raise
self.create_ou(ou)
self.server.move_object(dn, ou)
# Update the dn, so that it is correct when triggering event
ad_object['DistinguishedName'] = ','.join((dn.split(',', 1)[0], ou))
self.script('move_object', ad_object, move_from=dn)
def pre_process(self):
"""Hock for things to do before the sync starts."""
self.script('pre_sync')
def post_process(self):
"""Hock for things to do after the sync has finished."""
self.script('post_sync')
def store_sid(self, ent, sid):
"""Store the SID for an entity as an external ID in Cerebrum.
@type ent: CerebrumEntity
@param ent: The object of the Cerebrum entity for which the SID should
be stored.
@type sid: string
@param sid: The SID from AD which should be stored.
"""
if not self.config['store_sid']:
return
if getattr(ent, 'sid', '') == sid:
return
self.logger.info("Storing SID for entity %s: %s", ent.entity_id, sid)
en = self._ent_extid
en.clear()
en.find(ent.entity_id)
# Since external_id only works for one type of entities, we need to
# find out which external_id type to store the SID as:
sid_type = self.sidtype_map[en.entity_type]
en.affect_external_id(self.co.system_ad, sid_type)
en.populate_external_id(self.co.system_ad, sid_type, sid)
en.write_db()
def script(self, action, ad_object=None, ent=None, **extra):
"""Check if a script of a given type is defined and execute it.
        The scripts have to be set up by the AD administrators; Cerebrum is
        only responsible for firing them up.
@type action: string
        @param action: The type of event that has occurred, and which could be
triggering a script to be executed. The script location is found in
the config.
@type ad_object: dict
@param ad_object: The data about the object to be targeted by the
script.
@type ent: CerebrumEntity
@param ent: The entity that is targeted by the script. Not always
needed.
@type **extra: mixed
@param **extra: Extra arguments for the script, the arguments are
transformed into:
-key1 value1
"""
if action not in self.config['script']:
return
params = {'Action': action, 'UUID': str(uuid.uuid4()), }
for attr in ('DistinguishedName', 'ObjectGUID'):
if ad_object and ad_object.get(attr):
params.update({'Identity': ad_object[attr], })
break
if extra:
params.update(extra)
try:
return self.server.execute_script(self.config['script'][action],
**params)
except PowershellException as e:
self.logger.warn(
"Script failed for event %s (%s): %s",
action, params.get('UUID'), e)
return False
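    # Illustrative sketch (assumption, not from the original source): the
    # 'script' part of the config maps event names to scripts on the AD side,
    # and self.script() only fires for configured events. The path below is
    # hypothetical:
    #
    #     config['script'] = {'new_object': 'C:\\cerebrum\\new_object.ps1'}
    #
    # For a call like self.script('new_object', ad_object), the script would
    # then be executed with parameters such as -Action new_object,
    # -UUID <generated uuid> and -Identity <DistinguishedName or ObjectGUID>.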
class UserSync(BaseSync):
"""Sync for Cerebrum accounts in AD.
This contains generic functionality for handling accounts for AD, to add
more functionality you need to subclass this.
A mapping is added by this class: L{owner2ent}, which is a dict with the
owner's owner_id as key, and the values are lists of entity instances.
"""
    # The default object class of the objects to work on. Used if the config
    # does not say otherwise.
default_ad_object_class = 'user'
    # A mapping of what the different UserAccountControl settings map to,
    # bitwise. The UserAccountControl attribute is returned as an integer,
    # where each bit gives us one setting. This list should be expanded if new
    # settings are added to AD. Note that the config tells us which settings
    # we should care about and which not. The position in this list maps to the bit
# position, starting from the right. Each string corresponds to the
# setting's name in the powershell command Set-ADAccountControl.
# For more info about the UAC settings, see
# http://msdn.microsoft.com/en-us/library/ms680832(v=vs.85).aspx
_useraccountcontrol_settings = (
# 1. If the logon script will be run. Not implemented.
None, # 'Script',
# 2. If the account is disabled. Set by Disable-ADAccount instead.
None, # 'AccountDisabled',
# 3. The home directory is required.
'HomedirRequired',
# 4. The account is currently locked out, e.g. by too many failed
# password attempts. Gets set and reset automatically by AD DS.
None, # 'LockOut',
# 5. No password is required to log on with the given account.
'PasswordNotRequired',
# 6. The user can't change its password.
'CannotChangePassword',
# 7. The user can send an encrypted password. Updates the value
# which in AD is named ADS_UF_ENCRYPTED_TEXT_PASSWORD_ALLOWED.
'AllowReversiblePasswordEncryption',
# 8. The account is for users whose primary account is in another
# domain. This account provides local domain access. Also called
# "Local user account". Not implemented.
None, # 'TempDuplicateAccount',
# 9. A normal account. This is the default type of an account. Not
# implemented.
None, # 'NormalAccount',
# 10. Trusts the account for other domains. Not implemented.
None, # 'InterdomainTrustAccount',
# 11. If set, this is a computer account. Not implemented. Needs to
# be set in other ways.
None, # 'WorkstationTrustAccount',
# 12. If set, this is a computer account for a system backup domain
# controller that is a member of this domain.
None, # 'ServerTrustAccount',
# 13. Not used
None,
# 14. Not used
None,
# 15. The password for the account will never expire.
'PasswordNeverExpires',
# 16. If set, this is an MNS logon account.
'MNSLogonAccount',
# 17. Force user to log on by smart card. Not implemented.
None, # 'SmartcardRequired',
# 18. The service account is trusted for Kerberos delegation. Any
# such service can impersonate a client requesting the service.
'TrustedForDelegation',
# 19. The service account's security context will not be delegated
# to any service.
'AccountNotDelegated',
# 20. Restrict account to only use DES encryption types for keys.
'UseDESKeyOnly',
# 21. Account does not require Kerberos pre-authentication for
# logon.
'DoesNotRequirePreAuth',
# 22. The account's password is expired. Automatically set by AD.
'PasswordExpired',
# 23. Enabled for delegation of authentication of others.
# Warning: This setting enables the account and services running
# as the account to authenticate as other users!
'TrustedToAuthForDelegation',
# 24. Account is used for read-only DCs, and needs protection.
None, # 'PartialSecretsAccount',
)
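    # Illustrative sketch (not part of the sync itself): given the convention
    # above, bit i of the UserAccountControl integer, counted from the right,
    # corresponds to _useraccountcontrol_settings[i]. A hypothetical decoder:
    #
    #     def decode_uac(value):
    #         return [name for i, name
    #                 in enumerate(UserSync._useraccountcontrol_settings)
    #                 if name and value & (1 << i)]
    #
    #     decode_uac(1 << 14)  # -> ['PasswordNeverExpires']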
def __init__(self, *args, **kwargs):
"""Instantiate user specific functionality."""
super(UserSync, self).__init__(*args, **kwargs)
self.addr2username = {}
self.ac = Factory.get("Account")(self.db)
self.pe = Factory.get("Person")(self.db)
def configure(self, config_args):
"""Override the configuration for setting user specific variables.
"""
super(UserSync, self).configure(config_args)
# Check that the UserAccountControl settings are valid:
for setting in self.config['useraccountcontrol']:
if setting not in self._useraccountcontrol_settings:
raise Exception('Unknown UserAccountControl: %s' % setting)
    def start_fetch_ad_data(self, object_class=None, attributes=None):
"""Ask AD to start generating the data we need about groups.
Could be subclassed to get more/other data.
@rtype: string
@return: A CommandId that is the reference from the AD service to later
get the data that has been generated. Could be used for e.g.
L{process_ad_data}.
"""
        # Avoid mutating a shared default argument:
        if attributes is None:
            attributes = {}
        if self.config['useraccountcontrol']:
            attributes['UserAccountControl'] = None
if 'Enabled' not in attributes:
attributes['Enabled'] = None
return super(UserSync, self).start_fetch_ad_data(
object_class=object_class, attributes=attributes)
def fetch_cerebrum_data(self):
"""Fetch data from Cerebrum that is needed for syncing accounts.
        What kind of data will be gathered is determined by the attribute
        configuration. Contact info will for instance not be retrieved from
        Cerebrum unless it is needed by some attribute. Subclasses could
        however override this, if they need such data for other usage.
"""
super(UserSync, self).fetch_cerebrum_data()
# No need to fetch Cerebrum data if there are no entities to add them
        # to. Some methods in the Cerebrum API also raise an exception if
# given an empty list of entities.
if not self.entities:
return
# Create a mapping of owner id to user objects
self.logger.debug("Fetch owner information...")
self.owner2ent = dict()
for ent in self.entities.itervalues():
self.owner2ent.setdefault(ent.owner_id, []).append(ent)
self.logger.debug("Mapped %d entity owners", len(self.owner2ent))
# Set what is primary accounts.
i = 0
for row in self.ac.list_accounts_by_type(primary_only=True):
ent = self.id2entity.get(row['account_id'])
if ent:
ent.is_primary_account = True
i += 1
self.logger.debug("Found %d primary accounts", i)
        # The different methods decide whether their data should be fetched,
# depending on the attribute configuration.
self.fetch_contact_info()
self.fetch_names()
self.fetch_person_names()
self.fetch_external_ids()
self.fetch_traits()
self.fetch_address_info()
self.fetch_posix()
self.fetch_homes()
self.fetch_mail()
def fetch_cerebrum_entities(self):
"""Fetch the users from Cerebrum that should be compared against AD.
The configuration is used to know what to cache. All data is put in a
list, and each entity is put into an object from
L{Cerebrum.modules.ad2.CerebrumData} or a subclass, to make it easier
to later compare them with AD objects.
Could be subclassed to fetch more data about each entity to support
        extra functionality from AD and to override settings, e.g. what contact
        info should be used.
@rtype: list
@return: A list of targeted entities from Cerebrum, wrapped into
L{CerebrumData} objects.
"""
# Find all users with defined spread(s):
self.logger.debug("Fetching users with spread %s" %
(self.config['target_spread'],))
subset = self.config.get('subset')
if hasattr(self.co, 'trait_account_exempt'):
for row in self._entity_trait.list_traits(
self.co.trait_account_exempt):
self.exempt_entities.append(int(row['entity_id']))
for row in self.ac.search(spread=self.config['target_spread']):
uname = row["name"]
# For testing or special cases where we only want to sync a subset
# of entities. The subset should contain the entity names, e.g.
# usernames or group names.
if subset and uname not in subset:
continue
self.entities[uname] = self.cache_entity(
int(row['account_id']),
uname,
owner_id=int(row['owner_id']),
owner_type=int(row['owner_type']))
# This functionality makes it possible to set a different AD-OU
            # based on the type(s) of affiliation(s) (CRB-862)
ou_mappings = self.config.get('ou_mappings')
if ou_mappings and isinstance(ou_mappings, list):
for mapping in ou_mappings:
aff_list = mapping.get('affiliations')
if not aff_list:
raise ConfigUtils.ConfigError(
'Missing or invalid affiliations in ou_mappings')
validator = ConfigUtils.AccountCriterias(
affiliations=aff_list)
try:
validator.check(self.entities[uname])
self.entities[uname].ou = mapping['ou']
self.logger.debug3(
'Using "ou_mappings". '
'OU for account %s (%d) has been set to %s' % (
uname,
row['account_id'],
mapping['ou']))
break
except ConfigUtils.CriteriaError: # no match
continue
else:
self.logger.debug(
'Using "ou_mappings". '
'No matching affiliation(s) for account %s (%d). '
'Using "target_ou"' % (
uname,
row['account_id']))
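    # Illustrative sketch (assumption, not from the original source): the
    # 'ou_mappings' config is expected to be a list of dicts with the keys
    # used above. The OUs and affiliation specs below are hypothetical:
    #
    #     config['ou_mappings'] = [
    #         {'affiliations': ['STUDENT'],
    #          'ou': 'OU=Students,DC=example,DC=org'},
    #         {'affiliations': ['ANSATT'],
    #          'ou': 'OU=Employees,DC=example,DC=org'},
    #     ]
    #
    # Accounts matching none of the entries fall back to 'target_ou'.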
def fetch_names(self):
"""Fetch all the persons' names and store them for the accounts.
This overrides the default behaviour of fetching the names registered
for the given entities, but instead fetches the owner's (person's)
names.
        The names that are retrieved are first and last names. Titles are
        retrieved in L{fetch_titles}, even though they're stored as names too.
        TODO: change this, and put all in a dict of names instead?
        If there are personal accounts without first and last names, this gets
        logged.
"""
self.logger.debug("Fetch name information...")
# First PersonName:
variants = set()
systems = set()
languages = set()
all_systems = False
# Go through config and see what info needs to be fetched:
for atr in ConfigUtils.get_config_by_type(self.config['attributes'],
ConfigUtils.NameAttr):
variants.update(atr.name_variants)
if atr.source_systems is None:
all_systems = True
else:
systems.update(atr.source_systems)
if atr.languages:
languages.update(atr.languages)
self.logger.debug2("Fetching name variants: %s",
', '.join(str(v) for v in variants))
self.logger.debug2("Fetching names by languages: %s",
', '.join(str(l) for l in languages))
self.logger.debug2("Fetching names from sources: %s",
', '.join(str(s) for s in systems))
if not variants:
return
if all_systems or not systems:
# By setting to None we fetch from all source_systems.
systems = None
if not languages:
languages = None
# TODO: Or make use of self.config['language'] to get the priority
# right?
# If subset is given, we want to limit the db-search:
ids = None
if self.config['subset']:
ids = self.owner2ent.keys()
i = 0
# TODO: This is not always for persons! Need to also fetch for e.g.
# OUs. Do we need to fetch in two rounds? One for the entities and one
# for the owners?
for row in self.pe.search_name_with_language(name_variant=variants,
entity_type=self.co.entity_person,
entity_id=ids,
name_language=languages):
for ent in self.owner2ent.get(row['entity_id'], ()):
vari = str(self.co.EntityNameCode(row['name_variant']))
lang = str(self.co.LanguageCode(row['name_language']))
ent.entity_name_with_language.setdefault(vari, {})[lang] = row['name']
i += 1
self.logger.debug("Found %d names" % i)
def fetch_person_names(self):
"""Fetch all the persons' names and store them for the accounts.
This overrides the default behaviour of fetching the names registered
for the given entities, but instead fetches the owner's (person's)
names.
        The names that are retrieved are first and last names. Titles are
        retrieved in L{fetch_titles}, even though they're stored as names too.
        TODO: change this, and put all in a dict of names instead?
        If there are personal accounts without first and last names, this gets
        logged.
"""
self.logger.debug("Fetch person name information...")
variants = set()
systems = set()
# languages = set()
all_systems = False
# Go through config and see what info needs to be fetched:
for atr in ConfigUtils.get_config_by_type(self.config['attributes'],
ConfigUtils.PersonNameAttr):
variants.update(atr.name_variants)
if atr.source_systems is None:
all_systems = True
else:
systems.update(atr.source_systems)
        self.logger.debug2("Fetching person name variants: %s",
', '.join(str(v) for v in variants))
self.logger.debug2("Fetching person names from sources: %s",
', '.join(str(s) for s in systems))
if not variants:
return
if all_systems or not systems:
# By setting to None we fetch from all source_systems.
systems = None
# If subset is given, we want to limit the db-search:
ids = None
if self.config['subset']:
ids = self.owner2ent.keys()
# Names stored in person table:
i = 0
for row in self.pe.search_person_names(source_system=systems,
name_variant=variants,
person_id=ids):
for ent in self.owner2ent.get(row['person_id'], ()):
vari = str(self.co.PersonName(row['name_variant']))
ssys = str(self.co.AuthoritativeSystem(row['source_system']))
ent.person_names.setdefault(vari, {})[ssys] = row['name']
i += 1
self.logger.debug("Found %d person names" % i)
def fetch_contact_info(self):
"""Fetch all contact information for users, e.g. mobile and telephone.
        Checks the config for what contact info to fetch from Cerebrum, fetches
        it and puts it in each CerebrumEntity's dict L{contact_info}. The
        format of the dict must be kept in sync between this method and the
        CerebrumEntity class. Example of how L{contact_info} could look:
{str(contacttypeA):
{str(sourcesystemA): str(contactvalue),
str(sourcesystemB): str(contactvalue),
},
str(contacttypeB):
{str(sourcesystemA): str(contactvalue),
str(sourcesystemB): str(contactvalue),
},
}
"""
self.logger.debug("Fetch contact info...")
types = set()
systems = set()
all_systems = False
# Go through config and see what info needs to be fetched:
for atr in ConfigUtils.get_config_by_type(self.config['attributes'],
ConfigUtils.ContactAttr):
types.update(atr.contact_types)
if atr.source_systems is None:
all_systems = True
else:
systems.update(atr.source_systems)
self.logger.debug2("Fetching contact-types: %s",
', '.join(str(t) for t in types))
self.logger.debug2("Fetching contactinfo from sources: %s",
', '.join(str(s) for s in systems))
if not types:
return
if all_systems or not systems:
# By setting to None we fetch from all source_systems.
systems = None
# Limit the db search if only working for a subset
ids = None
if self.config['subset']:
ids = self.owner2ent.keys()
# Contact info stored on the person:
i = 0
for row in self.pe.list_contact_info(source_system=systems,
entity_type=self.co.entity_person,
entity_id=ids,
contact_type=types):
for ent in self.owner2ent.get(row['entity_id'], ()):
ctype = str(self.co.ContactInfo(row['contact_type']))
ssys = str(self.co.AuthoritativeSystem(row['source_system']))
ent.contact_info.setdefault(ctype, {})[ssys] = row
i += 1
# Contact info stored on the account:
for row in self.ac.list_contact_info(source_system=systems,
entity_type=self.co.entity_account,
entity_id=ids,
contact_type=types):
ent = self.id2entity.get(row['entity_id'], None)
if ent:
ctype = str(self.co.ContactInfo(row['contact_type']))
ssys = str(self.co.AuthoritativeSystem(row['source_system']))
ent.contact_info.setdefault(ctype, {})[ssys] = row
i += 1
self.logger.debug("Found %d contact data" % i)
def fetch_external_ids(self):
"""Fetch all external IDs for entities according to config.
TODO: this should be moved upwards, as it's not only for users.
"""
types = set()
systems = set()
all_systems = False
# Go through config and see what info needs to be fetched:
for atr in ConfigUtils.get_config_by_type(self.config['attributes'],
ConfigUtils.ExternalIdAttr):
types.update(atr.id_types)
if atr.source_systems is None:
all_systems = True
else:
systems.update(atr.source_systems)
if not types:
return
self.logger.debug("Fetch external ids...")
if all_systems or not systems:
# By setting to None we fetch from all source_systems.
systems = None
# Limit the db search if only working for a subset
ids = None
if self.config['subset']:
ids = self.owner2ent.keys()
i = 0
# Search person:
for row in self.pe.search_external_ids(
source_system=systems, id_type=types, entity_id=ids,
entity_type=self.co.entity_person):
for ent in self.owner2ent.get(row['entity_id'], ()):
itype = str(self.co.EntityExternalId(row['id_type']))
ssys = str(self.co.AuthoritativeSystem(row['source_system']))
ent.external_ids.setdefault(itype, {})[ssys] = row['external_id']
i += 1
# Search account:
ids = None
if self.config['subset']:
ids = self.id2entity.keys()
for row in self.ac.search_external_ids(
source_system=systems, id_type=types, entity_id=ids,
entity_type=self.co.entity_account):
ent = self.id2entity.get(row['entity_id'], None)
if ent:
itype = str(self.co.EntityExternalId(row['id_type']))
ssys = str(self.co.AuthoritativeSystem(row['source_system']))
ent.external_ids.setdefault(itype, {})[ssys] = row['external_id']
i += 1
self.logger.debug("Found %d external IDs" % i)
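    # Illustrative sketch (hypothetical type and system names): after
    # fetch_external_ids() the cached data on each entity is a dict keyed by
    # external-id type and then by source system, mirroring the setdefault()
    # calls above:
    #
    #     ent.external_ids == {
    #         'SOME_ID_TYPE': {'some_source_system': '1234567890'},
    #     }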
def fetch_traits(self):
"""Fetch all traits for entities according to config.
TODO: this should be moved upwards, as it's not only for users.
"""
types = set()
# Go through config and see what info needs to be fetched:
for atr in ConfigUtils.get_config_by_type(self.config['attributes'],
ConfigUtils.TraitAttr):
types.update(atr.traitcodes)
if not types:
return
self.logger.debug2("Fetch traits of types: %s",
', '.join(str(t) for t in types))
ids = NotSet
if self.config['subset']:
ids = self.id2entity.keys()
i = 0
for row in self.ent.list_traits(code=types, entity_id=ids):
ent = self.id2entity.get(row['entity_id'], None)
if ent:
code = str(self.co.EntityTrait(row['code']))
ent.traits[code] = row
i += 1
self.logger.debug("Found %d traits" % i)
# TODO: Fetch from person too? Is that needed?
def fetch_address_info(self):
"""Fetch addresses for users.
"""
adrtypes = set()
systems = set()
all_systems = False
# Go through config and see what info needs to be fetched:
for atr in ConfigUtils.get_config_by_type(self.config['attributes'],
ConfigUtils.AddressAttr):
adrtypes.update(atr.address_types)
if atr.source_systems is None:
all_systems = True
else:
systems.update(atr.source_systems)
if not adrtypes:
return
self.logger.debug("Fetch address info...")
if all_systems or not systems:
# By setting to None we fetch from all source_systems.
systems = None
i = 0
# Addresses stored on the person:
if hasattr(self.pe, 'list_entity_addresses'):
for row in self.pe.list_entity_addresses(
source_system=systems,
entity_type=self.co.entity_person,
address_type=adrtypes):
for ent in self.owner2ent.get(row['entity_id'], ()):
atype = str(self.co.Address(row['address_type']))
ssys = str(self.co.AuthoritativeSystem(row['source_system']))
ent.addresses.setdefault(atype, {})[ssys] = row
i += 1
        # Addresses stored on the account:
if hasattr(self.ac, 'list_entity_addresses'):
for row in self.ac.list_entity_addresses(
source_system=systems,
entity_type=self.co.entity_account,
address_type=adrtypes):
ent = self.id2entity.get(row['entity_id'], None)
if ent:
atype = str(self.co.Address(row['address_type']))
ssys = str(self.co.AuthoritativeSystem(row['source_system']))
ent.addresses.setdefault(atype, {})[ssys] = row
i += 1
self.logger.debug("Found %d addresses" % i)
def fetch_mail(self):
"""Fetch all e-mail address for the users.
This method only fetches the primary addresses. Subclass me if more
e-mail data is needed, e.g. aliases.
        TODO: We have a problem here, since we store primary mail addresses
        differently for those that use the Email module and those without it,
        which instead store them as contact_info. Now we check if methods from
        the email module exist to decide how we should fetch it, but we should
        fix this in a better way later.
"""
if not ConfigUtils.has_config(
self.config['attributes'],
(ConfigUtils.EmailQuotaAttr, ConfigUtils.EmailAddrAttr,
ConfigUtils.EmailForwardAttr)):
# No email data is needed, skipping
return
self.logger.debug("Fetch mail data...")
# Limit/speed up db search if only targeting a subset:
ids = None
if self.config['subset']:
ids = self.id2entity.keys()
# Need a map from EmailTarget's target_id to entity_id:
targetid2entityid = dict((r['target_id'], r['target_entity_id']) for r
in self.mailtarget.list_email_targets_ext(
target_entity_id=ids))
for target_id, entity_id in targetid2entityid.iteritems():
            # Note: look up by entity_id; self.entities is keyed by name.
            ent = self.id2entity.get(entity_id)
if ent:
ent.maildata['target_id'] = target_id
# Email quotas
if ConfigUtils.has_config(self.config['attributes'],
ConfigUtils.EmailQuotaAttr):
mailquota = Email.EmailQuota(self.db)
i = 0
for row in mailquota.list_email_quota_ext():
if row['target_id'] not in targetid2entityid:
continue
ent = self.id2entity.get(targetid2entityid[row['target_id']])
if ent:
ent.maildata['quota'] = row
i += 1
self.logger.debug("Found %d email quotas" % i)
# Email addresses
if ConfigUtils.has_config(self.config['attributes'],
ConfigUtils.EmailAddrAttr):
ea = Email.EmailAddress(self.db)
# Need a mapping from address_id for the primary addresses:
adrid2email = dict()
i = 0
            # TODO: filter_expired could perhaps be a config setting?
for row in ea.search(filter_expired=False):
ent = self.id2entity.get(targetid2entityid.get(row['target_id']))
if ent:
adr = '@'.join((row['local_part'], row['domain']))
adrid2email[row['address_id']] = adr
ent.maildata.setdefault('alias', []).append(adr)
i += 1
self.addr2username[adr.lower()] = ent.entity_name
self.logger.debug("Found %d email addresses", i)
epat = Email.EmailPrimaryAddressTarget(self.db)
i = 0
for row in epat.list_email_primary_address_targets():
if row['address_id'] not in adrid2email:
# Probably expired addresses
continue
ent = self.id2entity.get(targetid2entityid[row['target_id']])
if ent:
ent.maildata['primary'] = adrid2email[row['address_id']]
i += 1
self.logger.debug("Found %d primary email addresses" % i)
# Email forwards
if ConfigUtils.has_config(self.config['attributes'],
ConfigUtils.EmailForwardAttr):
ef = Email.EmailForward(self.db)
i = 0
for row in ef.list_email_forwards():
# Skip not enabled forwards. We should not need those.
if row['enable'] != 'T':
continue
ent_id = targetid2entityid.get(row['target_id'])
if not ent_id:
continue
ent = self.id2entity.get(targetid2entityid[row['target_id']])
if ent:
ent.maildata.setdefault('forward', []).append(
row['forward_to'])
i += 1
self.logger.debug("Found %d forward addresses" % i)
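    # Illustrative sketch (hypothetical values): after fetch_mail() the mail
    # data cached on an entity follows the keys set above:
    #
    #     ent.maildata == {
    #         'target_id': 1234,
    #         'primary': 'jdoe@example.org',
    #         'alias': ['jdoe@example.org', 'john.doe@example.org'],
    #         'quota': <db-row with the quota values>,
    #         'forward': ['john@example.net'],
    #     }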
def fetch_homes(self):
"""Fetch all home directories for the the users.
The User objects gets filled with a list of all its home directories in
the L{home} attribute, which is used according to
L{ConfigUtils.AttrConfig.HomeAttr}.
"""
homespreads = set()
# Go through config and see what info needs to be fetched:
for atr in ConfigUtils.get_config_by_type(self.config['attributes'],
ConfigUtils.HomeAttr):
homespreads.add(atr.home_spread)
if not homespreads:
return
self.logger.debug("Fetch home directories...")
i = 0
for sp in homespreads:
for row in self.ac.list_account_home(
home_spread=sp,
account_spread=self.config['target_spread']):
ent = self.id2entity.get(row['account_id'])
if ent:
if not hasattr(ent, 'home'):
ent.home = {}
tmp = {}
tmp['status'] = row['status']
tmp['homedir'] = self.ac.resolve_homedir(
account_name=row['entity_name'],
disk_path=row['path'],
home=row['home'],
spread=row['home_spread'])
ent.home[row['home_spread']] = tmp
i += 1
self.logger.debug("Found %d account home directories" % i)
def fetch_posix(self):
"""Fetch the POSIX data for users, if needed.
"""
if not ConfigUtils.has_config(self.config['attributes'],
ConfigUtils.PosixAttr):
# No need for any posix data
return
self.logger.debug("Fetch posix data...")
pg = Factory.get('PosixGroup')(self.db)
pu = Factory.get('PosixUser')(self.db)
# Map from group_id to GID:
posix_group_id2gid = dict((eid, gid) for eid, gid in
pg.list_posix_groups())
self.logger.debug("Found %d posix groups", len(posix_group_id2gid))
i = 0
for row in pu.list_posix_users():
ent = self.id2entity.get(row['account_id'], None)
if ent:
if not hasattr(ent, 'posix'):
ent.posix = {}
ent.posix['uid'] = int(row['posix_uid']) or ''
ent.posix['gid'] = posix_group_id2gid.get(row['gid'], '')
ent.posix['shell'] = str(self.co.PosixShell(row['shell']))
ent.posix['gecos'] = row['gecos']
i += 1
self.logger.debug("Found %d posix users", i)
def fetch_passwords(self):
"""Fetch passwords for accounts that are new in AD.
The passwords are stored in L{self.uname2pasw}, and passwords are only
fetched for entities where the attribute L{in_ad} is False. This should
therefore be called after the processing of existing entities and
before processing the entities that doesn't exist in AD yet.
The passwords are fetched from the changelog, and only the last and
newest password is used.
"""
self.logger.debug("Fetching passwords for accounts not in AD")
self.uname2pasw = {}
for row in reversed(
tuple(self.db.get_log_events(
types=self.clconst.account_password))):
try:
ent = self.id2entity[row['subject_entity']]
except KeyError:
                # We continue past this event, since the account is not in
                # the list of users who should get their password set.
continue
if ent.entity_name in self.uname2pasw:
                # We only need the last password for each account
continue
if ent.in_ad:
# Account is already in AD
continue
# If a GPG recipient ID is set, we fetch the encrypted password
if self.config.get('gpg_recipient_id', None):
gpg_db = GpgData(self.db)
for tag in ('password-base64', 'password'):
gpg_data = gpg_db.get_messages_for_recipient(
entity_id=ent.entity_id,
tag=tag,
recipient=self.config['gpg_recipient_id'],
latest=True)
if gpg_data:
break
else:
self.logger.debug2(
'No GPG encrypted password found for %s',
ent.entity_name)
continue
password = gpg_data[0].get('message')
self.uname2pasw[ent.entity_name] = (password, tag)
else: # we fetch the plaintext from the changelog
try:
password = json.loads(
row['change_params'])['password']
self.uname2pasw[ent.entity_name] = (password,
'plaintext')
except (KeyError, TypeError, IndexError):
self.logger.debug2('No plaintext loadable for %s',
ent.entity_name)
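    # Illustrative sketch (hypothetical values): fetch_passwords() leaves a
    # mapping from username to a (password, tag) tuple, where the tag is
    # either 'plaintext' or one of the GPG tags looked up above:
    #
    #     self.uname2pasw == {
    #         'jdoe': ('correct horse battery staple', 'plaintext'),
    #         'asmith': ('hQEMA...', 'password-base64'),
    #     }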
def process_ad_object(self, ad_object):
"""Compare a User object retrieved from AD with Cerebrum.
        Overridden for user specific functionality.
"""
if not super(UserSync, self).process_ad_object(ad_object):
return False
ent = self.adid2entity.get(ad_object['Name'].lower())
if ent.active:
if not ad_object.get('Enabled', False):
self.enable_object(ad_object)
def process_entities_not_in_ad(self):
"""Start processing users not in AD.
Depends on the generic superclass' functionality.
"""
# Cache the passwords for the entities not in AD:
self.fetch_passwords()
return super(UserSync, self).process_entities_not_in_ad()
def process_entity_not_in_ad(self, ent):
"""Process an account that doesn't exist in AD, yet.
We should create and update a User object in AD for those who are not
        in AD yet. The object should then be updated like normal objects.
        @type ent: CerebrumEntity
        @param ent: An object representing an entity in Cerebrum.
"""
ad_object = super(UserSync, self).process_entity_not_in_ad(ent)
if not ad_object:
self.logger.warn("What to do? Got None from super for: %s" %
ent.entity_name)
return
# TODO: Move this to create_object() instead! Could then add the
# password in the creating call - would be faster.
if ent.ad_new:
            # TODO: Is this OK? Should we disable the object?
# We collect the password from the cache, as generated by
# fetch_passwords(). If there is no plaintext available for
# the user, set an empty one.
try:
password, tag = self.uname2pasw[ent.entity_name]
except KeyError:
self.logger.warn('No password set for %s' % ent.entity_name)
return ad_object
self.logger.debug('Trying to set pw for %s', ent.entity_name)
if self.server.set_password(ad_object['DistinguishedName'],
password,
password_type=tag):
# As a security feature, you have to explicitly enable the
# account after a valid password has been set.
if ent.active:
self.enable_object(ad_object)
# If more functionality gets put here, you should check if the entity
# is active, and not update it if the config says so (downgrade).
return ad_object
def process_cl_event(self, row):
"""Process a given ChangeLog event for users.
        Overridden to support account specific changes.
@type row: dict of db-row
@param row:
A db-row, as returned from L{changelog.get_events()}. This is the
row that should be processed.
@rtype: bool
@return:
The result from the handler. Should be True if the sync succeeded
or there was no need for the change to be synced, i.e. the log
change could be confirmed. Should only return False if the change
needs to be redone.
@raise UnhandledChangeTypeError?
TODO: Should we have our own exception class that is used if the
method does not know what to do with a given change type? Could
then be used by subclasses.
@raise TODO:
TODO: What exceptions is expected here?
"""
# TODO: Should we create a new account instance per call, to support
# threading?
self.ac.clear()
try:
self.ac.find(row['subject_entity'])
except Errors.NotFoundError:
pass
else:
if hasattr(self.co, 'trait_account_exempt') and \
self.co.trait_account_exempt in self.ac.get_traits():
self.logger.debug('Account {0} has trait {1}, ignoring'.format(
self.ac.entity_id, str(self.co.trait_account_exempt)))
return False
# TODO: clean up code when more functionality is added!
if row['change_type_id'] == self.clconst.account_password:
if self.ac.is_expired():
self.logger.debug("Account %s is expired, ignoring",
row['subject_entity'])
return True
if not self.ac.has_spread(self.config['target_spread']):
self.logger.debug("Account %s without target_spread, ignoring",
row['subject_entity'])
return False
name = self._format_name(self.ac.account_name)
# If a GPG recipient ID is set, we fetch the encrypted password
tag = 'plaintext'
if self.config.get('gpg_recipient_id', None):
gpg_db = GpgData(self.db)
for tag in ('password-base64', 'password'):
gpg_data = gpg_db.get_messages_for_recipient(
entity_id=self.ac.entity_id,
tag=tag,
recipient=self.config['gpg_recipient_id'],
latest=True)
if gpg_data:
break
else:
self.logger.warn(
'Account %s missing GPG encrypted password',
row['subject_entity'])
return False
pw = gpg_data[0].get('message')
else: # we fetch the plaintext from the changelog
try:
pw = json.loads(row['change_params'])['password']
except (KeyError, TypeError, IndexError):
self.logger.warn("Account %s missing plaintext password",
row['subject_entity'])
return False
if not isinstance(pw, unicode):
try:
pw = unicode(pw, 'UTF-8')
except UnicodeDecodeError:
pw = unicode(pw, 'ISO-8859-1')
return self.server.set_password(name, pw, password_type=tag)
elif row['change_type_id'] in (self.clconst.quarantine_add,
self.clconst.quarantine_del,
self.clconst.quarantine_mod,
self.clconst.quarantine_refresh):
change = self.clconst.ChangeType(row['change_type_id'])
if not hasattr(self.ac, 'entity_id'):
self.logger.debug(
"Can only handle %s for accounts, entity_id: %s",
change, row['subject_entity'])
# Remove the event, since we can't do anything about it. Also,
# the fullsync will take care of any weird situations.
return True
if not self.ac.has_spread(self.config['target_spread']):
self.logger.debug("Account %s without target_spread, ignoring",
row['subject_entity'])
# The fullsync takes care of disabling accounts without AD
# spread.
return True
ent = self.cache_entity(self.ac.entity_id,
self.ac.account_name)
# TODO/TBD: Should we trigger the enable/disable scripts here? We
# have no AD-object to pass to the self.script function...
# simple_object = dict(DistinguishedName=ent.dn)
# self.*able_object(simple_object)
if QuarantineHandler.check_entity_quarantines(
self.db, self.ac.entity_id).is_locked():
return self.server.disable_object(ent.dn)
else:
return self.server.enable_object(ent.dn)
# Other change types handled by other classes:
return super(UserSync, self).process_cl_event(row)
class GroupSync(BaseSync):
"""Sync for Cerebrum groups in AD.
This contains generic functionality for handling groups for AD, to add more
functionality you need to subclass this.
TODO: Should subclasses handle distribution and security groups? How should
    we treat those? This needs to be described better in the specifications!
"""
    # The default object class of the objects to work on. Used if the config
    # does not say otherwise.
default_ad_object_class = 'group'
def __init__(self, *args, **kwargs):
"""Instantiate group specific functionality."""
super(GroupSync, self).__init__(*args, **kwargs)
self.gr = Factory.get("Group")(self.db)
self.pe = Factory.get("Person")(self.db)
self.ac = Factory.get("Account")(self.db)
def configure(self, config_args):
"""Add extra configuration that is specific for groups.
@type config_args: dict
@param config_args:
Configuration data from cereconf and/or command line options.
"""
super(GroupSync, self).configure(config_args)
# Check if the group type is a valid type:
if self.config['group_type'] not in ('security', 'distribution'):
raise Exception('Invalid group type: %s' %
self.config['group_type'])
self.new_group_type = self.config['group_type'].lower()
# Check if the group scope is a valid scope:
if self.config['group_scope'].lower() not in ('global', 'universal'):
raise Exception('Invalid group scope: %s' %
self.config['group_scope'])
self.new_group_scope = self.config['group_scope'].lower()
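    # Illustrative sketch (not from the original source): the two group
    # settings validated above would typically be configured like this:
    #
    #     config['group_type'] = 'security'    # or 'distribution'
    #     config['group_scope'] = 'global'     # or 'universal'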
def process_ad_object(self, ad_object):
"""Process a Group object retrieved from AD.
Do the basic sync and update the member list for the group.
"""
if not super(GroupSync, self).process_ad_object(ad_object):
return False
# ent = self.adid2entity.get(ad_object['Name'].lower())
# dn = ad_object['DistinguishedName'] # TBD: or 'Name'?
# TODO: more functionality for groups?
def post_process(self):
"""Extra sync functionality for groups."""
super(GroupSync, self).post_process()
def fetch_cerebrum_data(self):
"""Fetch data from Cerebrum that is needed for syncing groups.
        What kind of data should be gathered depends on what attributes are
set in the config to be exported. There's for instance no need to fetch
titles if the attribute Title is not used. Subclasses could however
override this, if they need such data for other usage.
"""
super(GroupSync, self).fetch_cerebrum_data()
self.fetch_posix()
self.fetch_members_by_spread()
def fetch_cerebrum_entities(self):
"""Fetch the groups from Cerebrum that should be compared against AD.
The configuration is used to know what to cache. All data is put in a
list, and each entity is put into an object from
L{Cerebrum.modules.ad2.CerebrumData} or a subclass, to make it easier
to later compare with AD objects.
Could be subclassed to fetch more data about each entity to support
extra functionality from AD and to override settings.
"""
self.logger.debug("Fetching groups with spread %s" %
(self.config['target_spread'],))
subset = self.config.get('subset')
if hasattr(self.co, 'trait_group_exempt'):
for row in self._entity_trait.list_traits(
self.co.trait_group_exempt):
self.exempt_entities.append(int(row['entity_id']))
for row in self.gr.search(spread=self.config['target_spread']):
name = row["name"]
# For testing or special cases where we only want to sync a subset
# of entities. The subset should contain the entity names, e.g.
# usernames or group names.
if subset and name not in subset:
continue
self.entities[name] = self.cache_entity(
int(row['group_id']),
name,
description=row['description'])
def _configure_group_member_spreads(self):
"""Process configuration and set needed parameters for extracting
extra AD information about group members with needed spreads.
"""
self.config['group_member_spreads'] = dict()
        # This is a sanity check: all spreads defined in MemberAttr must have
        # their own syncs defined too.
for member_atr in ConfigUtils.get_config_by_type(
self.config['attributes'], ConfigUtils.MemberAttr):
for spr in member_atr.member_spreads:
spr_name = str(spr)
if spr_name not in adconf.SYNCS:
raise Exception(
"Illegal spread in 'Member' attribute: %s. Only"
" spreads that have their own sync configured can be"
" used in the attribute" % spr_name)
if spr_name == self.config['target_spread']:
mem_obj = self
mem_config = self.config
else:
mem_obj = self.get_class(sync_type=spr_name)(self.db,
self.logger)
mem_config = adconf.SYNCS[spr_name].copy()
# Drain the list of attributes, to avoid fetching too much
# data we don't need when running the sync:
mem_config['attributes'] = {}
mem_config['sync_type'] = spr_name
mem_obj.configure(mem_config)
self.config['group_member_spreads'][spr_name] = {
'config': mem_config,
'spread': spr,
'sync': mem_obj, }
def _fetch_group_member_entities(self):
"""Extract entities with needed spreads and make AD objects out of them.
"""
self.id2extraentity = dict()
# Need to process spreads one by one, since each has its config
for spread_var in self.config['group_member_spreads'].itervalues():
spread = spread_var['spread']
self.logger.debug("Fetch members for spread: %s", spread)
mem_sync = spread_var['sync']
# Fetch Cerebrum data for all sync classes except for self:
if mem_sync != self:
self.logger.debug2("Starting member's sync of: %s", mem_sync)
mem_sync.fetch_cerebrum_data()
mem_sync.calculate_ad_values()
self.logger.debug2("Member sync done")
self.id2extraentity.update(mem_sync.id2entity)
def _fetch_person2primary_mapping(self):
"""Generate a mapping from person id to its primary account id.
TODO: This might be moved upwards to the L{BaseSync} if needed in syncs
of other entity types.
"""
self.logger.debug2('Fetch mapping of person ids to primary accounts')
# Only fetch the list once
if getattr(self, 'personid2primary', False):
return
self.personid2primary = dict((r['person_id'], r['account_id'])
for r in self.ac.list_accounts_by_type(
primary_only=True))
# A small optimisation could be to specify account_spreads for only
# returning the accounts we really need.
self.logger.debug2('Found %d persons mapped to a primary account',
len(self.personid2primary))
def _get_group_hierarchy(self, person2primary=False):
"""Get mappings of every group and every membership.
        This is a costly method, as it fetches _all_ groups and _all_ their
memberships from the database. This took for instance 25 seconds for
10000 groups in the test environment. The advantage of this is that we
cache the data you would otherwise need to ask the db about for each
group.
TODO: Note that we are, by specifying L{person2primary} here,
        overriding the person2primary setting for all member attributes, and
        do not respect each attribute's setting of this. Might need to handle
this later, and not set it globally.
@type person2primary: bool
@param person2primary:
If set to True, every person that is a member is swapped out with
its primary account from the L{self.personid2primary} dict.
@rtype: tuple(dict, dict)
@return:
Two mappings, one from group_id to all its member_ids, and one from
member_id to all its group_ids. Both dicts contain the same data,
            but both are returned for convenience.
"""
groups = dict()
mem2group = dict()
for row in self.gr.search_members():
# TODO: Should we skip entities not in either self.id2entity nor
# self.id2extraentity?
groups.setdefault(row['group_id'], set()).add(row['member_id'])
if person2primary and row['member_type'] == self.co.entity_person:
# Add persons by their primary account. Note that the primary
# account must also have the correct AD spread to be added.
account_id = self.personid2primary.get(row['member_id'])
if account_id:
self.logger.debug3("Adding person %s by primary: %s",
row['member_id'], account_id)
mem2group.setdefault(account_id,
set()).add(row['group_id'])
else:
self.logger.debug2("Person %s has no primary account",
row['member_id'])
else:
mem2group.setdefault(row['member_id'],
set()).add(row['group_id'])
return groups, mem2group
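    # Illustrative sketch (hypothetical ids): for a group 10 containing the
    # accounts 20 and 21, the two mappings returned above would be:
    #
    #     groups    == {10: set([20, 21])}
    #     mem2group == {20: set([10]), 21: set([10])}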
def fetch_members_by_spread(self):
"""Fetch the group members by the member spreads defined by the config.
This method only fetches what is needed. It will not fetch anything if
no L{MemberAttr} attribute is defined.
"""
if not ConfigUtils.has_config(self.config['attributes'],
ConfigUtils.MemberAttr):
# No need for such data
return
self.logger.debug("Fetch group members by spreads...")
self._configure_group_member_spreads()
self._fetch_group_member_entities()
person2primary = False
if any(c.person2primary for c
in ConfigUtils.get_config_by_type(self.config['attributes'],
ConfigUtils.MemberAttr)):
person2primary = True
self._fetch_person2primary_mapping()
# Cache all group memberships:
groups, mem2group = self._get_group_hierarchy(person2primary)
self.logger.debug2("Mapped %d groups with members", len(groups))
self.logger.debug2("Mapped %d groups with AD spread",
len(filter(lambda x: x in self.id2entity, groups)))
self.logger.debug2("Mapped %d members in total", len(mem2group))
def get_parents_in_ad(groupid):
"""Helper method for returning a group's parent AD groups.
            You will get a list of all the groups that are in this AD-sync,
            i.e. have the correct AD spread, and which have the given group as a
direct or indirect member.
@type groupid: int
@param groupid:
The given group's entity_id.
@rtype: set
@return:
                List of all the group-ids of the groups that have the given
                group as a member, either directly or indirectly. Could return an
empty set if no parents were found, or none of the parent
groups were targeted in the AD sync.
"""
ret = set()
for parent in mem2group.get(groupid, ()):
# Check if already processed, to avoid loops caused by two
                # groups being (indirect) members of each other:
if parent in ret:
continue
if parent in self.id2entity:
ret.add(parent)
ret.update(get_parents_in_ad(parent))
return ret
# Go through all group memberships and add those relevant for AD in the
# proper groups, either directly or indirectly:
i = 0
for group_id, members in groups.iteritems():
# Target the parent groups if the group is not supposed to be in
# AD:
if group_id in self.id2entity:
target_groups = (group_id,)
else:
target_groups = get_parents_in_ad(group_id)
if not target_groups:
continue
# Go through each member in the group and add it to all the parent
# groups that should be in AD:
for mem in members:
# Select the primary account if the member is a person. If the
# member is some other entity, use the member:
if getattr(self, 'personid2primary', False):
mem = self.personid2primary.get(mem, mem)
member = self.id2extraentity.get(mem)
if not member:
continue
for t_id in target_groups:
ent = self.id2entity[t_id]
if not hasattr(ent, 'members_by_spread'):
# TODO: might want a set or something similar:
ent.members_by_spread = []
ent.members_by_spread.append(member)
self.logger.debug3("Added %s to group %s (originally in %s)",
member, ent, group_id)
i += 1
self.logger.debug2("Fetched %d memberships", i)
def fetch_posix(self):
"""Fetch the POSIX data for groups, if needed.
"""
if not ConfigUtils.has_config(self.config['attributes'],
ConfigUtils.PosixAttr):
# No need for any posix data
return
self.logger.debug("Fetch posix data...")
pg = Factory.get('PosixGroup')(self.db)
i = 0
for row in pg.list_posix_groups():
ent = self.id2entity.get(row['group_id'], None)
if ent:
if not hasattr(ent, 'posix'):
ent.posix = {}
ent.posix['gid'] = int(row['posix_gid']) or ''
i += 1
self.logger.debug("Found %d posix groups", i)
def start_fetch_ad_data(self, object_class=None, attributes=dict()):
"""Ask AD to start generating the data we need about groups.
Could be subclassed to get more/other data.
TODO: add attributes and object_class and maybe other settings as input
parameters.
@rtype: string
@return:
            A CommandId that is the server's reference to later get the data
that has been generated.
"""
# TODO: some extra attributes to add?
return super(GroupSync, self).start_fetch_ad_data(
object_class=object_class, attributes=attributes)
def sync_ad_attribute(self, ent, attribute, cere_elements, ad_elements):
"""Compare a given attribute and update AD with the differences.
This is a generic method for updating any multivalued attribute in AD.
        Both the Cerebrum and the AD elements must be given.
"""
# TODO
pass
class HostSync(BaseSync):
"""Sync for Cerebrum hosts to 'computer' objects in AD.
This contains simple functionality for adding hosts to AD. Note that this
only creates the Computer object in AD, without connecting it to a real
host. That normally happens by manually authenticating the computer in the
domain.
"""
    # The default object class of the objects to work on. Used if the config
    # does not say otherwise.
default_ad_object_class = 'computer'
def __init__(self, *args, **kwargs):
"""Instantiate host specific functionality."""
super(HostSync, self).__init__(*args, **kwargs)
self.host = Factory.get("Host")(self.db)
def fetch_cerebrum_entities(self):
"""Fetch the entities from Cerebrum that should be compared against AD.
The configuration is used to know what to cache. All data is put in a
list, and each entity is put into an object from
L{Cerebrum.modules.ad2.CerebrumData} or a subclass, to make it easier
to later compare with AD objects.
Could be subclassed to fetch more data about each entity to support
extra functionality from AD and to override settings.
"""
self.logger.debug("Fetching hosts with spread: %s" %
(self.config['target_spread'],))
subset = self.config.get('subset')
for row in self.host.search(self.config['target_spread']):
name = row["name"]
if subset and name not in subset:
continue
self.entities[name] = self.cache_entity(int(row["host_id"]), name,
row['description'])
class MailTargetSync(BaseSync):
"""Extra sync functionality for getting MailTarget data.
Entities could be connected to mailtargets in Cerebrum, e.g. with e-mail
    addresses, e-mail quota and spam settings. The retrieval of this data
    should be done in this class.
"""
def __init__(self, *args, **kwargs):
"""Instantiate the MailTarget objects."""
super(MailTargetSync, self).__init__(*args, **kwargs)
self.mailtarget = Email.EmailTarget(self.db)
self.mailquota = Email.EmailQuota(self.db)
def fetch_cerebrum_data(self):
"""Fetch the needed mail data for the entities."""
super(MailTargetSync, self).fetch_cerebrum_data()
# Map from target_id to entity_id:
targetid2entityid = dict((r['target_id'], r['target_entity_id']) for r
in self.mailtarget.list_email_targets_ext())
for target_id, entity_id in targetid2entityid.iteritems():
            # Note: look up by entity_id; self.entities is keyed by name.
            ent = self.id2entity.get(entity_id)
if ent:
ent.maildata['target_id'] = target_id
# E-mail quotas:
for row in self.mailquota.list_email_quota_ext():
if row['target_id'] not in targetid2entityid:
self.logger.debug2("Ignoring quotas for non-cached target: %s",
row['target_id'])
continue
ent = self.id2entity.get(targetid2entityid[row['target_id']])
if ent:
ent.maildata['quota_soft'] = row['quota_soft']
ent.maildata['quota_hard'] = row['quota_hard']
class ProxyAddressesCompare(BaseSync):
"""Entities that have ProxyAddresses attribute should have a special
entity comparison routine.
"""
def attribute_mismatch(self, ent, atr, c, a):
"""Compare an attribute between Cerebrum and AD.
Overridden to handle ProxyAddresses specifically.
The ProxyAddresses attribute is also updated by Office365, with
        addresses starting with x500. We should ignore such addresses when
        comparing, to avoid having to update 20000 objects at each run. We
should only take care of SMTP addresses.
TODO: We should rather have this configurable instead of hardcoding it.
"""
if atr.lower() == 'proxyaddresses' and c and a:
advalues = list(v for v in a if not v.startswith('x500:'))
cevalues = list(c)
to_add = set(cevalues).difference(advalues)
to_remove = set(advalues).difference(cevalues)
return (to_add or to_remove, list(to_add), list(to_remove))
return super(ProxyAddressesCompare, self).attribute_mismatch(ent, atr,
c, a)
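    # Illustrative sketch (hypothetical addresses): with Cerebrum values c and
    # AD values a as below, the x500 entry maintained by Office365 is ignored
    # and only the missing SMTP alias is reported:
    #
    #     c = ['SMTP:jdoe@example.org', 'smtp:john.doe@example.org']
    #     a = ['SMTP:jdoe@example.org', 'x500:/o=Org/ou=.../cn=jdoe']
    #     attribute_mismatch(ent, 'ProxyAddresses', c, a)
    #     # -> (truthy, ['smtp:john.doe@example.org'], [])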
|
unioslo/cerebrum
|
Cerebrum/modules/ad2/ADSync.py
|
Python
|
gpl-2.0
| 137,332
|
from numbers_app import app
app.run(host='0.0.0.0', debug=True)
|
t4ec/numbers
|
runserver.py
|
Python
|
gpl-2.0
| 65
|
#!/usr/bin/env python
# DataSender.py
#
# Copyright (C) 2014-2019 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# Functions related to sending feedback data
import os
import datetime
import json
import traceback
# Do not Import Gtk if we are not bound to an X Display
if 'DISPLAY' in os.environ:
from gi.repository import Gtk
from kano.gtk3.kano_dialog import KanoDialog
from kano_world.connection import request_wrapper, content_type_json
from kano.network import is_internet
import kano.logging as logging
from kano.logging import logger
from kano.utils import run_cmd, write_file_contents, ensure_dir, delete_dir, \
delete_file, read_file_contents, get_rpi_model
TMP_DIR = os.path.join(os.path.expanduser('~'), '.kano-feedback/')
SCREENSHOT_NAME = 'screenshot.png'
SCREENSHOT_PATH = os.path.join(TMP_DIR, SCREENSHOT_NAME)
ARCHIVE_NAME = 'bug_report.tar.gz'
ARCHIVE_PATH = os.path.join(TMP_DIR, ARCHIVE_NAME)
SEPARATOR = '-----------------------------------------------------------------'
DPKG_LOG_PATH = '/var/log/dpkg.log'
APT_LOG_PATH = '/var/log/apt/'
def send_data(text, full_info, subject='', network_send=True, logs_path=''):
"""Sends the data to our servers through a post request.
It uses :func:`~get_metadata_archive` to gather all the logs on
the system.
Args:
text (str): The description of the email when sending the logs
full_info (bool): Whether to attach all logs to the payload
subject (str): The title of the email when sending the logs
network_send (bool): Whether to send the data to our servers
logs_path (str): Path to an existing logs archive to use instead
Returns:
bool, error: Whether the operation was successful or there was
an error as returned by :func:`kano_world.functions.request_wrapper`
"""
from kano_world.functions import get_email, get_mixed_username
files = {}
# packs all the information into 'files'
if full_info:
if logs_path and os.path.exists(logs_path):
files['report'] = open(logs_path, 'rb')
else:
files['report'] = get_metadata_archive(title=subject, desc=text)
# This is the actual info: subject, text, email, username
payload = {
"text": text,
"email": get_email(),
"username": get_mixed_username(),
"category": "os",
"subject": subject
}
if not network_send:
return True, None
# send the bug report and remove all the created files
success, error, data = request_wrapper('post', '/feedback',
data=payload, files=files)
delete_tmp_dir()
if not success:
return False, error
if full_info:
# kano-profile stat collection
from kano_profile.badges import increment_app_state_variable_with_dialog
increment_app_state_variable_with_dialog('kano-feedback',
'bugs_submitted', 1)
# logs were sent, clean up
logging.cleanup()
return True, None
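# A minimal usage sketch (assumption, not from the original source): send a
# short feedback report without attaching the full log archive. The subject
# and text below are made up:
#
#     ok, error = send_data('Keyboard stops responding', full_info=False,
#                           subject='Bug report')
#     if not ok:
#         logger.error('Feedback failed: {}'.format(error))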
def delete_tmp_dir():
'''
Deletes TMP_DIR directory
'''
delete_dir(TMP_DIR)
def create_tmp_dir():
'''
Creates TMP_DIR directory
'''
ensure_dir(TMP_DIR)
def delete_screenshot():
'''
Deletes the SCREENSHOT_PATH file
'''
delete_file(SCREENSHOT_PATH)
def get_metadata_archive(title='', desc=''):
'''
It creates a file (ARCHIVE_NAME) with all the information
Returns the file
'''
ensure_dir(TMP_DIR)
file_list = [
{
'name': 'metadata.json',
'contents': json.dumps({'title': title, 'description': desc})
},
{'name': 'kanux_version.txt', 'contents': get_version()},
{'name': 'kanux_stamp.txt', 'contents': get_stamp()},
{'name': 'process.txt', 'contents': get_processes()},
{'name': 'process-tree.txt', 'contents': get_process_tree()},
{'name': 'packages.txt', 'contents': get_packages()},
{'name': 'dmesg.txt', 'contents': get_dmesg()},
{'name': 'syslog.txt', 'contents': get_syslog()},
{
'name': 'cmdline.txt',
'contents': read_file_contents('/boot/cmdline.txt')
},
{
'name': 'config.txt',
'contents': read_file_contents('/boot/config.txt')
},
{'name': 'wifi-info.txt', 'contents': get_wifi_info()},
{'name': 'usbdevices.txt', 'contents': get_usb_devices()},
# TODO: Remove raw logs when json ones become stable
{'name': 'app-logs.txt', 'contents': get_app_logs_raw()},
{'name': 'app-logs-json.txt', 'contents': get_app_logs_json()},
{'name': 'hdmi-info.txt', 'contents': get_hdmi_info()},
{'name': 'edid.dat', 'contents': get_edid()},
{'name': 'screen-log.txt', 'contents': get_screen_log()},
{'name': 'xorg-log.txt', 'contents': get_xorg_log()},
{'name': 'cpu-info.txt', 'contents': get_cpu_info()},
{'name': 'mem-stats.txt', 'contents': get_mem_stats()},
{'name': 'lsof.txt', 'contents': get_lsof()},
{'name': 'content-objects.txt', 'contents': get_co_list()},
{'name': 'disk-space.txt', 'contents': get_disk_space()},
{'name': 'lsblk.txt', 'contents': get_lsblk()},
{'name': 'sources-list.txt', 'contents': get_sources_list()},
]
file_list += get_install_logs()
# Include the screenshot if it exists
if os.path.isfile(SCREENSHOT_PATH):
file_list.append({
'name': SCREENSHOT_NAME,
'contents': read_file_contents(SCREENSHOT_PATH)
})
# Collect all coredumps, for applications that terminated unexpectedly
for f in os.listdir('/var/tmp/'):
if f.startswith('core-'):
file_list.append({
'name': f,
'contents': read_file_contents(os.path.join('/var/tmp', f))
})
# create files for each non empty metadata info
for file in file_list:
if file['contents']:
write_file_contents(
os.path.join(TMP_DIR, file['name']), file['contents']
)
# archive all the metadata files
import tarfile
with tarfile.open(ARCHIVE_PATH, mode='w') as archive:
for f in os.listdir(TMP_DIR):
archive.add(os.path.join(TMP_DIR, f), arcname=f)
# open the file and return it
archive = open(ARCHIVE_PATH, 'rb')
return archive
def get_version():
'''
Return a string with the current version of the OS.
    Reads /etc/kanux_version and its timestamp
'''
cmd = "ls -l /etc/kanux_version | awk '{ print $6 \" \" $7 \" \" $8 }' && cat /etc/kanux_version"
o, _, _ = run_cmd(cmd)
return o
def get_stamp():
"""Get the initial starting version of the OS.
Returns:
str: The contents of the kanux_stamp file
"""
return read_file_contents('/boot/kanux_stamp') or ''
def get_processes():
'''
Returns a string with the current processes running in the system
'''
cmd = "ps -Ao user,pid,pcpu,pmem,vsz,rss,tty=TTY,tmout,f=FLAGS,wchan=EXTRA-WIDE-WCHAN-COLUMN,stat,start_time,time,args"
o, _, _ = run_cmd(cmd)
return o
def get_process_tree():
'''
Returns a string with the processes tree of the system
'''
cmd = "pstree -apl"
o, _, _ = run_cmd(cmd)
return o
def get_packages():
'''
Returns a string with the list of packages installed in the system
'''
cmd = "dpkg-query -l"
o, _, _ = run_cmd(cmd)
return o
def get_dmesg():
'''
Returns a string with dmesg and uptime info
'''
cmd_dmesg = "dmesg"
cmd_uptime = "uptime"
d, _, _ = run_cmd(cmd_dmesg)
t, _, _ = run_cmd(cmd_uptime)
t = 'system uptime: %s' % t
return '%s\n%s' % (d, t)
def get_syslog():
'''
Returns the last 1000 lines of syslog messages
'''
cmd = "sudo journalctl -b | tail -n 1000"
o, _, _ = run_cmd(cmd)
return o
def get_wpalog():
'''
Returns the last 300 lines of the wpa log
'''
cmd = "tail -n 300 /var/log/kano_wpa.log"
o, _, _ = run_cmd(cmd)
return o
def get_wlaniface():
'''
    Returns a string with wlan info
'''
cmd = "/sbin/iwconfig wlan0"
o, _, _ = run_cmd(cmd)
return o
def get_xorg_log():
'''
Returns a string with the Xorg log
'''
cmd = "cat /var/log/Xorg.0.log"
o, _, _ = run_cmd(cmd)
return o
def get_cpu_info():
'''
Returns a string with the cpuid and the board model
'''
cmd = "/usr/bin/rpi-info"
o, _, _ = run_cmd(cmd)
o += '\nModel: {}'.format(get_rpi_model())
return o
def get_mem_stats():
"""
Get information about memory usage
"""
mem_stats = ''
out, _, _ = run_cmd('free --human --lohi --total')
mem_stats += out + '\n'
out, _, _ = run_cmd('vcgencmd get_mem arm')
mem_stats += out
out, _, _ = run_cmd('vcgencmd get_mem gpu')
mem_stats += out
out, _, _ = run_cmd('vcgencmd get_mem reloc')
mem_stats += out
out, _, _ = run_cmd('vcgencmd get_mem reloc_total')
mem_stats += out
out, _, _ = run_cmd('vcgencmd get_mem malloc')
mem_stats += out
out, _, _ = run_cmd('vcgencmd get_mem malloc_total')
mem_stats += out + '\n'
out, _, _ = run_cmd('vcgencmd mem_reloc_stats')
mem_stats += out + '\n'
out, _, _ = run_cmd('cat /proc/meminfo')
mem_stats += out + '\n'
out, _, _ = run_cmd('ps -eo pmem,args --no-headers --sort -pmem')
mem_stats += out
return mem_stats
def get_lsof():
'''
Get lsof information (list of open files)
'''
cmd = "sudo /usr/bin/lsof"
o, _, _ = run_cmd(cmd)
return o
def get_app_logs_raw():
'''
Extract kano logs in raw format:
"LOGFILE: component" (one line per component)
followed by entries in the form:
"2014-09-30T10:18:54.532015 kano-updater info: Return value: 0"
'''
logs = logging.read_logs()
output = ""
for f, data in logs.iteritems():
app_name = os.path.basename(f).split(".")[0]
output += "LOGFILE: {}\n".format(f)
for line in data:
line["time"] = datetime.datetime.fromtimestamp(line["time"]).isoformat()
output += "{time} {app} {level}: {message}\n".format(app=app_name, **line)
return output
def get_app_logs_json():
'''
Return a JSON stream with the kano logs
'''
# Fetch the kano logs
kano_logs = logging.read_logs()
# Transform them into a sorted, indented json stream
kano_logs_json = json.dumps(kano_logs, sort_keys=True, indent=4,
separators=(',', ': '))
return kano_logs_json
def get_kwifi_cache():
'''
    Return the cached wifi connection data.
    NOTE: We do not collect sensitive private information;
    the passphrase ("enckey") is replaced with the literal "obfuscated".
'''
cmd = "cat /etc/kwifiprompt-cache.conf | sed 's/\"enckey\":.*/\"enckey\": \"obfuscated\"/'"
o, _, _ = run_cmd(cmd)
return o
def get_usb_devices():
'''
    Returns two short lists of USB devices:
    1) device IDs and manufacturer strings
    2) the device hierarchy along with the kernel driver bound to each device
    This tells us, for wireless dongles and HIDs, which kernel driver is loaded.
'''
cmd = "lsusb && lsusb -t"
o, _, _ = run_cmd(cmd)
return o
def get_networks_info():
'''
Returns a string with ifconfig info
'''
cmd = "/sbin/ifconfig"
o, _, _ = run_cmd(cmd)
return o
def get_wifi_info():
'''
Returns a string with wifi specific info
'''
from kano_world.functions import get_mixed_username
# Get username here
world_username = "Kano World username: {}\n\n".format(get_mixed_username())
kwifi_cache = "**kwifi_cache**\n {}\n\n".format(get_kwifi_cache())
wlaniface = "**wlaniface**\n {}\n\n".format(get_wlaniface())
ifconfig = "**ifconfig**\n {}\n\n".format(get_networks_info())
wpalog = "**wpalog**\n {}\n\n".format(get_wpalog())
return world_username + kwifi_cache + wlaniface + ifconfig + wpalog
def get_edid():
file_path = os.path.join(TMP_DIR, 'edid.dat')
cmd = "tvservice -d {}".format(file_path)
run_cmd(cmd)
try:
dat = read_file_contents(file_path)
delete_file(file_path)
return dat
except:
return "EMPTY"
def get_hdmi_info():
'''
Returns a string with Display info
'''
# Current resolution
cmd = "tvservice -s"
o, _, _ = run_cmd(cmd)
res = 'Current resolution: {}\n\n'.format(o)
# edid file
file_path = os.path.join(TMP_DIR, 'edid.dat')
cmd = "tvservice -d {} && edidparser {}".format(file_path, file_path)
edid, _, _ = run_cmd(cmd)
delete_file(file_path)
return res + edid
def get_screen_log():
"""Get display information using kano-settings display functions.
Returns:
dict: An aggregate of display characteristics
"""
try:
from kano_settings.system.display import get_edid, get_edid_name, get_status, \
list_supported_modes, get_optimal_resolution_mode, override_models
edid = get_edid()
model = get_edid_name(use_cached=False)
override_models(edid, model)
status = get_status()
supported = list_supported_modes(use_cached=False)
optimal = get_optimal_resolution_mode(edid, supported)
log_data = {
'model': model,
'status': status,
'edid': edid,
'supported': supported,
'optimal': optimal,
}
log = json.dumps(log_data, sort_keys=True, indent=4)
except:
return traceback.format_exc()
return log
def get_co_list():
'''
Returns a list of content object IDs currently on the system.
'''
try:
from kano_content.api import ContentManager
cm = ContentManager.from_local()
objects = cm.list_local_objects(active_only=False, inactive_only=False)
return str(objects)
except:
return "Couldn't get a list of content objects."
def get_disk_space():
cmd = "df -h"
o, _, _ = run_cmd(cmd)
return o
def get_lsblk():
cmd = "lsblk"
o, _, _ = run_cmd(cmd)
return o
def get_sources_list():
SRC_LIST_FILE = '/etc/apt/sources.list'
SRC_LIST_DIR = '/etc/apt/sources.list.d'
src_files = [SRC_LIST_FILE] if os.path.exists(SRC_LIST_FILE) else []
src_filenames = sorted(os.listdir(SRC_LIST_DIR)) \
if os.path.isdir(SRC_LIST_DIR) else []
src_files += [os.path.join(SRC_LIST_DIR, src) for src in src_filenames]
output = []
for src_file in src_files:
if not os.path.isfile(src_file):
continue
output.append('Source file: {}'.format(src_file))
output.append(SEPARATOR)
with open(src_file, 'r') as src_f:
output.append(src_f.read())
output.append(SEPARATOR)
output.append('')
return '\n'.join(output)
def get_install_logs():
log_list = []
log_files = [DPKG_LOG_PATH]
if os.path.exists(os.path.dirname(APT_LOG_PATH)):
log_files += [
os.path.join(APT_LOG_PATH, log_f)
for log_f in os.listdir(APT_LOG_PATH)
]
for log_file in log_files:
if not os.path.exists(log_file):
continue
try:
with open(log_file, 'r') as log_f:
contents = log_f.read()
log_list.append(
{
'name': 'install-{}'.format(os.path.basename(log_file)),
'contents': contents
}
)
except Exception:
pass
return log_list
def take_screenshot():
'''
Takes a screenshot and saves it into SCREENSHOT_PATH
'''
ensure_dir(TMP_DIR)
cmd = "kano-screenshot -w 1024 -p " + SCREENSHOT_PATH
_, _, rc = run_cmd(cmd)
def copy_screenshot(filename):
'''
Copies screenshot 'filename' into SCREENSHOT_PATH
'''
ensure_dir(TMP_DIR)
if os.path.isfile(filename):
run_cmd("cp %s %s" % (filename, SCREENSHOT_PATH))
def copy_archive_report(target_archive):
'''
Copies source archive (TMP_DIR/ARCHIVE_NAME) into target_archive
'''
ensure_dir(TMP_DIR)
source_archive = os.path.join(TMP_DIR, ARCHIVE_NAME)
if os.path.isfile(source_archive):
_, _, rc = run_cmd("cp %s %s" % (source_archive, target_archive))
return (rc == 0)
else:
return False
def sanitise_input(text):
'''
    Replaces double quotes in the given text with single quotes
'''
# Replace double quotation mark for singles
text = text.replace('"', "'")
# Fix upload error when data field begins with " or '
if text[:1] == '"' or text[:1] == "'":
text = " " + text
return text
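# Illustrative example (assumption: called on a plain str):
#     sanitise_input('He said "hi"')  ->  "He said 'hi'"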
def try_login():
'''
Returns login status.
    If the user is not logged in, the kano-login dialog will be launched first
'''
from kano_world.functions import is_registered
# Check if user is registered
if not is_registered():
_, _, rc = run_cmd('kano-login 3', localised=True)
return is_registered()
def try_connect():
'''
Returns internet status.
If connection fails the first time, the WiFi config will be launched
'''
if is_internet():
return True
run_cmd('sudo /usr/bin/kano-settings 12', localised=True)
return is_internet()
def send_question_response(answers, interactive=True, tags=['os', 'feedback-widget'],
debug=False, dry_run=False):
'''
    This function is used by the Feedback widget to send the responses over the network.
    The information (question_id, answer, username and email) is sent to a Kano API endpoint.
    answers is a list of tuples, each holding a Question ID and an Answer literal.
    The answers are all packed into a single payload object and sent in one network transaction.
'''
from kano_world.functions import get_email, get_mixed_username
ok_msg_title = _('Thank you') # noqa: F821
ok_msg_body = _( # noqa: F821
'We will use your feedback to improve your experience'
)
    if interactive and (not try_connect() or not try_login()):
# The answer will be saved as offline, act as if it was sent correctly
thank_you = KanoDialog(ok_msg_title, ok_msg_body)
thank_you.dialog.set_position(Gtk.WindowPosition.CENTER_ALWAYS)
thank_you.run()
return False
payload = {
'email': get_email(),
'username': get_mixed_username(),
'answers': [
{
'question_id': answer[0],
'text': answer[1],
'tags': tags
} for answer in answers
]
}
if debug:
print 'PAYLOAD construction:'
print json.dumps(payload, sort_keys=True,
indent=4, separators=(',', ': '))
# Send the answers unless we are testing the API
if dry_run:
return True
success, error, dummy = request_wrapper('post', '/questions/responses',
data=json.dumps(payload),
headers=content_type_json)
# Retry on error only if in GUI mode
if not success:
logger.error('Error while sending feedback: {}'.format(error))
if not interactive:
return False
retry = KanoDialog(
title_text=_('Unable to send'), # noqa: F821
description_text=_( # noqa: F821
'Error while sending your feedback. Do you want to retry?'
),
button_dict={
_('Close feedback').upper(): { # noqa: F821
'return_value': False,
'color': 'red'
},
_('Retry').upper(): { # noqa: F821
'return_value': True,
'color': 'green'
}
}
)
if retry.run():
            # Try again with the same answers until they say no
            return send_question_response(answers, interactive=interactive)
return False
if interactive:
thank_you = KanoDialog(ok_msg_title, ok_msg_body)
thank_you.dialog.set_position(Gtk.WindowPosition.CENTER_ALWAYS)
thank_you.run()
return True
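# --- Illustrative sketch (not part of the original module): shows the payload
# --- shape that send_question_response() builds before POSTing it to the
# --- /questions/responses endpoint. The question ids, texts and user details
# --- below are made-up placeholders; json is assumed to be imported at module
# --- level, as it is used elsewhere in this file.
if __name__ == "__main__":
    _example_answers = [('q-101', 'Loved the projects'), ('q-102', 'More games please')]
    _example_payload = {
        'email': 'user@example.com',          # normally get_email()
        'username': 'example_user',           # normally get_mixed_username()
        'answers': [
            {'question_id': qid, 'text': text, 'tags': ['os', 'feedback-widget']}
            for qid, text in _example_answers
        ],
    }
    print json.dumps(_example_payload, sort_keys=True, indent=4, separators=(',', ': '))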
|
KanoComputing/kano-feedback
|
kano_feedback/DataSender.py
|
Python
|
gpl-2.0
| 20,436
|
#!/usr/bin/env python
import os
from PIL import Image
rootdir = '/srv/johnwiseheart.me/projects/bom2/images/'
for subdir, dirs, files in os.walk(rootdir):
for file in files:
try:
im = Image.open(os.path.join(subdir, file))
im.verify()
except Exception as e:
os.remove(os.path.join(subdir, file))
print e
|
johnwiseheart/bomslider
|
clean.py
|
Python
|
gpl-2.0
| 374
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'bc.ui'
#
# Created: Fri Dec 4 16:24:50 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_bcUI(object):
def setupUi(self, bcUI):
bcUI.setObjectName(_fromUtf8("bcUI"))
bcUI.resize(287, 514)
font = QtGui.QFont()
font.setPointSize(9)
bcUI.setFont(font)
bcUI.setWidgetResizable(True)
self.scrollAreaWidgetContents = QtGui.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 285, 512))
self.scrollAreaWidgetContents.setObjectName(_fromUtf8("scrollAreaWidgetContents"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.splitter = QtGui.QSplitter(self.scrollAreaWidgetContents)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.groupBox = QtGui.QGroupBox(self.splitter)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.listWidget = QtGui.QListWidget(self.groupBox)
self.listWidget.setObjectName(_fromUtf8("listWidget"))
self.verticalLayout.addWidget(self.listWidget)
self.layoutWidget = QtGui.QWidget(self.splitter)
self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
self.gridLayout = QtGui.QGridLayout(self.layoutWidget)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label = QtGui.QLabel(self.layoutWidget)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.comboBox = QtGui.QComboBox(self.layoutWidget)
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.gridLayout.addWidget(self.comboBox, 0, 1, 1, 1)
self.tabWidget = QtGui.QTabWidget(self.layoutWidget)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.gridLayout.addWidget(self.tabWidget, 1, 0, 1, 2)
self.verticalLayout_2.addWidget(self.splitter)
self.pushButton = QtGui.QPushButton(self.scrollAreaWidgetContents)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("images/fromHelyx/save16.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton.setIcon(icon)
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.verticalLayout_2.addWidget(self.pushButton)
bcUI.setWidget(self.scrollAreaWidgetContents)
self.retranslateUi(bcUI)
self.tabWidget.setCurrentIndex(-1)
QtCore.QObject.connect(self.listWidget, QtCore.SIGNAL(_fromUtf8("itemSelectionChanged()")), bcUI.changeSelection)
QtCore.QObject.connect(self.comboBox, QtCore.SIGNAL(_fromUtf8("currentIndexChanged(QString)")), bcUI.changePrototype)
QtCore.QObject.connect(self.pushButton, QtCore.SIGNAL(_fromUtf8("pressed()")), bcUI.saveBCs)
QtCore.QObject.connect(self.listWidget, QtCore.SIGNAL(_fromUtf8("itemDoubleClicked(QListWidgetItem*)")), bcUI.changePatchType)
QtCore.QMetaObject.connectSlotsByName(bcUI)
def retranslateUi(self, bcUI):
bcUI.setWindowTitle(_translate("bcUI", "ScrollArea", None))
self.groupBox.setTitle(_translate("bcUI", "Boundaries", None))
self.label.setText(_translate("bcUI", "Prototype:", None))
self.pushButton.setText(_translate("bcUI", "Apply", None))
class bcUI(QtGui.QScrollArea, Ui_bcUI):
def __init__(self, parent=None, f=QtCore.Qt.WindowFlags()):
QtGui.QScrollArea.__init__(self, parent, f)
self.setupUi(self)
|
jmarcelogimenez/petroFoam
|
bc_ui.py
|
Python
|
gpl-2.0
| 4,337
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# --- trame.py ---
# Author : Samuel Bucquet
# Date : 31.10.2014
# License : GPLv2
from collections import namedtuple, OrderedDict
import struct
import time
# coeffs
slong2deg = 180.0/(2**31)
sshort2deg = 180.0/(2**15)
ushort2deg = 360.0/(2**16)
class Trame(object):
name = ''
fieldsname = []
first_bytes = ''
def __init__(self, fd):
self.framedesc = namedtuple(self.name, self.fieldsname)
self.datas = OrderedDict(zip(self.fieldsname, [0]*len(self.fieldsname)))
self.size = 0
self.fd = fd
def get_crctrame(self, fields):
pass
def do_crc(self, buff):
pass
def add_crc(self, buff):
pass
def get_fields(self, buff):
pass
def build_fields(self):
pass
def process_fields(self, fields):
pass
def read(self):
pass
def parse(self, buff):
fields = self.get_fields(buff)
if self.get_crctrame(fields) != self.do_crc(buff):
return None
fields = self.process_fields(fields)
return self.framedesc._make(fields)
def __iter__(self):
return self
def next(self):
while True:
return self.read()
class BinaryTrame(Trame):
struct_fmt = ""
def __init__(self, fd):
        super(BinaryTrame, self).__init__(fd)
self.struct = struct.Struct(self.struct_fmt)
self.size = self.struct.size
def read(self, fd):
buff = fd.read(self.size)
sync_pos = buff.find(self.first_bytes)
if sync_pos == -1:
            # could not find a frame marker
return None
# else
ts = time.time()
if sync_pos != 0:
            buff = buff[sync_pos:] + fd.read(sync_pos)
trame = self.parse(buff)
if trame is None:
            # invalid frame (bad CRC)
return None
self.timestamp = ts
self.datas = trame
return trame
def get_fields(self, buff):
return list(self.struct.unpack(buff))
def build(self):
fields = self.build_fields()
buff = self.first_bytes + self.struct.pack(*[fields[n] for n in self.fieldsname])
return self.add_crc(buff)
class AsciiTrame(Trame):
delim = ','
def __init__(self, fd):
        super(AsciiTrame, self).__init__(fd)
def read(self, fd):
line = fd.readline()
ts = time.time()
trame = self.parse(line)
if trame is None:
            # invalid frame (bad CRC)
return None
self.timestamp = ts
self.datas = trame
return trame
def get_fields(self, line):
return line.strip().split(self.delim)
def build(self):
pass
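# --- Illustrative sketch (not part of the original module): a minimal concrete
# --- binary frame built on the classes above. The field names, sync bytes and
# --- layout are made up, and the CRC hooks are stubbed out; a real frame would
# --- implement them properly.
class ExampleGpsTrame(BinaryTrame):
    name = 'ExampleGpsTrame'
    fieldsname = ['lat_deg', 'lon_deg']
    first_bytes = b'\xaa\x55'
    struct_fmt = '<ii'              # two signed 32-bit raw angle values

    def get_crctrame(self, fields):
        return 0                    # no CRC in this toy example

    def do_crc(self, buff):
        return 0

    def process_fields(self, fields):
        # convert the raw 32-bit values to degrees with the module coefficient
        return [fields[0] * slong2deg, fields[1] * slong2deg]

if __name__ == '__main__':
    import io
    frame = ExampleGpsTrame(io.BytesIO(b''))
    packed = struct.pack('<ii', int(45.0 / slong2deg), int(-73.5 / slong2deg))
    print(frame.parse(packed))      # ExampleGpsTrame(lat_deg=..., lon_deg=...)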
|
samgratte/SimpleSHM
|
trame.py
|
Python
|
gpl-2.0
| 2,753
|
# -*- coding: utf-8 -*-
#
from django.dispatch import Signal
on_app_ready = Signal()
|
liuzheng712/jumpserver
|
apps/assets/signals.py
|
Python
|
gpl-2.0
| 86
|
# -*- coding: utf-8 -*-
"""
Estnltk prettyprinter module.
Deals with rendering Text instances as HTML.
"""
from __future__ import unicode_literals, print_function, absolute_import
from .values import AESTHETICS, VALUES, AES_VALUE_MAP, DEFAULT_VALUE_MAP, LEGAL_ARGUMENTS
from .templates import get_mark_css, HEADER, MIDDLE, FOOTER, DEFAULT_MARK_CSS
from .marker import mark_text
from .rules import create_rules
from cached_property import cached_property
import six
def assert_legal_arguments(kwargs):
"""Assert that PrettyPrinter arguments are correct.
Raises
------
ValueError
In case there are unknown arguments or a single layer is mapped to more than one aesthetic.
"""
seen_layers = set()
for k, v in kwargs.items():
if k not in LEGAL_ARGUMENTS:
raise ValueError('Illegal argument <{0}>!'.format(k))
if k in AESTHETICS:
if v in seen_layers:
raise ValueError('Layer <{0}> mapped for more than a single aesthetic!'.format(v))
seen_layers.add(v)
if k in VALUES:
if not isinstance(v, six.string_types) and not isinstance(v, list):
raise ValueError('Value <{0}> must be either string or list'.format(k))
if isinstance(v, list):
if len(v) == 0:
raise ValueError('Rules cannot be empty list')
for rule_matcher, rule_value in v:
if not isinstance(rule_matcher, six.string_types) or not isinstance(rule_value, six.string_types):
raise ValueError('Rule tuple elements must be strings')
def parse_arguments(kwargs):
"""Function that parses PrettyPrinter arguments.
Detects which aesthetics are mapped to which layers
and collects user-provided values.
Parameters
----------
kwargs: dict
The keyword arguments to PrettyPrinter.
Returns
-------
dict, dict
First dictionary is aesthetic to layer mapping.
Second dictionary is aesthetic to user value mapping.
"""
aesthetics = {}
values = {}
for aes in AESTHETICS:
if aes in kwargs:
aesthetics[aes] = kwargs[aes]
val_name = AES_VALUE_MAP[aes]
# map the user-provided CSS value or use the default
values[aes] = kwargs.get(val_name, DEFAULT_VALUE_MAP[aes])
return aesthetics, values
class PrettyPrinter(object):
"""Class for formatting Text instances as HTML & CSS."""
def __init__(self, **kwargs):
"""Initialize a new PrettyPrinter class.
Parameters
----------
color: str or callable
Layer that corresponds to color aesthetic.
background: str or callable
Layer that corresponds to background.
...
color_value: str or list
The alternative value for the color.
background_value: str or list
            The alternative value for the background.
"""
assert_legal_arguments(kwargs)
self.__aesthetics, self.__values = parse_arguments(kwargs)
self.__rules = dict((aes, create_rules(aes, self.values[aes])) for aes in self.aesthetics)
@cached_property
def aesthetics(self):
"""Mapping of aesthetics mapped to layers."""
return self.__aesthetics
@cached_property
def values(self):
"""Mapping of aesthetic values."""
return self.__values
@cached_property
def rules(self):
return self.__rules
@cached_property
def css(self):
"""Returns
-------
str
The CSS.
"""
css_list = [DEFAULT_MARK_CSS]
for aes in self.aesthetics:
css_list.extend(get_mark_css(aes, self.values[aes]))
#print('\n'.join(css_list))
return '\n'.join(css_list)
def render(self, text, add_header=False):
"""Render the HTML.
Parameters
----------
add_header: boolean (default: False)
If True, add HTML5 header and footer.
Returns
-------
str
The rendered HTML.
"""
html = mark_text(text, self.aesthetics, self.rules)
html = html.replace('\n', '<br/>')
if add_header:
html = '\n'.join([HEADER, self.css, MIDDLE, html, FOOTER])
#print('\n'.join((HEADER, self.css, MIDDLE, html, FOOTER)))
return html
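# --- Illustrative usage sketch (not part of the module). The layer name
# --- 'words' and the colour value are assumptions, not documented defaults;
# --- any layer present on the Text instances you render would work the same way.
if __name__ == '__main__':
    pp = PrettyPrinter(background='words', background_value='#ffff99')
    print(pp.css)   # inspect the generated CSS rules for the chosen aesthetic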
|
estnltk/estnltk
|
estnltk/prettyprinter/prettyprinter.py
|
Python
|
gpl-2.0
| 4,437
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
vfkPluginDialog
A QGIS plugin
 A plugin for working with cadastre (land registry) data
-------------------
begin : 2015-06-11
git sha : $Format:%H$
copyright : (C) 2015 by Stepan Bambula
email : stepan.bambula@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from __future__ import absolute_import
from builtins import range
from qgis.PyQt.QtCore import QRegExp
from .vfkDocument import *
class RichTextDocument(VfkDocument):
# static variables
defaultTableAttributes = "border=\"0\" cellspacing=\"1px\" cellpadding=\"0\""
defaultCssStyle = """
body{
background-color: white;
color: black;
}
table th{
background-color: #ffbb22;
padding: 3px;
}
table td{
padding: 3px;
}
table tr td.oddRow{
background-color: #ffff55;
}
table tr td.evenRow{
background-color: #ffff99;
}"""
def __init__(self):
super(RichTextDocument, self).__init__()
self.__mPage = u""
self.__mLastColumnNumber = 0
self.__mCurrentTableRowNumber = 0
def __currentTableRowCssClass(self):
return u"evenRow" if self.__mCurrentTableRowNumber % 2 == 0 else u"oddRow"
def toString(self):
return self.__mPage
def header(self):
self.__mPage += u"<html><head>"
self.__mPage += u"<style>" + self.defaultCssStyle + u"</style>"
self.__mPage += u"</head><body>"
def footer(self):
self.__mPage += u"</body></html>"
def heading1(self, text):
self.__mPage += u"<h1>{}</h1>".format(text)
def heading2(self, text):
self.__mPage += u"<h2>{}</h2>".format(text)
def heading3(self, text):
self.__mPage += u"<h3>{}</h3>".format(text)
def beginItemize(self):
self.__mPage += u"<ul>"
def endItemize(self):
self.__mPage += u"</ul>"
def beginItem(self):
self.__mPage += u"<li>"
def endItem(self):
self.__mPage += u"</li>"
def item(self, text):
self.__mPage += u"<li>{}</li>".format(text)
def beginTable(self):
self.__mPage += u"<table " + self.defaultTableAttributes + u">"
self.__mCurrentTableRowNumber = 1
def endTable(self):
self.__mPage += u"</table>"
def tableHeader(self, columns):
self.__mPage += u"<tr>"
for column in columns:
self.__mPage += u"<th>{}</th>".format(column)
self.__mPage += u"</tr>"
self.__mLastColumnNumber = len(columns)
def tableRow(self, columns):
self.__mPage += u"<tr>"
for column in columns:
self.__mPage += u"<td class=\"{}\">{}</td>".format(
self.__currentTableRowCssClass(), column)
self.__mPage += u"</tr>"
self.__mLastColumnNumber = len(columns)
self.__mCurrentTableRowNumber += 1
def tableRowOneColumnSpan(self, text):
self.__mPage += u"<tr>"
self.__mPage += u"<td colspan=\"{}\" class=\"{}\">{}</td>".format(self.__mLastColumnNumber,
self.__currentTableRowCssClass(), text)
self.__mPage += u"</tr>"
self.__mCurrentTableRowNumber += 1
def link(self, href, text):
return u"<a href=\"{}\">{}</a>".format(href, text)
def superScript(self, text):
return u"<sup>{}</sup>".format(text)
def newLine(self):
return u"<br/>"
def keyValueTable(self, content):
self.beginTable()
for it in content:
self.tableRow([it.first, it.second])
self.endTable()
def paragraph(self, text):
self.__mPage += u"<p>{}</p>".format(text)
def table(self, content, header):
"""
:param content: list
:param header: bool
"""
self.beginTable()
i = 0
if header and content:
self.tableHeader(content[0])
i += 1
for j in range(i, len(content)):
self.tableRow(content[j])
self.endTable()
def text(self, text):
self.__mPage += text
def discardLastBeginTable(self):
index = self.__mPage.rfind("<table")
self.__mPage = self.__mPage[:index]
def isLastTableEmpty(self):
        # str.find() does not accept a QRegExp; use QRegExp.indexIn() instead
        return QRegExp("<table[^>]*>$").indexIn(self.__mPage) != -1
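# --- Illustrative usage sketch (not part of the plugin): builds a tiny HTML
# --- page with the helpers above. The content strings are placeholders, and
# --- this is meant to be exercised from within the plugin package (the module
# --- uses a relative import at the top).
if __name__ == '__main__':
    doc = RichTextDocument()
    doc.header()
    doc.heading1(u"Parcel 42")
    doc.table([[u"Owner", u"Area"], [u"J. Novak", u"1 250 m2"]], header=True)
    doc.footer()
    print(doc.toString())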
|
ctu-osgeorel/qgis-vfk-plugin
|
richTextDocument.py
|
Python
|
gpl-2.0
| 5,331
|
from django.conf import settings
from rest_framework import serializers
from misago.acl.testutils import override_acl
from misago.categories.models import Category
from misago.users.testutils import AuthenticatedUserTestCase
from .. import testutils
from ..api.postingendpoint import PostingEndpoint
from ..api.postingendpoint.attachments import AttachmentsMiddleware, validate_attachments_count
from ..models import Attachment, AttachmentType
class RequestMock(object):
def __init__(self, data=None):
self.data = data or {}
class AttachmentsMiddlewareTests(AuthenticatedUserTestCase):
def setUp(self):
super(AttachmentsMiddlewareTests, self).setUp()
self.category = Category.objects.get(slug='first-category')
self.post = testutils.post_thread(
category=self.category
).first_post
self.post.update_fields = []
self.override_acl()
self.filetype = AttachmentType.objects.order_by('id').last()
def override_acl(self, new_acl=None):
override_acl(self.user, new_acl or {
'max_attachment_size': 1024
})
def mock_attachment(self, user=True, post=None):
return Attachment.objects.create(
secret=Attachment.generate_new_secret(),
filetype=self.filetype,
post=post,
size=1000,
uploader=self.user if user else None,
uploader_name=self.user.username,
uploader_slug=self.user.slug,
uploader_ip='127.0.0.1',
filename='testfile_{}.zip'.format(Attachment.objects.count() + 1),
)
def test_use_this_middleware(self):
"""use_this_middleware returns False if we can't upload attachments"""
middleware = AttachmentsMiddleware(user=self.user)
self.override_acl({
'max_attachment_size': 0
})
self.assertFalse(middleware.use_this_middleware())
self.override_acl({
'max_attachment_size': 1024
})
self.assertTrue(middleware.use_this_middleware())
def test_middleware_is_optional(self):
"""middleware is optional"""
INPUTS = (
{},
{'attachments': []}
)
for test_input in INPUTS:
middleware = AttachmentsMiddleware(
request=RequestMock(test_input),
mode=PostingEndpoint.START,
user=self.user,
post=self.post
)
serializer = middleware.get_serializer()
self.assertTrue(serializer.is_valid())
def test_middleware_validates_ids(self):
"""middleware validates attachments ids"""
INPUTS = (
'none',
['a', 'b', 123],
range(settings.MISAGO_POST_ATTACHMENTS_LIMIT + 1)
)
for test_input in INPUTS:
middleware = AttachmentsMiddleware(
request=RequestMock({
'attachments': test_input
}),
mode=PostingEndpoint.START,
user=self.user,
post=self.post
)
serializer = middleware.get_serializer()
self.assertFalse(serializer.is_valid(), "%r shouldn't validate" % test_input)
def test_get_initial_attachments(self):
"""get_initial_attachments returns list of attachments already existing on post"""
middleware = AttachmentsMiddleware(
request=RequestMock(),
mode=PostingEndpoint.EDIT,
user=self.user,
post=self.post
)
serializer = middleware.get_serializer()
attachments = serializer.get_initial_attachments(
middleware.mode, middleware.user, middleware.post)
self.assertEqual(attachments, [])
attachment = self.mock_attachment(post=self.post)
attachments = serializer.get_initial_attachments(
middleware.mode, middleware.user, middleware.post)
self.assertEqual(attachments, [attachment])
def test_get_new_attachments(self):
"""get_initial_attachments returns list of attachments already existing on post"""
middleware = AttachmentsMiddleware(
request=RequestMock(),
mode=PostingEndpoint.EDIT,
user=self.user,
post=self.post
)
serializer = middleware.get_serializer()
attachments = serializer.get_new_attachments(middleware.user, [1, 2, 3])
self.assertEqual(attachments, [])
attachment = self.mock_attachment()
attachments = serializer.get_new_attachments(middleware.user, [attachment.pk])
self.assertEqual(attachments, [attachment])
# only own orphaned attachments may be assigned to posts
other_user_attachment = self.mock_attachment(user=False)
attachments = serializer.get_new_attachments(middleware.user, [other_user_attachment.pk])
self.assertEqual(attachments, [])
def test_cant_delete_attachment(self):
"""middleware validates if we have permission to delete other users attachments"""
self.override_acl({
'max_attachment_size': 1024,
'can_delete_other_users_attachments': False
})
attachment = self.mock_attachment(user=False, post=self.post)
self.assertIsNone(attachment.uploader)
serializer = AttachmentsMiddleware(
request=RequestMock({'attachments': []}),
mode=PostingEndpoint.EDIT,
user=self.user,
post=self.post
).get_serializer()
self.assertFalse(serializer.is_valid())
def test_add_attachments(self):
"""middleware adds attachments to post"""
attachments = [
self.mock_attachment(),
self.mock_attachment(),
]
middleware = AttachmentsMiddleware(
request=RequestMock({
'attachments': [a.pk for a in attachments]
}),
mode=PostingEndpoint.EDIT,
user=self.user,
post=self.post
)
serializer = middleware.get_serializer()
self.assertTrue(serializer.is_valid())
middleware.save(serializer)
# attachments were associated with post
self.assertEqual(self.post.update_fields, ['attachments_cache'])
self.assertEqual(self.post.attachment_set.count(), 2)
attachments_filenames = list(reversed([a.filename for a in attachments]))
self.assertEqual([a['filename'] for a in self.post.attachments_cache], attachments_filenames)
def test_remove_attachments(self):
"""middleware removes attachment from post"""
attachments = [
self.mock_attachment(post=self.post),
self.mock_attachment(post=self.post),
]
middleware = AttachmentsMiddleware(
request=RequestMock({
'attachments': [attachments[0].pk]
}),
mode=PostingEndpoint.EDIT,
user=self.user,
post=self.post
)
serializer = middleware.get_serializer()
self.assertTrue(serializer.is_valid())
middleware.save(serializer)
# attachments were associated with post
self.assertEqual(self.post.update_fields, ['attachments_cache'])
self.assertEqual(self.post.attachment_set.count(), 1)
attachments_filenames = [attachments[0].filename]
self.assertEqual([a['filename'] for a in self.post.attachments_cache], attachments_filenames)
def test_edit_attachments(self):
"""middleware removes and adds attachments to post"""
attachments = [
self.mock_attachment(post=self.post),
self.mock_attachment(post=self.post),
self.mock_attachment(),
]
middleware = AttachmentsMiddleware(
request=RequestMock({
'attachments': [attachments[0].pk, attachments[2].pk]
}),
mode=PostingEndpoint.EDIT,
user=self.user,
post=self.post
)
serializer = middleware.get_serializer()
self.assertTrue(serializer.is_valid())
middleware.save(serializer)
# attachments were associated with post
self.assertEqual(self.post.update_fields, ['attachments_cache'])
self.assertEqual(self.post.attachment_set.count(), 2)
attachments_filenames = [attachments[2].filename, attachments[0].filename]
self.assertEqual([a['filename'] for a in self.post.attachments_cache], attachments_filenames)
class ValidateAttachmentsCountTests(AuthenticatedUserTestCase):
def test_validate_attachments_count(self):
"""too large count of attachments is rejected"""
validate_attachments_count(range(settings.MISAGO_POST_ATTACHMENTS_LIMIT))
with self.assertRaises(serializers.ValidationError):
validate_attachments_count(range(settings.MISAGO_POST_ATTACHMENTS_LIMIT + 1))
|
1905410/Misago
|
misago/threads/tests/test_attachments_middleware.py
|
Python
|
gpl-2.0
| 9,002
|
#!/usr/bin/python
# generate the SRS report sheet
#command line:
#xml_srs.py <the SRS blank file> <xml file you want output>
#e.g.
#./xml_srs.py ../mx50_test_manual/SRS/mx508_SRS_release SRS_release.xml
import sys
fin = sys.argv[1]
fon = sys.argv[2]
start = 0
count = 0
hsect = 0
try:
fi = open(fin, 'r')
fo = open(fon, 'w')
#write head
fo.write("<?xml-stylesheet type=\"text/xsl\" href=\"srs_list.xsl\"?>\n");
fo.write("<chapter name=\"SRS\"><title>SRS List</title>\n");
for line in fi:
pt = line.find("\t")
if ( line.find("_") != -1 ):
head = line.find("\t")
hs = line[:head]
if (start == 1):
fo.write("]]></para></formalpara>\n")
fo.write("<formalpara name=\"" + hs + "\" count=\"" + str(count) + "\" >")
fo.write("\n<title>" + hs + "</title>\n")
fo.write("<para><![CDATA[\n")
fo.write(line)
start = 1
count = count + 1
elif (line.find("\t") == 0):
if(start == 1):
fo.write("]]></para></formalpara>\n")
start = 0
if (hsect == 1):
fo.write("</sect1>\n")
hsect = 0
if (line.strip() != ''):
fo.write("<sect1>")
fo.write("<title><![CDATA[")
fo.write(line.strip())
fo.write("]]></title>")
hsect = 1
else:
if(start == 1):
fo.write(line)
if(start == 1):
fo.write("]]></para></formalpara>\n")
start = 0
if (hsect == 1):
fo.write("</sect1>\n")
hsect = 0
fo.write("</chapter>")
count = 0
finally:
fi.close()
fo.close()
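# --- Illustrative sketch (not part of the original script) of the tab-separated
# --- input layout the loop above assumes; the section and requirement text are
# --- made up, and <TAB> stands for a literal tab character:
#
#   <TAB>Boot requirements                  (starts with a tab -> new <sect1> title)
#   SRS_BOOT_001<TAB>Board shall boot ...   (id containing "_" -> new <formalpara>)
#   additional detail line                  (kept verbatim inside the open <para>)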
|
wanghao-xznu/vte
|
tools/docbook/xml_srs.py
|
Python
|
gpl-2.0
| 1,424
|
"""
WSGI config for sampleapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sampleapp.settings")
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise # for heroku
application = get_wsgi_application()
application = DjangoWhiteNoise(application) # for heroku
|
feifangit/dj-mongo-reader
|
example/sampleapp/sampleapp/wsgi.py
|
Python
|
gpl-2.0
| 512
|
#!/usr/bin/env python
import unittest
from werkzeug.exceptions import Forbidden
from .util import generate_ll
class PurgeAllDeletedTasksTest(unittest.TestCase):
def setUp(self):
self.ll = generate_ll()
self.pl = self.ll.pl
def test_purges_all_deleted_tasks(self):
# given
admin = self.pl.create_user('admin@example.com', is_admin=True)
self.pl.add(admin)
t1 = self.pl.create_task('t1')
self.pl.add(t1)
t1.is_deleted = True
t2 = self.pl.create_task('t2')
self.pl.add(t2)
t3 = self.pl.create_task('t3')
self.pl.add(t3)
t3.is_deleted = True
self.pl.commit()
# precondition
self.assertTrue(t1.is_deleted)
self.assertFalse(t2.is_deleted)
self.assertTrue(t3.is_deleted)
self.assertEqual(3, self.pl.count_tasks())
self.assertEqual(2, self.pl.count_tasks(is_deleted=True))
self.assertEqual(1, self.pl.count_tasks(is_deleted=False))
self.assertTrue(admin.is_admin)
# when
result = self.ll.purge_all_deleted_tasks(admin)
# then
self.assertEqual(1, self.pl.count_tasks())
self.assertEqual(0, self.pl.count_tasks(is_deleted=True))
self.assertEqual(1, self.pl.count_tasks(is_deleted=False))
self.assertIs(t2, list(self.pl.get_tasks())[0])
# and
self.assertEqual(2, result)
def test_non_admin_raises(self):
# given
user = self.pl.create_user('user@example.com', is_admin=False)
self.pl.add(user)
task = self.pl.create_task('task')
self.pl.add(task)
task.is_deleted = True
self.pl.commit()
# precondition
self.assertFalse(user.is_admin)
# expect
self.assertRaises(
Forbidden,
self.ll.purge_all_deleted_tasks,
user)
def test_task_not_deleted_then_not_purged(self):
# given
admin = self.pl.create_user('admin@example.com', is_admin=True)
self.pl.add(admin)
task = self.pl.create_task('task')
self.pl.add(task)
self.pl.commit()
# precondition
self.assertFalse(task.is_deleted)
self.assertEqual(1, self.pl.count_tasks())
self.assertEqual(0, self.pl.count_tasks(is_deleted=True))
self.assertEqual(1, self.pl.count_tasks(is_deleted=False))
self.assertTrue(admin.is_admin)
# when
result = self.ll.purge_all_deleted_tasks(admin)
# then
self.assertEqual(1, self.pl.count_tasks())
self.assertEqual(0, self.pl.count_tasks(is_deleted=True))
self.assertEqual(1, self.pl.count_tasks(is_deleted=False))
# and
self.assertEqual(0, result)
|
izrik/tudor
|
tests/logic_t/layer/LogicLayer/test_purge_all_deleted_tasks.py
|
Python
|
gpl-2.0
| 2,763
|
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2017 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import Ice, os, sys, traceback, time, threading
def test(b):
if not b:
raise RuntimeError('test assertion failed')
class Dispatcher:
def __init__(self):
self._calls = []
self._terminated = False
self._cond = threading.Condition()
self._thread = threading.Thread(target=self.run)
self._thread.start()
Dispatcher._instance = self
def dispatch(self, call, con):
with self._cond:
self._calls.append(call)
if len(self._calls) == 1:
self._cond.notify()
def run(self):
while True:
call = None
with self._cond:
while not self._terminated and len(self._calls) == 0:
self._cond.wait()
if len(self._calls) > 0:
call = self._calls.pop(0)
elif self._terminated:
# Terminate only once all calls are dispatched.
return
if call:
try:
call()
except:
# Exceptions should never propagate here.
test(False)
@staticmethod
def terminate():
with Dispatcher._instance._cond:
Dispatcher._instance._terminated = True
Dispatcher._instance._cond.notify()
Dispatcher._instance._thread.join()
Dispatcher._instance = None
@staticmethod
def isDispatcherThread():
return threading.current_thread() == Dispatcher._instance._thread
@staticmethod
def instance():
return Dispatcher._instance
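# --- Illustrative usage sketch (not part of the original test harness): drives
# --- the dispatch queue directly, without an Ice communicator or adapter.
if __name__ == '__main__':
    dispatcher = Dispatcher()
    done = threading.Event()
    dispatcher.dispatch(done.set, None)        # run done.set() on the dispatch thread
    done.wait(5)
    test(done.is_set())
    test(not Dispatcher.isDispatcherThread())  # the main thread is not the dispatcher
    Dispatcher.terminate()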
|
ljx0305/ice
|
python/test/Ice/dispatcher/Dispatcher.py
|
Python
|
gpl-2.0
| 1,991
|
"""
/***************************************************************************
Name : Export Data from STDM
Description : LEGACY CODE, NEEDS TO BE UPDATED.
Export data to selected OGR formats
Date : 24/March/12
copyright : (C) 2012 by John Gitau
email : gkahiu@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import sys
from PyQt4.QtGui import *
from PyQt4.QtCore import (
Qt,
SIGNAL
)
import sqlalchemy
from stdm.utils import *
from stdm.ui.reports import SqlHighlighter
from stdm.data import (
process_report_filter,
table_column_names,
unique_column_values,
pg_tables
)
from stdm.data.importexport import (
OGRWriter,
vectorFileDir,
setVectorFileDir
)
from .ui_export_data import Ui_frmExportWizard
class ExportData(QWizard,Ui_frmExportWizard):
def __init__(self,parent=None):
QWizard.__init__(self,parent)
self.setupUi(self)
#Event Handlers
self.btnDestFile.clicked.connect(self.setDestFile)
self.lstSrcTab.itemSelectionChanged.connect(self.srcSelectChanged)
self.btnUniqueVals.clicked.connect(self.colUniqueValues)
#Query Builder signals
self.lstQueryCols.itemDoubleClicked.connect(self.filter_insertField)
self.lstUniqueVals.itemDoubleClicked.connect(self.filter_insertField)
self.btnOpEq.clicked.connect(self.filter_insertEq)
self.btnOpNotEq.clicked.connect(self.filter_insertNotEq)
self.btnOpLike.clicked.connect(self.filter_insertLike)
self.btnOpGreater.clicked.connect(self.filter_greaterThan)
self.btnOpGreaterEq.clicked.connect(self.filter_greaterEq)
self.btnOpAnd.clicked.connect(self.filter_insertAnd)
self.btnOpLess.clicked.connect(self.filter_insertLess)
self.btnOpLessEq.clicked.connect(self.filter_insertLessEq)
self.btnOpOr.clicked.connect(self.filter_insertOR)
self.btnClearQuery.clicked.connect(self.filter_clearQuery)
self.btnQueryVerify.clicked.connect(self.filter_verifyQuery)
#Init controls
self.initControls()
#Register fields
self.registerFields()
def initControls(self):
#Initialize controls
self.cboSpatialCols_2.setEnabled(False)
self.gpQBuilder.setChecked(False)
#Query Builder section
self.txtWhereQuery.setWordWrapMode(QTextOption.WordWrap)
#Custom SQL highlighter
sqlHighlighter = SqlHighlighter(self.txtWhereQuery)
def registerFields(self):
#Destination file name and format
pgDestination = self.page(0)
pgDestination.registerField("destFile*",self.txtExportPath)
#Export table options page
pgExportTab = self.page(1)
pgExportTab.registerField("srcTabIndex*",self.lstSrcTab)
pgExportTab.registerField("geomCol",self.cboSpatialCols_2,"currentText",SIGNAL("currentIndexChanged(int)"))
def initializePage(self,int):
#Re-implementation of wizard page initialization
if int==1:
#Load tables
self.loadSourceTables()
if int==2:
#Load columns for query builder
selTableIndex = self.field("srcTabIndex")
self.srcTab = str(self.lstSrcTab.item(selTableIndex).text())
self.lstQueryCols.clear()
self.lstQueryCols.addItems(self.allCols)
def validateCurrentPage(self):
#Validate the current page before proceeding to the next one
validPage = True
if self.currentId() == 1:
if len(self.lstSrcTab.selectedItems()) == 0:
self.ErrorInfoMessage("Please select a table whose contents are to be exported.")
validPage=False
else:
if len(self.selectedColumns())==0:
self.ErrorInfoMessage("Please select at least one textual column whose values are to be exported.")
validPage=False
#Set Geometry column
geomCol = str(self.field("geomCol"))
self.geomColumn = "" if geomCol == "NULL" else geomCol
if self.currentId()==2:
validPage = self.execExport()
return validPage
def selectedColumns(self):
#Get the selected columns to be imported
tabCols=[]
for c in range(self.lstSrcCols_2.count()):
srcCol=self.lstSrcCols_2.item(c)
if srcCol.checkState() == Qt.Checked:
tabCols.append(srcCol.text())
return tabCols
def loadSourceTables(self):
#Load all STDM tables
self.lstSrcTab.clear()
tables = pg_tables()
for t in tables:
tabItem = QListWidgetItem(t,self.lstSrcTab)
tabItem.setIcon(QIcon(":/plugins/stdm/images/icons/table.png"))
self.lstSrcTab.addItem(tabItem)
def setDestFile(self):
#Set the file path to the destination file
if self.rbShp.isChecked():
ogrFilter = "ESRI Shapefile (*.shp)"
elif self.rbCSV.isChecked():
ogrFilter = "Comma Separated Values (*.csv)"
elif self.rbMapInfo.isChecked():
ogrFilter = "MapInfo File (*.tab)"
elif self.rbGPX.isChecked():
ogrFilter = "GPX (*.gpx)"
elif self.rbDXF.isChecked():
ogrFilter = "DXF (*.dxf)"
destFile = QFileDialog.getSaveFileName(self,"Select Output File",vectorFileDir(),ogrFilter)
if destFile != "":
self.txtExportPath.setText(destFile)
def srcSelectChanged(self):
'''
Handler when a source table item is clicked,
clears previous selections
'''
selTabs=self.lstSrcTab.selectedItems()
if len(selTabs) > 0:
selTab=selTabs[0]
#Load columns for the selected table
self.loadColumns(selTab.text())
def loadColumns(self,table):
#Load textual and spatial (if available) columns
#Get spatial columns first
spColumns = table_column_names(table,True)
self.cboSpatialCols_2.clear()
self.cboSpatialCols_2.addItems(spColumns)
#Textual Columns
self.lstSrcCols_2.clear()
self.allCols = table_column_names(table)
for sc in spColumns:
colIndex = getIndex(self.allCols,sc)
if colIndex != -1:
self.allCols.remove(sc)
for col in self.allCols:
tabItem = QListWidgetItem(col,self.lstSrcCols_2)
tabItem.setCheckState(Qt.Unchecked)
tabItem.setIcon(QIcon(":/plugins/stdm/images/icons/column.png"))
self.lstSrcCols_2.addItem(tabItem)
if len(spColumns) > 0:
self.cboSpatialCols_2.setEnabled(True)
def colUniqueValues(self):
#Slot for getting unique values for the selected column
self.lstUniqueVals.clear()
selCols = self.lstQueryCols.selectedItems()
if len(selCols) > 0:
selCol = selCols[0]
colName = selCol.text()
uniqVals = unique_column_values(self.srcTab,colName)
self.lstUniqueVals.addItems(uniqVals)
self.lstUniqueVals.sortItems()
def execExport(self):
#Initiate the export process
succeed = False
targetFile = str(self.field("destFile"))
writer = OGRWriter(targetFile)
resultSet = self.filter_buildQuery()
if resultSet is None:
return succeed
if resultSet.rowcount == 0:
self.ErrorInfoMessage("There are no records to export")
return succeed
try:
writer.db2Feat(self,self.srcTab,resultSet,self.selectedColumns(),self.geomColumn)
self.InfoMessage("Features in '%s' have been successfully exported!"%(self.srcTab))
#Update directory info in the registry
setVectorFileDir(targetFile)
succeed=True
except:
self.ErrorInfoMessage(str(sys.exc_info()[1]))
return succeed
def filter_clearQuery(self):
#Deletes all the text in the SQL text editor
self.txtWhereQuery.clear()
def filter_verifyQuery(self):
#Verify the query expression
if len(self.txtWhereQuery.toPlainText()) == 0:
self.ErrorInfoMessage("No filter has been defined.")
else:
results = self.filter_buildQuery()
if results != None:
rLen = results.rowcount
msg = "The SQL statement was successfully verified.\n" + str(rLen) + " record(s) returned."
self.InfoMessage(msg)
def filter_buildQuery(self):
#Build query set and return results
queryCols = self.selectedColumns()
if self.geomColumn != "":
queryCols.append("ST_AsText(%s)"%(self.geomColumn))
columnList = ",".join(queryCols)
whereStmnt = self.txtWhereQuery.toPlainText()
sortStmnt=''
results=None
try:
results = process_report_filter(self.srcTab,columnList,whereStmnt,sortStmnt)
except sqlalchemy.exc.DataError,e:
if e is None:
errMessage = "Database Error Message - NOT AVAILABLE"
else:
errMessage = e.message
self.ErrorInfoMessage("The SQL statement is invalid!\n" + errMessage)
return results
def filter_insertField(self,lstItem):
'''
Inserts the text of the clicked field item into the
SQL parser text editor.
'''
self.txtWhereQuery.insertPlainText(lstItem.text())
def filter_insertEq(self):
#Insert Equal operator
self.txtWhereQuery.insertPlainText(" = ")
def filter_insertNotEq(self):
#Insert Not Equal to
self.txtWhereQuery.insertPlainText(" <> ")
def filter_insertLike(self):
#Insert LIKE operator
self.txtWhereQuery.insertPlainText(" LIKE ")
def filter_greaterThan(self):
#Insert greater than
self.txtWhereQuery.insertPlainText(" > ")
def filter_greaterEq(self):
#Insert Greater than or equal to
self.txtWhereQuery.insertPlainText(" >= ")
def filter_insertAnd(self):
#Insert AND
self.txtWhereQuery.insertPlainText(" AND ")
def filter_insertLess(self):
self.txtWhereQuery.insertPlainText(" < ")
def filter_insertLessEq(self):
self.txtWhereQuery.insertPlainText(" <= ")
def filter_insertOR(self):
self.txtWhereQuery.insertPlainText(" OR ")
def keyPressEvent(self,e):
'''
Override method for preventing the dialog from
closing itself when the escape key is hit
'''
if e.key() == Qt.Key_Escape:
pass
def InfoMessage(self,message):
#Information message box
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText(message)
msg.exec_()
def ErrorInfoMessage(self,Message):
#Error Message Box
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText(Message)
msg.exec_()
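# --- Illustrative sketch (not part of the module): the values execExport() ends
# --- up passing to process_report_filter() for a hypothetical 'parcel' table
# --- with a 'geom' geometry column (all names below are placeholders):
#
#     srcTab     = 'parcel'
#     columnList = 'parcel_no,owner,ST_AsText(geom)'   # checked columns + geometry as WKT
#     whereStmnt = "owner LIKE 'J%'"                   # text typed in the query builder
#     results    = process_report_filter(srcTab, columnList, whereStmnt, '')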
|
olivierdalang/stdm
|
ui/export_data.py
|
Python
|
gpl-2.0
| 13,651
|
from Cantera import *
# thermo parametrizations
#from Cantera.Species.Thermo.NasaPolynomial import NasaPolynomial
|
HyperloopTeam/FullOpenMDAO
|
cantera-2.0.2/interfaces/python/MixMaster/config.py
|
Python
|
gpl-2.0
| 116
|
# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Copyright (c) Mercurial Contributors.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import time
from testutil.dott import feature, sh, testtmp # noqa: F401
sh % "setconfig 'extensions.treemanifest=!'"
(
sh % "cat"
<< r"""
[extdiff]
# for portability:
pdiff = sh "$RUNTESTDIR/pdiff"
"""
>> "$HGRCPATH"
)
# Create a repo with some stuff in it:
sh % "hg init a"
sh % "cd a"
sh % "echo a" > "a"
sh % "echo a" > "d"
sh % "echo a" > "e"
sh % "hg ci -qAm0"
sh % "echo b" > "a"
sh % "hg ci -m1 -u bar"
sh % "hg mv a b"
sh % "hg ci -m2"
sh % "hg cp b c"
sh % "hg ci -m3 -u baz"
sh % "echo b" > "d"
sh % "echo f" > "e"
sh % "hg ci -m4"
sh % "hg up -q 3"
sh % "echo b" > "e"
sh % "hg ci -m5"
# (Make sure mtime < fsnow to make the next merge commit stable)
time.sleep(1)
sh % "hg status"
sh % "hg debugsetparents 4 5"
sh % "hg ci -m6"
# (Force "refersh" treestate)
sh % "hg up -qC null"
sh % "hg up -qC tip"
sh % "hg debugmakepublic 3"
sh % "hg log -G --template '{author}@{rev}.{phase}: {desc}\\n'" == r"""
@ test@6.draft: 6
├─╮
│ o test@5.draft: 5
│ │
o │ test@4.draft: 4
├─╯
o baz@3.public: 3
│
o test@2.public: 2
│
o bar@1.public: 1
│
o test@0.public: 0"""
# Can't continue without starting:
sh % "hg rm -q e"
sh % "hg graft --continue" == r"""
abort: no graft in progress
[255]"""
sh % "hg graft --abort" == r"""
abort: no graft in progress
[255]"""
sh % "hg revert -r . -q e"
# Need to specify a rev:
sh % "hg graft" == r"""
abort: no revisions specified
[255]"""
# Empty revision set was specified
sh % "hg graft -r '2::1'" == r"""
abort: empty revision set was specified
[255]"""
# Can't graft ancestor:
sh % "hg graft 1 2" == r"""
skipping ancestor revision 5d205f8b35b6
skipping ancestor revision 5c095ad7e90f
[255]"""
# Specify revisions with -r:
sh % "hg graft -r 1 -r 2" == r"""
skipping ancestor revision 5d205f8b35b6
skipping ancestor revision 5c095ad7e90f
[255]"""
sh % "hg graft -r 1 2" == r"""
warning: inconsistent use of --rev might give unexpected revision ordering!
skipping ancestor revision 5c095ad7e90f
skipping ancestor revision 5d205f8b35b6
[255]"""
# Can't graft with dirty wd:
sh % "hg up -q 0"
sh % "echo foo" > "a"
sh % "hg graft 1" == r"""
abort: uncommitted changes
[255]"""
sh % "hg revert a"
# Graft a rename:
# (this also tests that editor is invoked if '--edit' is specified)
sh % "hg status --rev '2^1' --rev 2" == r"""
A b
R a"""
sh % "'HGEDITOR=cat' hg graft 2 -u foo --edit" == r"""
grafting 5c095ad7e90f "2"
merging a and b to b
2
HG: Enter commit message. Lines beginning with 'HG:' are removed.
HG: Leave message empty to abort commit.
HG: --
HG: user: foo
HG: branch 'default'
HG: added b
HG: removed a"""
sh % "hg export tip --git" == r"""
# HG changeset patch
# User foo
# Date 0 0
# Thu Jan 01 00:00:00 1970 +0000
# Node ID ef0ef43d49e79e81ddafdc7997401ba0041efc82
# Parent 68795b066622ca79a25816a662041d8f78f3cd9e
2
diff --git a/a b/b
rename from a
rename to b"""
# Look for extra:source
sh % "hg log --debug -r tip" == r"""
commit: ef0ef43d49e79e81ddafdc7997401ba0041efc82
phase: draft
manifest: e59b6b228f9cbf9903d5e9abf996e083a1f533eb
user: foo
date: Thu Jan 01 00:00:00 1970 +0000
files+: b
files-: a
extra: branch=default
extra: source=5c095ad7e90f871700f02dd1fa5012cb4498a2d4
description:
2"""
# Graft out of order, skipping a merge
# (this also tests that editor is not invoked if '--edit' is not specified)
sh % "hg graft 1 5 4 3 'merge()' -n" == r'''
skipping ungraftable merge revision 6
grafting 5d205f8b35b6 "1"
grafting 5345cd5c0f38 "5"
grafting 9c233e8e184d "4"
grafting 4c60f11aa304 "3"'''
sh % "'HGEDITOR=cat' hg graft 1 5 'merge()' --debug --config worker.backgroundclose=False" == r"""
skipping ungraftable merge revision 6
grafting 5d205f8b35b6 "1"
searching for copies back to 5d205f8b35b6
unmatched files in local:
b
all copies found (* = to merge, ! = divergent, % = renamed and deleted):
src: 'a' -> dst: 'b' *
checking for directory renames
resolving manifests
branchmerge: True, force: True, partial: False
ancestor: 68795b066622, local: ef0ef43d49e7+, remote: 5d205f8b35b6
preserving b for resolve of b
b: local copied/moved from a -> m (premerge)
picktool() hgmerge internal:merge
picked tool ':merge' for b (binary False symlink False changedelete False)
merging b and a to b
my b@ef0ef43d49e7+ other a@5d205f8b35b6 ancestor a@68795b066622
premerge successful
committing files:
b
committing manifest
committing changelog
grafting 5345cd5c0f38 "5"
searching for copies back to 5d205f8b35b6
unmatched files in other (from topological common ancestor):
c
resolving manifests
branchmerge: True, force: True, partial: False
ancestor: 4c60f11aa304, local: 6b9e5368ca4e+, remote: 5345cd5c0f38
e: remote is newer -> g
getting e
committing files:
e
committing manifest
committing changelog"""
sh % "'HGEDITOR=cat' hg graft 4 3 --log --debug" == r"""
grafting 9c233e8e184d "4"
searching for copies back to 5d205f8b35b6
unmatched files in other (from topological common ancestor):
c
resolving manifests
branchmerge: True, force: True, partial: False
ancestor: 4c60f11aa304, local: 9436191a062e+, remote: 9c233e8e184d
preserving e for resolve of e
d: remote is newer -> g
getting d
e: versions differ -> m (premerge)
picktool() hgmerge internal:merge
picked tool ':merge' for e (binary False symlink False changedelete False)
merging e
my e@9436191a062e+ other e@9c233e8e184d ancestor e@4c60f11aa304
e: versions differ -> m (merge)
picktool() hgmerge internal:merge
picked tool ':merge' for e (binary False symlink False changedelete False)
my e@9436191a062e+ other e@9c233e8e184d ancestor e@4c60f11aa304
warning: 1 conflicts while merging e! (edit, then use 'hg resolve --mark')
abort: unresolved conflicts, can't continue
(use 'hg resolve' and 'hg graft --continue --log')
[255]"""
# Summary should mention graft:
sh % "hg summary" == r"""
parent: 9436191a062e
5
commit: 2 modified, 2 unknown, 1 unresolved (graft in progress)
phases: 6 draft"""
# Using status to get more context
sh % "hg status --verbose" == r"""
M d
M e
? a.orig
? e.orig
# The repository is in an unfinished *graft* state.
# Unresolved merge conflicts:
# (trailing space)
# e
# (trailing space)
# To mark files as resolved: hg resolve --mark FILE
# To continue: hg graft --continue
# To abort: hg update --clean . (warning: this will discard uncommitted changes)"""
# Commit while interrupted should fail:
sh % "hg ci -m 'commit interrupted graft'" == r"""
abort: graft in progress
(use 'hg graft --continue' or 'hg graft --abort' to abort)
[255]"""
# Abort the graft and try committing:
sh % "hg graft --abort" == "2 files updated, 0 files merged, 0 files removed, 0 files unresolved"
sh % "hg status --verbose" == r"""
? a.orig
? e.orig"""
sh % "echo c" >> "e"
sh % "hg ci -mtest"
sh % "hg debugstrip ." == "1 files updated, 0 files merged, 0 files removed, 0 files unresolved"
# Graft again:
sh % "hg graft 4 3 'merge()'" == r"""
skipping ungraftable merge revision 6
grafting 9c233e8e184d "4"
merging e
warning: 1 conflicts while merging e! (edit, then use 'hg resolve --mark')
abort: unresolved conflicts, can't continue
(use 'hg resolve' and 'hg graft --continue')
[255]"""
# Continue without resolve should fail:
sh % "hg continue" == r"""
grafting 9c233e8e184d "4"
abort: unresolved merge conflicts (see 'hg help resolve')
[255]"""
# Fix up:
sh % "echo b" > "e"
sh % "hg resolve -m e" == r"""
(no more unresolved files)
continue: hg graft --continue"""
# Continue with a revision should fail:
sh % "hg graft -c 6" == r"""
abort: can't specify --continue and revisions
[255]"""
sh % "hg graft -c -r 6" == r"""
abort: can't specify --continue and revisions
[255]"""
sh % "hg graft --abort -r 6" == r"""
abort: can't specify --abort and revisions
[255]"""
# Continue for real, clobber usernames
sh % "hg graft -c -U" == r'''
grafting 9c233e8e184d "4"
grafting 4c60f11aa304 "3"'''
# Compare with original:
sh % "hg diff -r 6" == r"""
diff -r 7f1f8cbe8466 e
--- a/e Thu Jan 01 00:00:00 1970 +0000
+++ b/e Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +1,1 @@
-f
+b"""
# XXX: Copy-tracing (b and c are "copied" from a) is somehow broken with the
# Rust debugstrip and invalidatelinkrev repo requirement. We probably
# want to fix copy tracing or linkrev in other ways.
sh % "hg status --rev '0:.' -C" == r"""
M d
M e
A b
a
A c
a
R a"""
# View graph:
sh % "hg log -G --template '{author}@{rev}.{phase}: {desc}\\n'" == r"""
@ test@11.draft: 3
│
o test@10.draft: 4
│
o test@9.draft: 5
│
o bar@8.draft: 1
│
o foo@7.draft: 2
│
│ o test@6.draft: 6
│ ├─╮
│ │ o test@5.draft: 5
│ │ │
│ o │ test@4.draft: 4
│ ├─╯
│ o baz@3.public: 3
│ │
│ o test@2.public: 2
│ │
│ o bar@1.public: 1
├─╯
o test@0.public: 0"""
# Graft again onto another branch should preserve the original source
sh % "hg up -q 0"
sh % "echo g" > "g"
sh % "hg add g" == ""
sh % "hg ci -m 7" == ""
sh % "hg graft 7" == 'grafting ef0ef43d49e7 "2"'
sh % "hg log -r 7 --template '{rev}:{node}\\n'" == "7:ef0ef43d49e79e81ddafdc7997401ba0041efc82"
sh % "hg log -r 2 --template '{rev}:{node}\\n'" == "2:5c095ad7e90f871700f02dd1fa5012cb4498a2d4"
sh % "hg log --debug -r tip" == r"""
commit: 7a4785234d87ec1aa420ed6b11afe40fa73e12a9
phase: draft
manifest: dc313617b8c32457c0d589e0dbbedfe71f3cd637
user: foo
date: Thu Jan 01 00:00:00 1970 +0000
files+: b
files-: a
extra: branch=default
extra: intermediate-source=ef0ef43d49e79e81ddafdc7997401ba0041efc82
extra: source=5c095ad7e90f871700f02dd1fa5012cb4498a2d4
description:
2"""
sh % "hg up -q 6"
sh % "hg diff -r 2 -r 13" == r"""
diff -r 5c095ad7e90f -r 7a4785234d87 b
--- a/b Thu Jan 01 00:00:00 1970 +0000
+++ b/b Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +1,1 @@
-b
+a
diff -r 5c095ad7e90f -r 7a4785234d87 g
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/g Thu Jan 01 00:00:00 1970 +0000
@@ -0,0 +1,1 @@
+g"""
sh % "hg diff -r 2 -r 13 -X ." == ""
# Graft with --log
sh % "hg up -Cq 1"
sh % "hg graft 3 --log -u foo" == r"""
grafting 4c60f11aa304 "3"
warning: can't find ancestor for 'c' copied from 'b'!"""
sh % "hg log --template '{rev}:{node|short} {parents} {desc}\\n' -r tip" == r"""
14:0c921c65ef1e 5d205f8b35b6 3
(grafted from 4c60f11aa304a54ae1c199feb94e7fc771e51ed8)"""
# Resolve conflicted graft
sh % "hg up -q 0"
sh % "echo b" > "a"
sh % "hg ci -m 8"
sh % "echo c" > "a"
sh % "hg ci -m 9"
sh % "hg graft 1 --tool 'internal:fail'" == r"""
grafting 5d205f8b35b6 "1"
abort: unresolved conflicts, can't continue
(use 'hg resolve' and 'hg graft --continue')
[255]"""
sh % "hg resolve --all" == r"""
merging a
warning: 1 conflicts while merging a! (edit, then use 'hg resolve --mark')
[1]"""
sh % "cat a" == r"""
<<<<<<< local: aaa4406d4f0a - test: 9
c
=======
b
>>>>>>> graft: 5d205f8b35b6 - bar: 1"""
sh % "echo b" > "a"
sh % "hg resolve -m a" == r"""
(no more unresolved files)
continue: hg graft --continue"""
sh % "hg graft -c" == 'grafting 5d205f8b35b6 "1"'
sh % "hg export tip --git" == r"""
# HG changeset patch
# User bar
# Date 0 0
# Thu Jan 01 00:00:00 1970 +0000
# Node ID f67661df0c4804d301f064f332b57e7d5ddaf2be
# Parent aaa4406d4f0ae9befd6e58c82ec63706460cbca6
1
diff --git a/a b/a
--- a/a
+++ b/a
@@ -1,1 +1,1 @@
-c
+b"""
# Resolve conflicted graft with rename
sh % "echo c" > "a"
sh % "hg ci -m 10"
sh % "hg graft 2 --tool 'internal:fail'" == r"""
grafting 5c095ad7e90f "2"
abort: unresolved conflicts, can't continue
(use 'hg resolve' and 'hg graft --continue')
[255]"""
# XXX: This part is broken because copy-tracing is broken.
if False:
sh % "hg resolve --all" == r"""
merging a and b to b
(no more unresolved files)
continue: hg graft --continue"""
sh % "hg graft -c" == 'grafting 5c095ad7e90f "2"'
sh % "hg export tip --git" == r"""
# HG changeset patch
# User test
# Date 0 0
# Thu Jan 01 00:00:00 1970 +0000
# Node ID 9627f653b421c61fc1ea4c4e366745070fa3d2bc
# Parent ee295f490a40b97f3d18dd4c4f1c8936c233b612
2
diff --git a/a b/b
rename from a
rename to b"""
# graft with --force (still doesn't graft merges)
sh % "newrepo"
(
sh % "drawdag"
<< r"""
C D
|/|
A B
"""
)
sh % "hg update -q $C"
sh % "hg graft $B" == 'grafting fc2b737bb2e5 "B"'
sh % "hg rm A B C"
sh % "hg commit -m remove-all"
sh % "hg graft $A $D" == r"""
skipping ungraftable merge revision 3
skipping ancestor revision 426bada5c675
[255]"""
sh % "hg graft $B $A $D --force" == r'''
skipping ungraftable merge revision 3
grafting fc2b737bb2e5 "B"
grafting 426bada5c675 "A"'''
# graft --force after backout
sh % "echo abc" > "A"
sh % "hg ci -m to-backout"
sh % "hg bookmark -i to-backout"
sh % "hg backout to-backout" == r"""
reverting A
changeset 14707ec2ae63 backs out changeset b2fde3ce6adf"""
sh % "hg graft to-backout" == r"""
skipping ancestor revision b2fde3ce6adf
[255]"""
sh % "hg graft to-backout --force" == r"""
grafting b2fde3ce6adf "to-backout" (to-backout)
merging A"""
sh % "cat A" == "abc"
# graft --continue after --force
sh % "echo def" > "A"
sh % "hg ci -m 31"
sh % "hg graft to-backout --force --tool 'internal:fail'" == r"""
grafting b2fde3ce6adf "to-backout" (to-backout)
abort: unresolved conflicts, can't continue
(use 'hg resolve' and 'hg graft --continue')
[255]"""
sh % "echo abc" > "A"
sh % "hg resolve -qm A" == "continue: hg graft --continue"
sh % "hg graft -c" == 'grafting b2fde3ce6adf "to-backout" (to-backout)'
sh % "cat A" == "abc"
# Empty graft
sh % "newrepo"
(
sh % "drawdag"
<< r"""
A B # B/A=A
"""
)
sh % "hg up -qr $B"
sh % "hg graft $A" == r"""
grafting 426bada5c675 "A"
note: graft of 426bada5c675 created no changes to commit"""
# Graft to duplicate a commit
sh % "newrepo graftsibling"
sh % "touch a"
sh % "hg commit -qAm a"
sh % "touch b"
sh % "hg commit -qAm b"
sh % "hg log -G -T '{rev}\\n'" == r"""
@ 1
│
o 0"""
sh % "hg up -q 0"
sh % "hg graft -r 1" == 'grafting 0e067c57feba "b"'
sh % "hg log -G -T '{rev}\\n'" == r"""
@ 2
│
│ o 1
├─╯
o 0"""
# Graft to duplicate a commit twice
sh % "hg up -q 0"
sh % "hg graft -r 2" == 'grafting 044ec77f6389 "b"'
sh % "hg log -G -T '{rev}\\n'" == r"""
@ 3
│
│ o 2
├─╯
│ o 1
├─╯
o 0"""
# Graft from behind a move or rename
# ==================================
# NOTE: This is affected by issue5343, and will need updating when it's fixed
# Possible cases during a regular graft (when ca is between cta and c2):
# name | c1<-cta | cta<->ca | ca->c2
# A.0 | | |
# A.1 | X | |
# A.2 | | X |
# A.3 | | | X
# A.4 | X | X |
# A.5 | X | | X
# A.6 | | X | X
# A.7 | X | X | X
# A.0 is trivial, and doesn't need copy tracking.
# For A.1, a forward rename is recorded in the c1 pass, to be followed later.
# In A.2, the rename is recorded in the c2 pass and followed backwards.
# A.3 is recorded in the c2 pass as a forward rename to be duplicated on target.
# In A.4, both passes of checkcopies record incomplete renames, which are
# then joined in mergecopies to record a rename to be followed.
# In A.5 and A.7, the c1 pass records an incomplete rename, while the c2 pass
# records an incomplete divergence. The incomplete rename is then joined to the
# appropriate side of the incomplete divergence, and the result is recorded as a
# divergence. The code doesn't distinguish at all between these two cases, since
# the end result of them is the same: an incomplete divergence joined with an
# incomplete rename into a divergence.
# Finally, A.6 records a divergence entirely in the c2 pass.
# A.4 has a degenerate case a<-b<-a->a, where checkcopies isn't needed at all.
# A.5 has a special case a<-b<-b->a, which is treated like a<-b->a in a merge.
# A.6 has a special case a<-a<-b->a. Here, checkcopies will find a spurious
# incomplete divergence, which is in fact complete. This is handled later in
# mergecopies.
# A.7 has 4 special cases: a<-b<-a->b (the "ping-pong" case), a<-b<-c->b,
# a<-b<-a->c and a<-b<-c->a. Of these, only the "ping-pong" case is interesting,
# the others are fairly trivial (a<-b<-c->b and a<-b<-a->c proceed like the base
# case, a<-b<-c->a is treated the same as a<-b<-b->a).
# f5a therefore tests the "ping-pong" rename case, where a file is renamed to the
# same name on both branches, then the rename is backed out on one branch, and
# the backout is grafted to the other branch. This creates a challenging rename
# sequence of a<-b<-a->b in the graft target, topological CA, graft CA and graft
# source, respectively. Since rename detection will run on the c1 side for such a
# sequence (as for technical reasons, we split the c1 and c2 sides not at the
# graft CA, but rather at the topological CA), it will pick up a false rename,
# and cause a spurious merge conflict. This false rename is always exactly the
# reverse of the true rename that would be detected on the c2 side, so we can
# correct for it by detecting this condition and reversing as necessary.
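# Concretely, the commits created below exercise this with f5a/f5b: A0 adds f5a,
# B0 renames it to f5b, C0 renames it back to f5a and edits it, and E0 (the
# second graft target) renames f5a to f5b again before C0 is grafted onto it.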
# First, set up the repository with commits to be grafted
sh % "hg init ../graftmove"
sh % "cd ../graftmove"
sh % "echo c1a" > "f1a"
sh % "echo c2a" > "f2a"
sh % "echo c3a" > "f3a"
sh % "echo c4a" > "f4a"
sh % "echo c5a" > "f5a"
sh % "hg ci -qAm A0"
sh % "hg mv f1a f1b"
sh % "hg mv f3a f3b"
sh % "hg mv f5a f5b"
sh % "hg ci -qAm B0"
sh % "echo c1c" > "f1b"
sh % "hg mv f2a f2c"
sh % "hg mv f5b f5a"
sh % "echo c5c" > "f5a"
sh % "hg ci -qAm C0"
sh % "hg mv f3b f3d"
sh % "echo c4d" > "f4a"
sh % "hg ci -qAm D0"
sh % "hg log -G" == r"""
@ commit: b69f5839d2d9
│ user: test
│ date: Thu Jan 01 00:00:00 1970 +0000
│ summary: D0
│
o commit: f58c7e2b28fa
│ user: test
│ date: Thu Jan 01 00:00:00 1970 +0000
│ summary: C0
│
o commit: 3d7bba921b5d
│ user: test
│ date: Thu Jan 01 00:00:00 1970 +0000
│ summary: B0
│
o commit: 11f7a1b56675
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: A0"""
# Test the cases A.2 (f1x), A.3 (f2x) and a special case of A.6 (f5x) where the
# two renames actually converge to the same name (thus no actual divergence).
sh % "hg up -q 'desc(\"A0\")'"
sh % "'HGEDITOR=echo C1 >' hg graft -r 'desc(\"C0\")' --edit" == r"""
grafting f58c7e2b28fa "C0"
merging f1a and f1b to f1a
merging f5a
warning: can't find ancestor for 'f5a' copied from 'f5b'!"""
sh % "hg status --change ." == r"""
M f1a
M f5a
A f2c
R f2a"""
sh % "hg cat f1a" == "c1c"
sh % "hg cat f1b" == r"""
f1b: no such file in rev c9763722f9bd
[1]"""
# Test the cases A.0 (f4x) and A.6 (f3x)
sh % "'HGEDITOR=echo D1 >' hg graft -r 'desc(\"D0\")' --edit" == r"""
grafting b69f5839d2d9 "D0"
note: possible conflict - f3b was renamed multiple times to:
f3d
f3a
warning: can't find ancestor for 'f3d' copied from 'f3b'!"""
# Set up the repository for some further tests
sh % "hg up -q 'min(desc(A0))'"
sh % "hg mv f1a f1e"
sh % "echo c2e" > "f2a"
sh % "hg mv f3a f3e"
sh % "hg mv f4a f4e"
sh % "hg mv f5a f5b"
sh % "hg ci -qAm E0"
sh % "hg log -G" == r"""
@ commit: 6bd1736cab86
│ user: test
│ date: Thu Jan 01 00:00:00 1970 +0000
│ summary: E0
│
│ o commit: 560daee679da
│ │ user: test
│ │ date: Thu Jan 01 00:00:00 1970 +0000
│ │ summary: D1
│ │
│ o commit: c9763722f9bd
├─╯ user: test
│ date: Thu Jan 01 00:00:00 1970 +0000
│ summary: C1
│
│ o commit: b69f5839d2d9
│ │ user: test
│ │ date: Thu Jan 01 00:00:00 1970 +0000
│ │ summary: D0
│ │
│ o commit: f58c7e2b28fa
│ │ user: test
│ │ date: Thu Jan 01 00:00:00 1970 +0000
│ │ summary: C0
│ │
│ o commit: 3d7bba921b5d
├─╯ user: test
│ date: Thu Jan 01 00:00:00 1970 +0000
│ summary: B0
│
o commit: 11f7a1b56675
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: A0"""
# Test the cases A.4 (f1x), the "ping-pong" special case of A.7 (f5x),
# and A.3 with a local content change to be preserved (f2x).
sh % "'HGEDITOR=echo C2 >' hg graft -r 'desc(\"C0\")' --edit" == r"""
grafting f58c7e2b28fa "C0"
merging f1e and f1b to f1e
merging f2a and f2c to f2c
merging f5b and f5a to f5a"""
# Test the cases A.1 (f4x) and A.7 (f3x).
sh % "'HGEDITOR=echo D2 >' hg graft -r 'desc(\"D0\")' --edit" == r"""
grafting b69f5839d2d9 "D0"
note: possible conflict - f3b was renamed multiple times to:
f3e
f3d
merging f4e and f4a to f4e
warning: can't find ancestor for 'f3d' copied from 'f3b'!"""
# Check the results of the grafts tested
sh % "hg log -CGv --patch --git" == r"""
@ commit: 93ee502e8b0a
│ user: test
│ date: Thu Jan 01 00:00:00 1970 +0000
│ files: f3d f4e
│ description:
│ D2
│
│
│ diff --git a/f3d b/f3d
│ new file mode 100644
│ --- /dev/null
│ +++ b/f3d
│ @@ -0,0 +1,1 @@
│ +c3a
│ diff --git a/f4e b/f4e
│ --- a/f4e
│ +++ b/f4e
│ @@ -1,1 +1,1 @@
│ -c4a
│ +c4d
│
o commit: 539cf145f496
│ user: test
│ date: Thu Jan 01 00:00:00 1970 +0000
│ files: f1e f2a f2c f5a f5b
│ copies: f2c (f2a) f5a (f5b)
│ description:
│ C2
│
│
│ diff --git a/f1e b/f1e
│ --- a/f1e
│ +++ b/f1e
│ @@ -1,1 +1,1 @@
│ -c1a
│ +c1c
│ diff --git a/f2a b/f2c
│ rename from f2a
│ rename to f2c
│ diff --git a/f5b b/f5a
│ rename from f5b
│ rename to f5a
│ --- a/f5b
│ +++ b/f5a
│ @@ -1,1 +1,1 @@
│ -c5a
│ +c5c
│
o commit: 6bd1736cab86
│ user: test
│ date: Thu Jan 01 00:00:00 1970 +0000
│ files: f1a f1e f2a f3a f3e f4a f4e f5a f5b
│ copies: f1e (f1a) f3e (f3a) f4e (f4a) f5b (f5a)
│ description:
│ E0
│
│
│ diff --git a/f1a b/f1e
│ rename from f1a
│ rename to f1e
│ diff --git a/f2a b/f2a
│ --- a/f2a
│ +++ b/f2a
│ @@ -1,1 +1,1 @@
│ -c2a
│ +c2e
│ diff --git a/f3a b/f3e
│ rename from f3a
│ rename to f3e
│ diff --git a/f4a b/f4e
│ rename from f4a
│ rename to f4e
│ diff --git a/f5a b/f5b
│ rename from f5a
│ rename to f5b
│
│ o commit: 560daee679da
│ │ user: test
│ │ date: Thu Jan 01 00:00:00 1970 +0000
│ │ files: f3d f4a
│ │ description:
│ │ D1
│ │
│ │
│ │ diff --git a/f3d b/f3d
│ │ new file mode 100644
│ │ --- /dev/null
│ │ +++ b/f3d
│ │ @@ -0,0 +1,1 @@
│ │ +c3a
│ │ diff --git a/f4a b/f4a
│ │ --- a/f4a
│ │ +++ b/f4a
│ │ @@ -1,1 +1,1 @@
│ │ -c4a
│ │ +c4d
│ │
│ o commit: c9763722f9bd
├─╯ user: test
│ date: Thu Jan 01 00:00:00 1970 +0000
│ files: f1a f2a f2c f5a
│ copies: f2c (f2a)
│ description:
│ C1
│
│
│ diff --git a/f1a b/f1a
│ --- a/f1a
│ +++ b/f1a
│ @@ -1,1 +1,1 @@
│ -c1a
│ +c1c
│ diff --git a/f2a b/f2c
│ rename from f2a
│ rename to f2c
│ diff --git a/f5a b/f5a
│ --- a/f5a
│ +++ b/f5a
│ @@ -1,1 +1,1 @@
│ -c5a
│ +c5c
│
│ o commit: b69f5839d2d9
│ │ user: test
│ │ date: Thu Jan 01 00:00:00 1970 +0000
│ │ files: f3b f3d f4a
│ │ copies: f3d (f3b)
│ │ description:
│ │ D0
│ │
│ │
│ │ diff --git a/f3b b/f3d
│ │ rename from f3b
│ │ rename to f3d
│ │ diff --git a/f4a b/f4a
│ │ --- a/f4a
│ │ +++ b/f4a
│ │ @@ -1,1 +1,1 @@
│ │ -c4a
│ │ +c4d
│ │
│ o commit: f58c7e2b28fa
│ │ user: test
│ │ date: Thu Jan 01 00:00:00 1970 +0000
│ │ files: f1b f2a f2c f5a f5b
│ │ copies: f2c (f2a) f5a (f5b)
│ │ description:
│ │ C0
│ │
│ │
│ │ diff --git a/f1b b/f1b
│ │ --- a/f1b
│ │ +++ b/f1b
│ │ @@ -1,1 +1,1 @@
│ │ -c1a
│ │ +c1c
│ │ diff --git a/f2a b/f2c
│ │ rename from f2a
│ │ rename to f2c
│ │ diff --git a/f5b b/f5a
│ │ rename from f5b
│ │ rename to f5a
│ │ --- a/f5b
│ │ +++ b/f5a
│ │ @@ -1,1 +1,1 @@
│ │ -c5a
│ │ +c5c
│ │
│ o commit: 3d7bba921b5d
├─╯ user: test
│ date: Thu Jan 01 00:00:00 1970 +0000
│ files: f1a f1b f3a f3b f5a f5b
│ copies: f1b (f1a) f3b (f3a) f5b (f5a)
│ description:
│ B0
│
│
│ diff --git a/f1a b/f1b
│ rename from f1a
│ rename to f1b
│ diff --git a/f3a b/f3b
│ rename from f3a
│ rename to f3b
│ diff --git a/f5a b/f5b
│ rename from f5a
│ rename to f5b
│
o commit: 11f7a1b56675
user: test
date: Thu Jan 01 00:00:00 1970 +0000
files: f1a f2a f3a f4a f5a
description:
A0
diff --git a/f1a b/f1a
new file mode 100644
--- /dev/null
+++ b/f1a
@@ -0,0 +1,1 @@
+c1a
diff --git a/f2a b/f2a
new file mode 100644
--- /dev/null
+++ b/f2a
@@ -0,0 +1,1 @@
+c2a
diff --git a/f3a b/f3a
new file mode 100644
--- /dev/null
+++ b/f3a
@@ -0,0 +1,1 @@
+c3a
diff --git a/f4a b/f4a
new file mode 100644
--- /dev/null
+++ b/f4a
@@ -0,0 +1,1 @@
+c4a
diff --git a/f5a b/f5a
new file mode 100644
--- /dev/null
+++ b/f5a
@@ -0,0 +1,1 @@
+c5a"""
sh % "hg cat f2c" == "c2e"
# Check superfluous filemerge of files renamed in the past but untouched by graft
sh % "echo a" > "a"
sh % "hg ci -qAma"
sh % "hg mv a b"
sh % "echo b" > "b"
sh % "hg ci -qAmb"
sh % "echo c" > "c"
sh % "hg ci -qAmc"
sh % "hg up -q '.~2'"
sh % "hg graft tip '-qt:fail'"
sh % "cd .."
# Graft a change into a new file previously grafted into a renamed directory
sh % "hg init dirmovenewfile"
sh % "cd dirmovenewfile"
sh % "mkdir a"
sh % "echo a" > "a/a"
sh % "hg ci -qAma"
sh % "echo x" > "a/x"
sh % "hg ci -qAmx"
sh % "hg up -q 0"
sh % "hg mv -q a b"
sh % "hg ci -qAmb"
sh % "hg graft -q 1"
sh % "hg up -q 1"
sh % "echo y" > "a/x"
sh % "hg ci -qAmy"
sh % "hg up -q 3"
sh % "hg graft -q 4"
sh % "hg status --change ." == "M b/x"
# Prepare for test of skipped changesets and how merges can influence it:
sh % "hg merge -q -r 1 --tool ':local'"
sh % "hg ci -m m"
sh % "echo xx" >> "b/x"
sh % "hg ci -m xx"
sh % "hg log -G -T '{rev} {desc|firstline}'" == r"""
@ 7 xx
│
o 6 m
├─╮
│ o 5 y
│ │
│ │ o 4 y
├───╯
│ o 3 x
│ │
│ o 2 b
│ │
o │ 1 x
├─╯
o 0 a"""
# Grafting of plain changes correctly detects that 3 and 5 should be skipped:
sh % "hg up -qCr 4"
sh % "hg graft --tool ':local' -r '2'" == r'''
grafting 42127f193bcd "b"'''
# Extending the graft range to include a (skipped) merge of 3 will not prevent us from
# also detecting that both 3 and 5 should be skipped:
sh % "hg up -qCr 4"
sh % "hg graft --tool ':local' -r '2 + 6 + 7'" == r"""
skipping ungraftable merge revision 6
grafting 42127f193bcd "b"
grafting d3c3f2b38ecc "xx"
note: graft of d3c3f2b38ecc created no changes to commit"""
sh % "cd .."
|
facebookexperimental/eden
|
eden/scm/tests/test-graft-t.py
|
Python
|
gpl-2.0
| 30,206
|
import tkinter.ttk as tk
import tkinter.messagebox as mb
#thanks to http://simeonfranklin.com/blog/2012/jul/1/python-decorators-in-12-steps/
#and http://stackoverflow.com/questions/6666882/tkinter-python-catching-exceptions
def showerrorbox(func):
def run(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
print(e)
mb.showerror("Error", e)
raise e
return run
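# A minimal usage sketch (hypothetical names, kept as a comment so importing this
# module has no side effects): any Tk callback decorated with showerrorbox
# reports an uncaught exception in an error dialog before re-raising it.
#
#   import tkinter
#
#   @showerrorbox
#   def on_click():
#       raise ValueError("demo failure")
#
#   root = tkinter.Tk()
#   tk.Button(root, text="Click me", command=on_click).pack()
#   root.mainloop()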
|
willm/DDEXUI
|
tkinterutil.py
|
Python
|
gpl-2.0
| 450
|
import numpy as np
import datetime
import dateutil
import copy
from misc.datasets import BasicPropDataset, \
BasicPropAngleDataset, \
BasicPropAngleNoiseDataset, \
BasicPropAngleNoiseBGDataset, \
MnistDataset
import argparse
import matplotlib.pyplot as plt
from matplotlib import gridspec
from vae_half import *
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', default=10,
help='Number of epochs',
type=int)
parser.add_argument('--latent_z', default=10,
help='Latent code dimension',
type=int)
parser.add_argument('--latent_c', default=10,
help='Latent code dimension',
type=int)
args = parser.parse_args()
latent_z = args.latent_z
latent_c = args.latent_c
n_epochs = args.epochs
DATASETS = ['MNIST', 'BPAngleNoise', 'BPAngleNoiseBG']
SAVEPLOTS = './plots'
def plot_reconstruction(network_architecture, info=False, dataset='MNIST', x_sample=None):
# Validate dataset
if isinstance(dataset, str):
dataset = load_dataset(dataset)
if x_sample is None:
# Reconstruct test images using noinfo architecture
x_sample = dataset.test.next_batch(100)[0]
if dataset.dataset_name == "BASICPROP-angle":
x_sample = np.ceil(x_sample)
# Train network
vae = train(network_architecture, training_epochs=n_epochs,
info=info, dataset=dataset)
# Plot reconstructions
x_reconstruct = vae.reconstruct(x_sample)
plt.figure(figsize=(8, 15))
for i in range(3):
plt.subplot(5, 2, 2 * i + 1)
plt.imshow(x_sample[i].reshape(28, 28), vmin=0, vmax=1)
if i == 0:
plt.title("Original")
plt.colorbar()
plt.subplot(5, 2, 2 * i + 2)
plt.imshow(x_reconstruct[i].reshape(28, 28), vmin=0, vmax=1)
if i == 0:
plt.title("Reconstruction")
plt.colorbar()
plt.suptitle('Info {}'.format(info))
# Save plot
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%H_%M_%S_%Y%m%d')
savepath = '{}/REC_DS-{}_nz{}_nc{}_info{}_{}'.format(SAVEPLOTS,
dataset.dataset_name,
network_architecture['n_z'],
network_architecture['n_c'],
info, timestamp)
plt.savefig(savepath)
def plots_2D(network_architecture, info=False, dataset='MNIST', x_sample=None):
# Validate number of dimensions
if network_architecture['n_z'] > 1:
network_architecture = copy.deepcopy(network_architecture)
network_architecture['n_z'] = 2
if network_architecture['n_c'] > 1:
network_architecture = copy.deepcopy(network_architecture)
network_architecture['n_c'] = 2
# Validate dataset
if isinstance(dataset, str):
dataset = load_dataset(dataset)
if x_sample is None:
# Reconstruct test images using noinfo architecture
x_sample = dataset.test.next_batch(100)[0]
if dataset.dataset_name == "BASICPROP-angle":
x_sample = np.ceil(x_sample)
# Train network
vae_2d = train(network_architecture, training_epochs=n_epochs,
info=info, dataset=dataset)
# 2D scatterplot
z_mu = vae_2d.transform(x_sample)
plt.figure(figsize=(8, 6))
plt.scatter(z_mu[:, 0], z_mu[:, 1])
plt.title("Latent Space {}".format(info))
plt.xlabel('First dimension')
plt.ylabel('Second dimension')
plt.grid()
# Save plot
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%H_%M_%S_%Y%m%d')
savepath = '{}/LAT_DS-{}_nz{}_nc{}_info{}_{}'.format(SAVEPLOTS,
dataset.dataset_name,
network_architecture['n_z'],
network_architecture['n_c'],
info, timestamp)
plt.savefig(savepath)
# 2D reconstructions
# X-axis: second dimension, Y-axis: first dimension
nx = ny = 8
x_values = np.linspace(-2.5, 2.5, nx)
y_values = np.linspace(-2.5, 2.5, ny)
plt.subplot()
gs = gridspec.GridSpec(nx, ny, hspace=0.05, wspace=0.05)
for i, g in enumerate(gs):
ax = plt.subplot(g)
        z_mu = np.array([[x_values[i // ny], y_values[i % nx]]] * 100)
x_mean = vae_2d.generate(z_mu)
ax.imshow(x_mean[0].reshape(28, 28))
ax.set_xticks([])
ax.set_yticks([])
ax.set_aspect('auto')
plt.suptitle('Info {}'.format(info))
# Save plot
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%H_%M_%S_%Y%m%d')
savepath = '{}/LAT_REC_DS-{}_nz{}_nc{}_info{}_{}'.format(SAVEPLOTS,
dataset.dataset_name,
network_architecture['n_z'],
network_architecture['n_c'],
info, timestamp)
plt.savefig(savepath)
def plot_last_2D(network_architecture, info=False, dataset='MNIST', x_sample=None, vae_2d=None):
# Validate dataset
if isinstance(dataset, str):
dataset = load_dataset(dataset)
if x_sample is None:
# Reconstruct test images using noinfo architecture
x_sample = dataset.test.next_batch(100)[0]
if dataset.dataset_name == "BASICPROP-angle":
x_sample = np.ceil(x_sample)
if vae_2d is None:
vae_2d = train(network_architecture, training_epochs=n_epochs,
info=info, dataset=dataset)
latent = vae_2d.transform(x_sample)[0]
nx = ny = 6
x_values = np.linspace(-3.5, 3.5, nx)
y_values = np.linspace(-3.5, 3.5, ny)
plt.subplot()
gs = gridspec.GridSpec(nx, ny, hspace=0.05, wspace=0.05)
for i, g in enumerate(gs):
ax = plt.subplot(g)
lat = copy.deepcopy(latent)
        lat[0] = x_values[i // ny]
lat[1] = y_values[i % nx]
z_mu = np.array([lat] * 100)
x_mean = vae_2d.generate(z_mu)
ax.imshow(x_mean[0].reshape(28, 28))
ax.set_xticks([])
ax.set_yticks([])
ax.set_aspect('auto')
plt.suptitle('Info {}'.format(info))
# Save plot
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%H_%M_%S_%Y%m%d')
savepath = '{}/LAT_REC_v_DS-{}_nz{}_nc{}_info{}_{}'.format(SAVEPLOTS,
dataset.dataset_name,
network_architecture['n_z'],
network_architecture['n_c'],
info, timestamp)
plt.savefig(savepath)
def main():
network_architecture = dict(n_hidden_recog_1=500, # 1st layer encoder neurons
n_hidden_recog_2=500, # 2nd layer encoder neurons
n_hidden_gener_1=500, # 1st layer decoder neurons
n_hidden_gener_2=500, # 2nd layer decoder neurons
n_input=784, # MNIST data input (img shape: 28*28)
n_z=5, # dimensionality of latent space
n_c=5,
info=False)
for dataset_name in DATASETS:
for info in [True, False]:
dataset = load_dataset(dataset_name)
            x_sample = dataset.test.next_batch(100)[0]
plot_reconstruction(network_architecture,
info=info,
dataset=dataset,
x_sample=x_sample)
for dataset_name in DATASETS:
for info in [True, False]:
dataset = load_dataset(dataset_name)
x_sample, y_sample = dataset.test.next_batch(5000)
plots_2D(network_architecture,
info=info,
dataset=dataset,
x_sample=x_sample)
for dataset_name in DATASETS:
for info in [True, False]:
dataset = load_dataset(dataset_name)
vae_2d = train(network_architecture, training_epochs=n_epochs,
info=info, dataset=dataset)
x_sample, y_sample = dataset.test.next_batch(1)
plot_last_2D(network_architecture,
info=info,
dataset=dataset,
x_sample=x_sample,
vae_2d=vae_2d)
if __name__ == '__main__':
main()
network_architecture = dict(n_hidden_recog_1=500, # 1st layer encoder neurons
n_hidden_recog_2=500, # 2nd layer encoder neurons
n_hidden_gener_1=500, # 1st layer decoder neurons
n_hidden_gener_2=500, # 2nd layer decoder neurons
n_input=784, # MNIST data input (img shape: 28*28)
n_z=5, # dimensionality of latent space
n_c=5,
info=True)
vae_noinfo = train(network_architecture, training_epochs=n_epochs,
info=False, dataset=dataset)
vae_info = train(network_architecture, training_epochs=n_epochs,
info=True, dataset=dataset)
|
fducau/infoVAE
|
experiments_2.py
|
Python
|
gpl-3.0
| 9,898
|
"""
Unpack Files
"""
import os, sys, shutil, errno
from ScotchPy.application import Application, get_root_directory
from ScotchPy.utils import folder_utils, file_utils
class UnpackFilesApp(Application):
def __init__(self):
super(UnpackFilesApp, self).__init__("Unpack Files")
def run(self):
""" Run the application instance in the calling directory. """
self.unpackfiles(get_root_directory())
def unpackfiles(self, search_directory):
""" Move all files in subdirectories under [search_directory] to root. """
file_utils.remove_useless_files(search_directory)
for root, directories, filenames in os.walk(search_directory):
for filename in filenames:
source_filename = os.path.join(root, filename)
if os.path.isfile(source_filename):
destination = os.path.join(search_directory, filename)
file_utils.move_file(source_filename, destination)
self.log.write("moved_from:{0};moved_to:{1};\n".format(source_filename, destination))
for directory in directories:
subdirectory = os.path.join(root, directory)
folder_utils.remove_if_empty(subdirectory)
folder_utils.remove_if_empty(search_directory)
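# A hedged illustration with hypothetical paths: given
#   root/
#       sub1/a.txt
#       sub2/deeper/b.txt
# unpackfiles(root) moves a.txt and b.txt directly into root/ and then removes
# the emptied sub1/, sub2/deeper/ and sub2/ directories (and root/ itself if it
# ends up empty).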
if __name__ == "__main__":
try:
main = UnpackFilesApp()
main.run()
sys.exit(0)
except Exception as ex:
print("Error:", str(ex), "\n")
raise
sys.exit(-1)
|
admiraltoad/ScotchPy
|
scripts/unpackfiles.py
|
Python
|
gpl-3.0
| 1,350
|
from .ImageHandler import ImageHandler
from .DNN import DNN
import numpy as np
class DNNSwiftException(Exception):
pass
class DNNWrapper(object):
"""
This is an interface class that serves only to simplify the calls to this
package. Most of the functionality lies in the DNN and ImageHandler
classes.
This class stores and reuses the DNN to save time. It only reinitializes
the DNN if parameters change.
"""
def __init__(self, categories, layout):
"""
This is an interface class that serves only to simplify the calls to
this package. Most of the functionality lies in the DNN and
ImageHandler classes.
### --------------------------------------------------------------- ###
### categories ###
### --------------------------------------------------------------- ###
Internally, labels are one-hot unit vectors. 'categories' indicates
the order of labels. For example, a category dictionary of
{'label_A': 0, 'label_B': 1, 'label_C': 2} would expand 'label_A' into
the vector (1, 0, 0), 'label_B' into the vector (0, 1, 0), and
'label_C' into the vector (0, 0, 1).
### --------------------------------------------------------------- ###
### --------------------------------------------------------------- ###
### layout ###
### --------------------------------------------------------------- ###
Defines the layout of the DNN. This should be a list of dictionaries
in the format
[{name: "A", type: "conv", ...},
{name: "B", type: "conv", ...},
{name: "C", type: "fc", ...}]
A detailed guide on the structure of the layout file can be seen in the
vignette.
### --------------------------------------------------------------- ###
:param layout: A list of dictionaries
:param categories: A dictionary of image categories with their
corresponding annotation values. All categories present in
the dataset must be indicated here
"""
self._ready_for_training = False
self._image_handler = None
self._dnn = None
# Params needed by multiple functions
self._categories = categories
self._layout = layout
pass
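# A hedged construction sketch. The category mapping follows the one-hot order
# described above; the layer dictionaries are abbreviated to 'name' and 'type'
# here (the vignette documents the full layout schema):
#
#   categories = {"label_A": 0, "label_B": 1, "label_C": 2}
#   layout = [{"name": "conv1", "type": "conv"},
#             {"name": "conv2", "type": "conv"},
#             {"name": "out", "type": "fc"}]
#   wrapper = DNNWrapper(categories=categories, layout=layout)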
def initialize_training_data(
self, filename, data_split=None,
index_dict=None, image_key="images", label_key="labels",
outfile=None):
"""
Initialize the ImageHandler. It can either be initialized from
scratch, in which case it splits the data into the training,
validation, and test sets as indicated by the ratios of 'data_split',
or it can be initialized from an existing index dictionary
'index_dict'. If index_dict is not None, then the data_split argument
is ignored. 'index_dict' should be a dictionary as returned by
'save_lists(...)'.
### --------------------------------------------------------------- ###
### hdf5 file structure ###
### --------------------------------------------------------------- ###
The hdf5 file must have the following structure:
FILE
<HDF5 dataset "images": shape (num_images, num_channels, x, y)>
<HDF5 dataset "labels": shape (num_images, 1)>
The names of the dataset can be custom, but must match the parameters
'label_key' and 'image_key'.
### --------------------------------------------------------------- ###
### --------------------------------------------------------------- ###
### data_split ###
### --------------------------------------------------------------- ###
'data_split' should be a list of values indicating the relative sizes
of the training, validation, and testing image sets. E.g.
- data_split = [0.8, 0.1, 0.1] will create three datasets, the
"train" dataset consisting of 80% of the segments, the "val"
dataset consisting of 10%, and the "test" dataset consisting
of 10% of the data.
The values are normalized internally, so [0.8, 0.1, 0.1] is equivalent
to [8, 1, 1] or [16, 2, 2]
### --------------------------------------------------------------- ###
:param filename: The full filename of the hdf5 file with the images to
train on
:param data_split: A list of relative image set sizes for, in this
order, the training, validation, and testing image sets.
:param index_dict: A dictionary containing: 1) A dictionary of
indices and 2) a hash of the data file. The correct dictionary
structure is returned by 'save_lists(...)'
:param label_key: A string indicating which dataset to use as
labels. This is particularly useful if a single hdf5 file has
multiple possible label sets. Defaults to 'labels'
:param image_key: A string indicating which dataset to use as
images. Defaults to 'images'
:param outfile: A string. ImageHandler saves the index lists for each
data set (training, validation, testing). This is the filename
of that pickled data set. Will be saved under self._base_dir.
If 'None', the dictionary isn't saved. Defaults to 'None'. Never
overwrites an existing file.
:return:
"""
# Initialize the image handler
self._image_handler = ImageHandler(
filename=filename, categories=self._categories,
data_split=data_split, index_dict=index_dict,
image_key=image_key, label_key=label_key)
self._ready_for_training = True
# Save the resulting index lists
if outfile is not None:
self._image_handler.save_lists(filename=outfile)
return None
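# A hedged call sketch (the hdf5 file name is hypothetical; the file must contain
# the image and label datasets described in the docstring above):
#
#   wrapper.initialize_training_data(
#       filename="segments.h5",
#       data_split=[0.8, 0.1, 0.1],   # 80% train, 10% validation, 10% test
#       image_key="images", label_key="labels",
#       outfile="index_lists.pkl")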
def train_dnn(
self, num_epochs, batch_size=1, weights=None,
verbose=True, weights_dir=".", logfile=None, learning_rate=1e-3,
start_epoch=0, batch_limit=1):
"""
Trains the DNN
:param num_epochs: An integer defining the number of epochs for which
to train the DNN
:param batch_size: An integer defining the batchsize of the DNN.
Typical values should be powers of 2 to optimize speed.
:param weights: A dictionary of weights with keys corresponding to the
node names in layer_params, which require weights (e.g. pooling
layers do not require weights and complex nodes such as the
'inception' nodes require multiple weights). The weights files
output by the training are in the correct format to be read back
in, i.e. to continue training.
:param verbose: A boolean defining the level of detail to be output.
If 'True', then the progress within an epoch is printed. If
'False', then only the validation accuracy after each epoch is
printed.
:param weights_dir: A string defining the output directory into which
to save the weights. This is a relative directory under
"base_dir".
:param logfile: A string defining the file into which to write the
status output. This is an absolute path for easier integration
into other workflows. If 'None' then output is printed to stdout.
:param learning_rate: A floating point value indicating the learning
rate at which to train the DNN. Note that many optimizers already
dynamically adjust the learning rate throughout the learning,
            making this parameter unpredictable and possibly useless. It only
exists for legacy reasons
:param start_epoch: An integer defining the epoch at which to start
training. If start_epoch == 4, then the first output will have the
name "weights_4.pkl" and "val_4.pkl". This is useful for continuing
training and avoiding filename conflicts. NOTE: This does not
automatically read in the previous weight set. To properly continue
training, you must load the correct weights file manually and pass
the contents to the 'weights' parameter.
:param batch_limit: A float between 0 and 1 indicating what fraction
of the total number of training batches to use in each epoch. As the
images are scrambled before each epoch, this effectively creates
entirely new training sets in each epoch if much lower than 1. This
can be useful if the number of training images in the data set is
much larger than it needs to be.
:return:
"""
if not self._ready_for_training:
raise DNNSwiftException(
"Run 'initialize_training_data(...)' before training DNN")
image_dims = self._image_handler.get_image_dims()
categories = self._categories
# Create DNN object
self._dnn = DNN(
img_dims=image_dims, categories=categories,
layer_params=self._layout, learning_rate=learning_rate,
weights=weights)
# Train DNN
self._dnn.train_network(
batch_size=batch_size, image_handler=self._image_handler,
num_epochs=num_epochs, verbose=verbose, outdir=weights_dir,
logfile=logfile, start_epoch=start_epoch, batch_limit=batch_limit)
return None
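# A hedged training sketch (output paths are hypothetical; weights is left at its
# default here):
#
#   wrapper.train_dnn(
#       num_epochs=20, batch_size=32,
#       weights_dir="weights", logfile="/tmp/train.log",
#       learning_rate=1e-3, start_epoch=0, batch_limit=1)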
def apply_dnn(self, images, weights=None):
"""
Applies the DNN to an input image.
This function reinitializes the DNN when necessary. Specifically, it
compares the weights to the internal weights of the DNN and the
spatial dimensions of 'images' with the internally stored image size
of the DNN. If 'weights' is None, the internal DNN is always used.
:param images: An iterable of numpy arrays with the shape
(spatial, spatial, num_channels). This can also be a 4D numpy
array with the dimensions
(num_images, spatial, spatial, num_channels). Images must all
have the same shape.
:param weights: A dictionary of weights to use. Should match the node
names of the class' layout. In general, only the files outputted
by this class during training should be used as input. If
'weights' is None, the function attempts to use the current
weights of the DNN
:return:
"""
# Ensure all images have the same shape
imshapes = [image.shape for image in images]
if len(set(imshapes)) != 1:
raise DNNSwiftException("All images must have the same shape")
# Extract image shape
image_dims = images[0].shape
# If 'weights' is None, load the DNN weights
if weights is None:
try:
weights = self._dnn.get_weights()
except AttributeError:
raise DNNSwiftException(
"'weights' cannot be None if no initialized DNN exists.")
# Determine if a new DNN must be made
if self._dnn is None:
make_new_dnn = True
else:
make_new_dnn = False
# compare 'weights'
dnn_weights = self._dnn.get_weights()
if set(dnn_weights.keys()) != set(weights.keys()):
make_new_dnn = True
else:
for key in dnn_weights.keys():
if np.sum(~np.equal(dnn_weights[key], weights[key])) != 0:
make_new_dnn = True
break
# compare image dimensions
if image_dims != self._dnn.get_image_dims():
make_new_dnn = True
print("Reinitializing DNN: %s" % str(make_new_dnn))
if make_new_dnn:
self._dnn = DNN(
img_dims=image_dims, categories=self._categories,
layer_params=self._layout, weights=weights)
# Apply network to images
output = self._dnn.run_network(input_images=np.array(images))
return output
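# A hedged inference sketch: pull a batch back out of the handler and classify it
# with the weights currently held by the trained DNN (weights=None):
#
#   batch = wrapper.get_images("test", index_low=0, index_high=9)
#   predictions = wrapper.apply_dnn(batch["images"])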
def get_images(self, list_name, index_low=None, index_high=None):
"""
This is a wrapper for ImageHandler.get_images
:param list_name: The name of the list to use. This should be one of
"train", "val", or "test"
:param index_low: The first image to retrieve. If 'None' then set to 0
:param index_high: The last image to retrieve (inclusive, not like
python's slicing rules). If 'None' then set to last image of list
:return: A dictionary containing the images and the corresponding
labels in the keys 'images' and 'labels', respectively
"""
return self._image_handler.get_images(
list_name=list_name, index_low=index_low, index_high=index_high)
def get_weights(self):
"""
This is a wrapper function for DNN.get_weights(). Returns None if the
DNN hasn't been initiated yet.
:return:
"""
try:
weights = self._dnn.get_weights()
except AttributeError:
weights = None
return weights
def get_image_dims(self):
"""
This is a wrapper function for DNN.get_image_dims(). Returns None if
the DNN hasn't been initiated yet
:return:
"""
try:
img_dims = self._dnn.get_image_dims()
except AttributeError:
img_dims = None
return img_dims
def print_structure(self, filename):
"""
This is a wrapper function for DNN.print_structure(). Prints the
structure as an image to 'filename' if the DNN has been initialized.
Otherwise it does nothing
:return:
"""
try:
self._dnn.print_structure(filename=filename)
except AttributeError:
pass
return None
|
DragonDuck/DNNSwift
|
dnnSwift/DNNWrapperClass.py
|
Python
|
gpl-3.0
| 14,146
|
import ArduinoUSB
connection = ArduinoUSB.ArduinoUSB()
|
UCSD-AUVSI/NewOnboardSuite
|
OBC/serial_to_Arduino/globalvar_connection.py
|
Python
|
gpl-3.0
| 55
|