code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
import sys
import math
from subprocess import check_call
from subprocess import CalledProcessError
from MCTF_parser import MCTF_parser
# Default parameter values (overridable from the command line).
file = ""            # base name of the LFB data files (also the final output)
rate = 0.0           # kdu_expand bit-rate in bits/sample; <= 0.0 means "full"
pictures = 33        # number of images to expand
pixels_in_x = 352    # luma plane width
pixels_in_y = 288    # luma plane height
subband = 4  # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

parser = MCTF_parser(description="Expands the LFB texture data using JPEG 2000.")
parser.add_argument("--file",
                    help="file that contains the LFB data. Default = {})".format(file))
parser.add_argument("--rate",
                    help="read only the initial portion of the code-stream, corresponding to an overall bit-rate of \"rate\" bits/sample. Default = {})".format(rate))
# NOTE(review): these look like MCTF_parser helpers that register the
# corresponding --pictures/--pixels_in_* arguments - confirm in MCTF_parser.
parser.pictures(pictures)
parser.pixels_in_x(pixels_in_x)
parser.pixels_in_y(pixels_in_y)

args = parser.parse_known_args()[0]
if args.file:
    file = args.file
if args.rate:
    rate = float(args.rate)
if args.pictures:
    pictures = int(args.pictures)
if args.pixels_in_x:
    pixels_in_x = int(args.pixels_in_x)
if args.pixels_in_y:
    pixels_in_y = int(args.pixels_in_y)


def _decode_component(prefix, component, number, bitrate, blank_size):
    """Decode <prefix>_<component>_<number>.j2c into a .raw file.

    If the .j2c code-stream does not exist, a flat mid-gray (value 128)
    frame of `blank_size` bytes is written instead, so the sequence keeps
    its geometry.  Aborts the script if the external decoder fails.
    """
    base = prefix + "_" + component + "_" + ('%04d' % number)
    try:
        probe = open(base + ".j2c", "rb")
        probe.close()
    except IOError:
        # Missing code-stream: synthesize the gray frame in one bulk write
        # (the old code wrote it one byte at a time).
        raw = open(base + ".raw", "wb")
        raw.write(('%c' % 128) * blank_size)
        raw.close()
        return
    command = ("trace kdu_expand"
               + " -i " + base + ".j2c"
               + " -o " + base + ".raw")
    if bitrate > 0.0:
        # str() is required here: concatenating the float directly raised
        # TypeError, which the old bare "except:" silently swallowed by
        # falling back to a gray frame.
        command += " -rate " + str(bitrate)
    try:
        check_call(command, shell=True)
    except CalledProcessError:
        # Decoder failure is fatal (the old bare "except:" also swallowed
        # this sys.exit via SystemExit; now it really aborts).
        sys.exit(-1)


def _append_component(prefix, component, number):
    """Append <prefix>_<component>_<number>.raw to the output file."""
    try:
        check_call("trace cat " + prefix + "_" + component + "_"
                   + ('%04d' % number) + ".raw >> " + prefix, shell=True)
    except CalledProcessError:
        sys.exit(-1)


# Decode the YUV 4:2:0 sequence: a full-resolution luma plane (Y) plus two
# quarter-size chroma planes (U, V) per picture, appended to `file`.
luma_size = pixels_in_x * pixels_in_y
chroma_size = luma_size // 4
image_number = 0
while image_number < pictures:
    for component, size in (("Y", luma_size),
                            ("U", chroma_size),
                            ("V", chroma_size)):
        _decode_component(file, component, image_number, rate, size)
        _append_component(file, component, image_number)
    image_number += 1
| vicente-gonzalez-ruiz/QSVC | trunk/src/old_py/texture_expand_lfb_j2k.py | Python | gpl-2.0 | 4,853 |
""" instantly/main.py
Defines the basic terminal interface for interacting with Instantly.
Copyright (C) 2013 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
from pies.overrides import *
from . import __version__
from .instantly import Instantly
def _print_usage(instantly):
    """Show the short usage blurb and the locally installed templates."""
    print("Instantly allows you to expand simple templates, that take in a set number of arguments")
    print("Usage: instantly [template name] to expand a template")
    print(" type instantly help for full instructions.")
    print("")
    print("Installed Templates:")
    print("\t" + str(instantly.installed_templates))


def _print_help():
    """Print the full command reference."""
    print("Instantly Commands")
    print("")
    print("instantly [template name]")
    print("\t Expand the named template")
    print("instantly help")
    print("\t Get full list of commands / help text")
    print("instantly find [template name]")
    print("\t Find pre-made templates to automate a task online")
    print("instantly download [template name]")
    print("\t Add a template shared online to your local template repository")
    print("instantly install [template directory]")
    print("\t Installs an instant_template directory from the local file system "
          "or online repository into your personal collection of templates")
    print("instantly uninstall [template name]")
    print("\t Permanently removes an installed template locally")
    print("instantly create_instant_template")
    print("\t Create a new instant template to automate a task")
    print("instantly share [template name]")
    print("\t Share a template you have created with others online")
    print("\t Must register your google account with http://instantly.pl/ to do this")
    print("instantly unshare [template name]")
    print("\t Removes a template that you previously shared from the instantly online repository.")
    print("instantly location [template name]")
    print("\t Will tell you where the specified template is located on disk.")
    print("instantly create_settings [template directory]")
    print("\t Will create an alternate settings / template directory within the current directory.")
    print("instantly version")
    print("\t Will tell you the version of instantly you have installed.")


def _ask_for_value(instantly, argument, argument_definition):
    """Interactively prompt (until a valid answer) for one template argument.

    Returns a bool for 'bool' arguments, an int for 'int' arguments when the
    user typed digits, otherwise a string (possibly the configured default).
    """
    argument_type = argument_definition.get('type', 'string')
    # User-level defaults (settings) win over the template's own default.
    default = instantly.settings['defaults'].get(argument, '') or \
        argument_definition.get('default', '')
    help_text = argument_definition.get('help_text')
    if help_text:
        print("Help Text: {0}".format(help_text))
    prompt = argument_definition.get('prompt', '')
    if default:
        prompt += " [Default: {0}]".format(default)
    if argument_type == "bool":
        prompt += " (y/n)"
    prompt += ": "
    value = ""
    while value == "":  # loop until a usable (non-empty) value is produced
        value = input(prompt)
        if argument_type == "bool":
            if value.lower() in ("y", "yes"):
                value = True
            elif value.lower() in ("n", "no"):
                value = False
            else:
                value = default or ""
        elif argument_type == "int":
            if value.isdigit():
                value = int(value)
            elif not value:
                value = default
            else:
                value = ""  # invalid number: re-prompt
        elif not value:
            value = default
    return value


def _expand_template(instantly, template_name, extra_inputs):
    """Expand the named template.

    Arguments are taken from `extra_inputs` (positional command-line
    values) while available, then gathered interactively.
    """
    template = instantly.get_template(template_name)
    if not template:
        print("Sorry: no one has thought of a way to instantly '%s'," % template_name)
        print(" but you could always create one ;)")
        sys.exit(1)
    print("Expanding the following template:")
    print(template)
    arguments = {}
    for argument, argument_definition in itemsview(template.arguments):
        print("")
        if extra_inputs:
            arguments[argument] = extra_inputs.pop(0)
        else:
            arguments[argument] = _ask_for_value(instantly, argument,
                                                 argument_definition)
    success_message = instantly.expand(template_name, arguments)
    if success_message != False:
        print("Successfully ran '{0}'!".format(template_name))
        if success_message:
            print(success_message)


def main():
    """Command-line entry point: dispatch on sys.argv[1].

    Known sub-commands are handled explicitly; any other first argument is
    treated as the name of a template to expand.
    """
    instantly = Instantly()
    if not len(sys.argv) > 1:
        _print_usage(instantly)
        sys.exit(1)
    command = sys.argv[1]
    template_name = sys.argv[2:3] and sys.argv[2] or ""
    extra_inputs = sys.argv[2:]
    if command == "help":
        _print_help()
        sys.exit(0)
    elif command == "uninstall":
        if input("Are you sure you want to delete %s (y/n)? " % template_name).lower() in ("y", "yes"):
            if instantly.uninstall(template_name):
                print("Successfully removed %s from local templates" % template_name)
                sys.exit(0)
            else:
                sys.exit(1)
    elif command == "version":
        print("instantly v. {0}".format(__version__))
        sys.exit(0)
    elif command == "location":
        template = instantly.installed_template(template_name)
        if not template:
            print("Sorry template does not exist!")
            sys.exit(1)
        # Fixed: the location used to be `return`ed (never shown to the
        # user) with an unreachable sys.exit(0) after it; print it instead.
        print(template.location)
        sys.exit(0)
    elif command == "share":
        if instantly.share(template_name):
            print("Successfully shared %s, thanks for helping to expand the number of instant templates!" % template_name)
            sys.exit(0)
        else:
            sys.exit(1)
    elif command == "unshare":
        if instantly.unshare(template_name):
            print("Successfully un-shared %s!" % template_name)
            sys.exit(0)
        else:
            sys.exit(1)
    elif command == "create_settings":
        if instantly.create_settings():
            print("Successfully created a new settings / templates directory!")
            sys.exit(0)
        else:
            sys.exit(1)
    elif command == "find":
        results = instantly.find(template_name)
        if not results:
            print("Sorry: no templates have been shared that match the search term '%s'," % template_name)
            print(" but you could always add one ;)")
            sys.exit(0)
        print("Instantly found the following templates:")
        for result in results:
            print(result)
        print(" To install one of these templates run: instantly install [template_name]")
        sys.exit(0)
    elif command == "install":
        if instantly.install(template_name):
            print("%(name)s has been installed as a local template. Run 'instantly %(name)s' to expand it."
                  % {"name": template_name})
            sys.exit(0)
        else:
            print("Sorry: no one has thought of a way to instantly '%s'," % template_name)
            print(" but you could always create one ;)")
            sys.exit(0)
    else:
        # Anything else is treated as a template name.
        _expand_template(instantly, command, extra_inputs)
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| timothycrosley/instantly | instantly/main.py | Python | gpl-2.0 | 8,313 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module; setdefault means a
    # value already present in the environment wins.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vps_monitor.settings")
    # Imported here (standard manage.py layout) and delegated to Django's
    # command-line management utility.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| J22Melody/VPS-Monitor | manage.py | Python | gpl-2.0 | 254 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Craig J. Anderson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Provide the SubstKeywords class that will replace keywords in a passed
string with information about the person/marriage/spouse. For example:
foo = SubstKeywords(database, person_handle)
print foo.replace_and_clean(['$n was born on $b.'])
Will return a value such as:
Mary Smith was born on 3/28/1923.
"""
from __future__ import print_function
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.display.name import displayer as name_displayer
from gramps.gen.datehandler import displayer
from gramps.gen.lib import EventType
from gramps.gen.utils.db import get_birth_or_fallback, get_death_or_fallback
from gramps.gen.constfunc import STRTYPE, cuni
#------------------------------------------------------------------------
#
# Local constants
#
#------------------------------------------------------------------------
class TextTypes():
    """Enumerates the four kinds of pieces a parsed string is built from.

    The same values double as overall string states (separator never
    appears as a state):
        text   -> may later be removed or displayed
        remove -> may later be upgraded to display
    """
    separator = 0
    text = 1
    remove = 2
    display = 3


# Shared enumeration instance used throughout this module.
TXT = TextTypes()
#------------------------------------------------------------------------
#
# Formatting classes
#
#------------------------------------------------------------------------
class GenericFormat(object):
    """Base parser for "(...)"-style format strings.

    Subclasses supply the item being formatted (name, date, place, event,
    photo) plus a table of one-character codes mapped to getter functions;
    this class walks the "(...)" body and builds a VarString result.
    """

    def __init__(self, string_in):
        # string_in: the shared ConsumableString holding the unparsed input.
        self.string_in = string_in

    def _default_format(self, item):
        """ The default format used when no "(...)" format string follows.
        Overridden by subclasses; the base version produces nothing. """
        pass

    def is_blank(self, item):
        """ if the information is not known (item is None), remove the format
        string information from the input string if any.
        """
        if item is None:
            # Consume a trailing "(...)" so it is not echoed as plain text.
            self.string_in.remove_start_end("(", ")")
            return True
        return False

    def generic_format(self, item, code, uppr, function):
        """the main parsing engine.

        Needed are the following: the input string
        code - list of one character (string) codes (all lowercase)
        uppr - list of one character (string) codes that can be uppercased;
            each needs to have a lowercase equivalent in code
        function - list of functions.
        There is a one to one relationship between character codes and
        functions.  Returns a VarString (or the subclass default when no
        "(...)" format body is present).
        """
        if self.string_in.this != "(":
            # No explicit format: fall back to the subclass default.
            return self._default_format(item)
        self.string_in.step()

        main = VarString()
        separator = SeparatorParse(self.string_in)
        # code given in args
        # function given in args
        while self.string_in.this and self.string_in.this != ")":
            # Check to see if the current character is a known code.
            to_upper = False
            if uppr.find(self.string_in.this) != -1:
                # Uppercase variant: look up its lowercase twin and
                # remember to uppercase the produced value.
                to_upper = True
                where = code.find(self.string_in.this.lower())
            else:
                where = code.find(self.string_in.this)
            if where != -1:
                self.string_in.step()
                tmp = function[where]()
                if to_upper:
                    tmp = tmp.upper()
                if tmp == "" or tmp is None:
                    # Empty/unknown value: queue a remove marker so the
                    # surrounding separators can be dropped later.
                    main.add_remove()
                elif isinstance(tmp, VarString):  # events cause this
                    main.extend(tmp)
                else:
                    main.add_variable(tmp)
            elif separator.is_a():
                main.add_separator(separator.parse_format())
            else:
                # Plain text inside the format string.
                main.add_text(self.string_in.parse_format())
        if self.string_in.this == ")":
            self.string_in.step()
        return main
#------------------------------------------------------------------------
# Name Format strings
#------------------------------------------------------------------------
class NameFormat(GenericFormat):
    """ The name format class.
    If no format string, the name is displayed as per preference options;
    otherwise, parse through a format string and put the name parts in.
    """

    def get_name(self, person):
        """ A helper method for retrieving the person's primary name,
        or None when there is no person. """
        if person:
            return person.get_primary_name()
        return None

    def _default_format(self, name):
        """ display the name as set in preferences """
        return name_displayer.sorted_name(name)

    def parse_format(self, name):
        """ Parse the name format string into a VarString. """
        if self.is_blank(name):
            return

        def common():
            """ return the common name of the person: the call name if
            set, otherwise the first of the given names. """
            return (name.get_call_name() or
                    name.get_first_name().split(' ')[0])

        # One getter per format code, in the same order as `code`.
        code = "tfcnxslg"
        upper = code.upper()
        function = [name.get_title,             # t
                    name.get_first_name,        # f
                    name.get_call_name,         # c
                    name.get_nick_name,         # n
                    common,                     # x
                    name.get_suffix,            # s
                    name.get_surname,           # l
                    name.get_family_nick_name   # g
                    ]
        return self.generic_format(name, code, upper, function)
#------------------------------------------------------------------------
# Date Format strings
#------------------------------------------------------------------------
class DateFormat(GenericFormat):
    """ The date format class.
    If no format string, the date is displayed as per preference options;
    otherwise, parse through a format string and put the date parts in.
    """

    def get_date(self, event):
        """ A helper method for retrieving a date from an event,
        or None when there is no event. """
        if event:
            return event.get_date_object()
        return None

    def _default_format(self, date):
        # Preference-driven rendering from gramps.gen.datehandler.
        return displayer.display(date)

    def __count_chars(self, char, max_amount):
        """ count repeated year/month/day codes (e.g. "yyy" -> 3),
        capped at max_amount; consumes the repeats from the input. """
        count = 1  # already have seen/passed one
        while count < max_amount and self.string_in.this == char:
            self.string_in.step()
            count = count + 1
        return count

    def parse_format(self, date):
        """ Parse the date format string into a VarString. """
        if self.is_blank(date):
            return

        def year():
            """ The year part only.  'y' -> up to 2 digits (1 where
            possible), 'yy' -> zero-padded 2, 'yyy' -> 3, 'yyyy' -> 4. """
            year = cuni(date.get_year())
            count = self.__count_chars("y", 4)
            if year == "0":  # 0 means the year is unknown
                return
            if count == 1:  # found 'y'
                if len(year) == 1:
                    return year
                elif year[-2] == "0":
                    return year[-1]
                else:
                    return year[-2:]
            elif count == 2:  # found 'yy'
                tmp = "0" + year
                return tmp[-2:]
            elif count == 3:  # found 'yyy'
                if len(year) > 2:
                    return year
                else:
                    tmp = "00" + year
                    return tmp[-3:]
            else:  # count == 4   found 'yyyy'
                tmp = "000" + year
                return tmp[-4:]

        def month(char_found = "m"):
            """ The month part only.  'm' -> number, 'mm' -> zero-padded
            number, 'mmm' -> abbreviation, 'mmmm' -> full name. """
            month = cuni(date.get_month())
            count = self.__count_chars(char_found, 4)
            if month == "0":  # 0 means the month is unknown
                return
            if count == 1:
                return month
            elif count == 2:  # found 'mm'
                tmp = "0" + month
                return tmp[-2:]
            elif count == 3:  # found 'mmm'
                return displayer.short_months[int(month)]
            else:  # found 'mmmm'
                return displayer.long_months[int(month)]

        def month_up():
            """ Month for the uppercase 'M' codes. """
            return month("M").upper()

        def day():
            """ The day part only.  'd' -> number, 'dd' -> zero-padded. """
            day = cuni(date.get_day())
            count = self.__count_chars("d", 2)
            if day == "0":  # 0 means not defined!
                return
            if count == 1:  # found 'd'
                return day
            else:  # found 'dd'
                tmp = "0" + day
                return tmp[-2:]

        def modifier():
            # ui_mods taken from date.py def lookup_modifier(self, modifier):
            # NOTE(review): relies on the gettext "_" being installed as a
            # builtin by the application - confirm before standalone use.
            ui_mods = ["", _("before"), _("after"), _("about"),
                       "", "", ""]
            return ui_mods[date.get_modifier()].capitalize()

        # y/m/d pieces, M = uppercase month, o = modifier ('O' uppercased).
        code = "ymdMo"
        upper = "O"
        function = [year, month, day, month_up, modifier]
        return self.generic_format(date, code, upper, function)
#------------------------------------------------------------------------
# Place Format strings
#------------------------------------------------------------------------
class PlaceFormat(GenericFormat):
    """ The place format class.
    If no format string, the place is displayed as per preference options;
    otherwise, parse through a format string and put the place parts in.
    """

    def get_place(self, database, event):
        """ A helper method for retrieving a place from an event,
        or None when the event or its place handle is missing. """
        if event:
            bplace_handle = event.get_place_handle()
            if bplace_handle:
                return database.get_place_from_handle(bplace_handle)
        return None

    def _default_format(self, place):
        # With no explicit format, show the place title.
        return place.get_title()

    def parse_format(self, place):
        """ Parse the place format string into a VarString. """
        if self.is_blank(place):
            return

        # One getter per format code, in the same order as `code`.
        code = "elcuspnitxy"
        upper = code.upper()
        function = [place.get_main_location().get_street,        # e
                    place.get_main_location().get_locality,      # l
                    place.get_main_location().get_city,          # c
                    place.get_main_location().get_county,        # u
                    place.get_main_location().get_state,         # s
                    place.get_main_location().get_postal_code,   # p
                    place.get_main_location().get_country,       # n
                    place.get_main_location().get_parish,        # i
                    place.get_title,                             # t
                    place.get_longitude,                         # x
                    place.get_latitude                           # y
                    ]
        return self.generic_format(place, code, upper, function)
#------------------------------------------------------------------------
# Event Format strings
#------------------------------------------------------------------------
class EventFormat(GenericFormat):
    """ The event format class.
    If no format string, the event description is displayed;
    otherwise, parse through the format string and put in the parts.
    Dates and places can have their own (nested) format strings.
    """

    def __init__(self, database, _in):
        # database: needed to resolve the event's place handle.
        self.database = database
        GenericFormat.__init__(self, _in)

    def _default_format(self, event):
        if event is None:
            return
        else:
            return event.get_description()

    def __empty_format(self):
        """ clear out a sub format string """
        self.string_in.remove_start_end("(", ")")
        return

    def __empty_attrib(self):
        """ clear out an attribute name """
        self.string_in.remove_start_end("[", "]")
        return

    def parse_format(self, event):
        """ Parse the event format string.
        Let the date or place classes handle any sub-format strings. """
        if self.is_blank(event):
            return

        def format_date():
            """ start formatting a date in this event """
            date_format = DateFormat(self.string_in)
            return date_format.parse_format(date_format.get_date(event))

        def format_place():
            """ start formatting a place in this event """
            place_format = PlaceFormat(self.string_in)
            place = place_format.get_place(self.database, event)
            return place_format.parse_format(place)

        def format_attrib():
            """ Get the name and then get the attribute's value """
            # Event's attribute
            attrib_parse = AttributeParse(self.string_in)
            name = attrib_parse.get_name()
            if name:
                return attrib_parse.get_attribute(event.get_attribute_list(),
                                                  name)
            else:
                return

        # n=description, d=date, D=place, i=gramps id, a=attribute
        code = "ndDia"
        upper = ""
        function = [event.get_description,
                    format_date,
                    format_place,
                    event.get_gramps_id,
                    format_attrib
                    ]
        return self.generic_format(event, code, upper, function)

    def parse_empty(self):
        """ remove the (possibly nested) format string when there is
        no event to format """
        code = "dDa"
        function = [self.__empty_format, self.__empty_format,
                    self.__empty_attrib]
        return self.generic_format(None, code, "", function)
#------------------------------------------------------------------------
# Gallery Format strings
#------------------------------------------------------------------------
class GalleryFormat(GenericFormat):
    """ The gallery (photo) format class.
    If no format string, the photo description is displayed;
    otherwise, parse through the format string and put in the parts.
    Dates (no places) can have their own format strings.
    """

    def __init__(self, database, _in):
        # database: kept for parity with EventFormat; not used by the
        # methods below.
        self.database = database
        GenericFormat.__init__(self, _in)

    def _default_format(self, photo):
        if photo is None:
            return
        else:
            return photo.get_description()

    def __empty_format(self):
        """ clear out a sub format string """
        self.string_in.remove_start_end("(", ")")
        return

    def __empty_attrib(self):
        """ clear out an attribute name """
        self.string_in.remove_start_end("[", "]")
        return

    def parse_format(self, photo):
        """ Parse the photo format string.
        Let the date class handle any sub-format strings. """
        if self.is_blank(photo):
            return

        def format_date():
            """ start formatting a date in this photo """
            date_format = DateFormat(self.string_in)
            return date_format.parse_format(date_format.get_date(photo))

        def format_attrib():
            """ Get the name and then get the attribute's value """
            # photo's attribute
            attrib_parse = AttributeParse(self.string_in)
            name = attrib_parse.get_name()
            if name:
                return attrib_parse.get_attribute(photo.get_attribute_list(),
                                                  name)
            else:
                return

        # n=description, d=date, i=gramps id, a=attribute
        code = "ndia"
        upper = ""
        function = [photo.get_description,
                    format_date,
                    photo.get_gramps_id,
                    format_attrib
                    ]
        return self.generic_format(photo, code, upper, function)

    def parse_empty(self):
        """ remove the (possibly nested) format string when there is
        no photo to format """
        code = "da"
        function = [self.__empty_format, self.__empty_attrib]
        return self.generic_format(None, code, "", function)
#------------------------------------------------------------------------
#
# ConsumableString - The Input string class
#
#------------------------------------------------------------------------
class ConsumableString(object):
    """
    A left-to-right consumable view of a string, used while parsing.

    Attributes:
        this -- the current (first remaining) character, or None at the end
        next -- the look-ahead character, or None if fewer than two remain
    There is one instance per processed line.
    """

    def __init__(self, string):
        self.__remaining = string
        self.__refresh()

    def __refresh(self):
        """Recompute the `this`/`next` cursor attributes."""
        buff = self.__remaining
        self.this = buff[0] if buff else None
        self.next = buff[1] if len(buff) > 1 else None

    def step(self):
        """Consume one character and return the new current character."""
        self.__remaining = self.__remaining[1:]
        self.__refresh()
        return self.this

    def step2(self):
        """Consume two characters and return the new current character."""
        self.__remaining = self.__remaining[2:]
        self.__refresh()
        return self.this

    def remove_start_end(self, start, end):
        """If positioned on `start`, discard everything through `end`."""
        if self.this == start:
            self.text_to_next(end)

    def __take_text_char(self):
        """Consume and return one character of text, honoring backslash
        escapes (a trailing lone backslash yields itself)."""
        if self.this == "\\":
            taken = "\\" if self.next is None else self.next
            self.step2()
        else:
            taken = self.this
            self.step()
        return taken

    def text_to_next(self, char):
        """Consume and return all text up to `char`; the terminator itself
        is consumed but not included in the result."""
        pieces = []
        while self.this is not None and self.this != char:
            pieces.append(self.__take_text_char())
        if self.this == char:
            self.step()
        return "".join(pieces)

    def is_a(self):
        """Plain text is always recognized (parser protocol hook)."""
        return True

    def parse_format(self):
        """Consume one character of text; '' when at the end of input."""
        return self.__take_text_char() or ''
#------------------------------------------------------------------------
#
# VarString class - The Output string class
#
#------------------------------------------------------------------------
class VarString(object):
    """
    Output-string builder used for groups and format strings.

    Holds the overall state of the string (a TextTypes value) plus a queue
    of (TextTypes, string) pieces; get_final() collapses the queue into the
    text that will actually be displayed, dropping separators that sit next
    to removed (empty) variables.
    """

    def __init__(self, start_state = TXT.remove):
        self.state = start_state  # overall state of the string
        self._text = []           # list of tuples (TXT.?, string)

    def __update_state(self, new_status):
        # States are ordered (text < remove < display); only move "up".
        if new_status > self.state:
            self.state = new_status

    def add_text(self, text):
        """Queue literal text from the format string."""
        self._text.append((TXT.text, text))

    def add_variable(self, text):
        """Queue a variable's value; the whole string becomes displayable."""
        self.state = TXT.display
        self._text.append((TXT.text, text))

    def add_remove(self):
        """Queue a marker for an empty/unknown variable."""
        self.__update_state(TXT.remove)
        self._text.append((TXT.remove, ""))

    def add_separator(self, text):
        """Queue a <separator> that only prints next to real text."""
        self._text.append((TXT.separator, text))

    def get_final(self):
        """Collapse the queued pieces into a (state, string) tuple.

        A separator adjacent to a remove marker is dropped (in either
        order); a separator followed by text is kept.
        """
        curr_string = ""
        index = 0
        while index < len(self._text):
            if self._text[index][0] == TXT.text:
                curr_string += self._text[index][1]
                index = index + 1
                continue
            if index + 1 == len(self._text):
                # Last piece: keep a trailing separator only after output.
                if self._text[index][0] == TXT.separator and curr_string != '':
                    curr_string += self._text[index][1]
                index = index + 1
                break
            type_0_1 = (self._text[index][0], self._text[index + 1][0])
            if type_0_1 == (TXT.remove, TXT.separator):
                index = index + 1  # skip the separator after the marker too
            elif type_0_1 == (TXT.separator, TXT.remove):
                index = index + 1  # drop separator, then skip the marker
            elif type_0_1 == (TXT.separator, TXT.text):
                curr_string += self._text[index][1]
            # all other pairs (remove/remove, remove/text, sep/sep) fall
            # through and simply advance
            index = index + 1
        # Fixed: an unreachable debug print() used to follow this return.
        return (self.state, curr_string)

    def extend(self, acquisition):
        """
        acquisition is a VarString object.
        Merge the content of acquisition into this one.
        """
        self.__update_state(acquisition.state)
        if acquisition.state != TXT.display:
            # The sub {} was TXT.remove.  We don't want to simply ignore
            # it: queue a remove marker here to note it.
            self.add_remove()
            return
        self._text.extend(acquisition._text)
#------------------------------------------------------------------------
#
# Parsers
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# SeparatorParse
#------------------------------------------------------------------------
class SeparatorParse(object):
    """Recognize and consume a <...> separator in the input stream."""

    def __init__(self, consumer_in):
        self._in = consumer_in

    def is_a(self):
        """True when the current character opens a separator."""
        return self._in.this == "<"

    def parse_format(self):
        """Consume '<text>' and return the text, or None if not at one."""
        if not self.is_a():
            return None
        self._in.step()  # skip the '<'
        return self._in.text_to_next(">")
#------------------------------------------------------------------------
# AttributeParse
#------------------------------------------------------------------------
class AttributeParse(object):
    """Parse attribute references of the form a[name]."""

    def __init__(self, consumer_in):
        self._in = consumer_in

    def get_name(self):
        """Return the name inside a [] block, or None if not at one."""
        if self._in.this != "[":
            return None
        self._in.step()  # skip the '['
        return self._in.text_to_next("]")

    def get_attribute(self, attrib_list, attrib_name):
        """Look up the named attribute's value (as a string), or None."""
        if attrib_name == "":
            return None
        for attr in attrib_list:
            if str(attr.get_type()) == attrib_name:
                return str(attr.get_value())
        return None

    def is_a(self):
        """True when at an 'a' (attribute) code."""
        return self._in.this == "a"

    def parse_format(self, attrib_list):
        """Consume '[name]' and return the matching attribute value."""
        return self.get_attribute(attrib_list, self.get_name())
#------------------------------------------------------------------------
# VariableParse
#------------------------------------------------------------------------
class VariableParse(object):
""" Parse the individual variables """
def __init__(self, friend, database, consumer_in):
self.friend = friend
self.database = database
self._in = consumer_in
def is_a(self):
""" check """
return self._in.this == "$" and self._in.next is not None and \
"nsijbBdDmMvVauetTpP".find(self._in.next) != -1
def get_event_by_type(self, marriage, e_type):
""" get an event from a type """
if marriage is None:
return None
for e_ref in marriage.get_event_ref_list():
if not e_ref:
continue
event = self.friend.database.get_event_from_handle(e_ref.ref)
if event.get_type() == e_type:
return event
return None
def get_event_by_name(self, person, event_name):
""" get an event from a name. """
if not person:
return None
for e_ref in person.get_event_ref_list():
if not e_ref:
continue
event = self.friend.database.get_event_from_handle(e_ref.ref)
if event.get_type().is_type(event_name):
return event
return None
def empty_item(self, item):
""" return false if there is a valid item(date or place).
Otherwise
add a TXT.remove marker in the output string
remove any format strings from the input string
"""
if item is not None:
return False
self._in.remove_start_end("(", ")")
return True
def empty_attribute(self, person):
""" return false if there is a valid person.
Otherwise
add a TXT.remove marker in the output string
remove any attribute name from the input string
"""
if person:
return False
self._in.remove_start_end("[", "]")
return True
def __parse_date(self, event):
""" sub to process a date
Given an event, get the date object, process the format,
return the result """
date_f = DateFormat(self._in)
date = date_f.get_date(event)
if self.empty_item(date):
return
return date_f.parse_format(date)
def __parse_place(self, event):
""" sub to process a date
Given an event, get the place object, process the format,
return the result """
place_f = PlaceFormat(self._in)
place = place_f.get_place(self.database, event)
if self.empty_item(place):
return
return place_f.parse_format(place)
def __parse_name(self, person):
name_format = NameFormat(self._in)
name = name_format.get_name(person)
return name_format.parse_format(name)
def __parse_id(self, first_class_object):
if first_class_object is not None:
return first_class_object.get_gramps_id()
else:
return
def __parse_event(self, person, attrib_parse):
event = self.get_event_by_name(person, attrib_parse.get_name())
event_f = EventFormat(self.database, self._in)
if event:
return event_f.parse_format(event)
else:
event_f.parse_empty()
return
def __get_photo(self, person_or_marriage):
""" returns the first photo in the media list or None """
media_list = person_or_marriage.get_media_list()
for media_ref in media_list:
media_handle = media_ref.get_reference_handle()
media = self.database.get_object_from_handle(media_handle)
mime_type = media.get_mime_type()
if mime_type and mime_type.startswith("image"):
return media
return None
def __parse_photo(self, person_or_marriage):
photo_f = GalleryFormat(self.database, self._in)
if person_or_marriage is None:
return photo_f.parse_empty()
photo = self.__get_photo(person_or_marriage)
if photo:
return photo_f.parse_format(photo)
else:
return photo_f.parse_empty()
    def parse_format(self):
        """Parse a single $ directive from the input string.

        The character after the '$' selects what gets substituted
        (name, id, date, place, attribute, event or photo -- for either
        the person or the family).  Returns the formatted string, a
        VarString, or None when the requested item is empty.
        """
        if not self.is_a():
            return
        attrib_parse = AttributeParse(self._in)
        # Remember the selector character, then advance past "$x".
        next_char = self._in.next
        self._in.step2()
        if next_char == "n":
            #Person's name
            return self.__parse_name(self.friend.person)
        elif next_char == "s":
            #Spouse's name
            return self.__parse_name(self.friend.spouse)
        elif next_char == "i":
            #Person's Id
            return self.__parse_id(self.friend.person)
        elif next_char == "j":
            #Marriage Id
            return self.__parse_id(self.friend.family)
        elif next_char == "b":
            #Person's Birth date
            if self.empty_item(self.friend.person):
                return
            return self.__parse_date(
                get_birth_or_fallback(self.friend.database, self.friend.person))
        elif next_char == "d":
            #Person's Death date
            if self.empty_item(self.friend.person):
                return
            return self.__parse_date(
                get_death_or_fallback(self.friend.database, self.friend.person))
        elif next_char == "m":
            #Marriage date
            if self.empty_item(self.friend.family):
                return
            return self.__parse_date(
                self.get_event_by_type(self.friend.family,
                                       EventType.MARRIAGE))
        elif next_char == "v":
            #Divorce date
            if self.empty_item(self.friend.family):
                return
            return self.__parse_date(
                self.get_event_by_type(self.friend.family,
                                       EventType.DIVORCE))
        elif next_char == "T":
            #Today's date
            date_f = DateFormat(self._in)
            from gramps.gen.lib.date import Today
            date = Today()
            if self.empty_item(date):
                return
            return date_f.parse_format(date)
        elif next_char == "B":
            #Person's birth place
            if self.empty_item(self.friend.person):
                return
            return self.__parse_place(
                get_birth_or_fallback(self.friend.database, self.friend.person))
        elif next_char == "D":
            #Person's death place
            if self.empty_item(self.friend.person):
                return
            return self.__parse_place(
                get_death_or_fallback(self.friend.database, self.friend.person))
        elif next_char == "M":
            #Marriage place
            if self.empty_item(self.friend.family):
                return
            return self.__parse_place(
                self.get_event_by_type(self.friend.family,
                                       EventType.MARRIAGE))
        elif next_char == "V":
            #Divorce place
            if self.empty_item(self.friend.family):
                return
            return self.__parse_place(
                self.get_event_by_type(self.friend.family,
                                       EventType.DIVORCE))
        elif next_char == "a":
            #Person's Attribute
            if self.empty_attribute(self.friend.person):
                return
            return attrib_parse.parse_format(
                self.friend.person.get_attribute_list())
        elif next_char == "u":
            #Marriage Attribute
            if self.empty_attribute(self.friend.family):
                return
            return attrib_parse.parse_format(
                self.friend.family.get_attribute_list())
        elif next_char == "e":
            #person event
            return self.__parse_event(self.friend.person, attrib_parse)
        elif next_char == "t":
            #family event
            return self.__parse_event(self.friend.family, attrib_parse)
        elif next_char == 'p':
            #photo for the person
            return self.__parse_photo(self.friend.person)
        elif next_char == 'P':
            #photo for the marriage
            return self.__parse_photo(self.friend.family)
#------------------------------------------------------------------------
#
# SubstKeywords
#
#------------------------------------------------------------------------
class SubstKeywords(object):
    """Accepts a person/family with format lines and returns a new set of lines
    using variable substitution to make it.
    The individual variables are defined with the classes that look for them.
    Needed:
    Database object
    person_handle
    This will be the center person for the display
    family_handle
    this will specify the specific family/spouse to work with.
    If none given, then the first/preferred family/spouse is used
    """
    def __init__(self, database, person_handle, family_handle=None):
        """get the person and find the family/spouse to use for this display"""
        self.database = database
        self.person = database.get_person_from_handle(person_handle)
        self.family = None
        self.spouse = None
        self.line = None #Consumable_string - set below
        if self.person is None:
            return
        fam_hand_list = self.person.get_family_handle_list()
        if fam_hand_list:
            if family_handle in fam_hand_list:
                self.family = database.get_family_from_handle(family_handle)
            else:
                #Error. fam_hand_list[0] below may give wrong marriage info.
                #only here because of OLD specifications. Specs read:
                # * $S/%S
                # Displays the name of the person's preferred ...
                # 'preferred' means FIRST.
                #The first might not be the correct marriage to display.
                #else: clause SHOULD be removed.
                self.family = database.get_family_from_handle(fam_hand_list[0])
            father_handle = self.family.get_father_handle()
            mother_handle = self.family.get_mother_handle()
            self.spouse = None
            # The spouse is whichever partner is NOT the center person.
            if father_handle == person_handle:
                if mother_handle:
                    self.spouse = database.get_person_from_handle(mother_handle)
            else:
                if father_handle:
                    self.spouse = database.get_person_from_handle(father_handle)
    def __parse_line(self):
        """parse each line of text and return the new displayable line
        There are four things we can find here
        A {} group which will make/end as needed.
        A <> separator
        A $ variable - Handled separately
        or text
        """
        # Stack machine: curr_var accumulates the current (sub)group;
        # stack_var holds the enclosing groups.
        stack_var = []
        curr_var = VarString(TXT.text)
        #First we are going take care of all variables/groups
        #break down all {} (groups) and $ (vars) into either
        #(TXT.text, resulting_string) or (TXT.remove, '')
        variable = VariableParse(self, self.database, self.line) # $
        while self.line.this:
            if self.line.this == "{":
                #Start of a group
                #push what we have onto the stack
                stack_var.append(curr_var)
                #Setup
                curr_var = VarString()
                #step
                self.line.step()
            elif self.line.this == "}" and len(stack_var) > 0: #End of a group
                #add curr to what is on the (top) stack and pop into current
                #or pop the stack into current and add TXT.remove
                direction = curr_var.state
                if direction == TXT.display:
                    #add curr onto the top slot of the stack
                    stack_var[-1].extend(curr_var)
                #pop what we have on the stack
                curr_var = stack_var.pop()
                if direction == TXT.remove:
                    #add remove que
                    curr_var.add_remove()
                #step
                self.line.step()
            elif variable.is_a(): # $ (variables)
                rtrn = variable.parse_format()
                if rtrn is None:
                    # Empty variable: mark the group for removal.
                    curr_var.add_remove()
                elif isinstance(rtrn, VarString):
                    curr_var.extend(rtrn)
                else:
                    curr_var.add_variable(rtrn)
            elif self.line.this == "<": # separator
                self.line.step()
                curr_var.add_separator(self.line.text_to_next(">"))
            else: #regular text
                curr_var.add_text(self.line.parse_format())
        #the stack is for groups/subgroup and may contain items
        #if the user does not close his/her {}
        #squash down the stack
        while stack_var:
            direction = curr_var.state
            if direction == TXT.display:
                #add curr onto the top slot of the stack
                stack_var[-1].extend(curr_var)
            #pop what we have on the stack
            curr_var = stack_var.pop()
            if direction == TXT.remove:
                #add remove que
                curr_var.add_remove()
            #step
            # NOTE(review): the line is already exhausted here, so this
            # step() looks like a harmless copy/paste leftover -- confirm.
            self.line.step()
        #return what we have
        return curr_var.get_final()
    def __main_level(self):
        """Process one line; a leading '-' suppresses the line entirely
        when every variable in it turned out to be empty."""
        #Check only if the user wants to not display the line if TXT.remove
        remove_line_tag = False
        if self.line.this == "-":
            remove_line_tag = True
            self.line.step()
        state, line = self.__parse_line()
        if state is TXT.remove and remove_line_tag:
            return None
        return line
    def replace_and_clean(self, lines):
        """
        return a new array of lines with all of the substitutions done
        """
        new = []
        for this_line in lines:
            if this_line == "":
                # Blank lines pass through untouched.
                new.append(this_line)
                continue
            #print "- ", this_line
            self.line = ConsumableString(this_line)
            new_line = self.__main_level()
            #print "+ ", new_line
            if new_line is not None:
                new.append(new_line)
        if new == []:
            # Guarantee at least one (empty) output line.
            new = [""]
        return new
#Acts 20:35 (New International Version)
#In everything I did, I showed you that by this kind of hard work
#we must help the weak, remembering the words the Lord Jesus himself
#said: 'It is more blessed to give than to receive.'
if __name__ == '__main__':
    #-------------------------------------------------------------------------
    #
    # Self-test harness, run only when this module is executed directly.
    # For Testing everything except VariableParse, SubstKeywords and EventFormat
    # apply it as a script:
    #
    # ==> in command line do "PYTHONPATH=??? python libsubstkeyword.py"
    #
    # You will need to put in your own path to the src directory
    #
    #-------------------------------------------------------------------------
    # pylint: disable-msg=C0103
    def combinations(c, r):
        """Yield all r-element index combinations of range(c).

        This is the standard itertools.combinations recipe from the
        Python docs, specialised to a pool of tuple(range(c)).
        """
        # combinations('ABCD', 2) --> AB AC AD BC BD CD
        # combinations(range(4), 3) --> 012 013 023 123
        pool = tuple(range(c))
        n = len(pool)
        if r > n:
            return
        indices = list(range(r))
        yield tuple(pool[i] for i in indices)
        while True:
            # Find the rightmost index that can still be advanced.
            for i in reversed(list(range(r))):
                if indices[i] != i + n - r:
                    break
            else:
                return
            indices[i] += 1
            # Reset every index to the right of i.
            for j in range(i+1, r):
                indices[j] = indices[j-1] + 1
            yield tuple(pool[i] for i in indices)
    def main_level_test(_in, testing_class, testing_what):
        """This is a mini def __main_level(self):

        Drives *testing_class* (a format parser) over the consumable
        string *_in*, formatting *testing_what*; returns the resulting
        line, or None when everything was removed.
        """
        main = LevelParse(_in)
        sepa = SeparatorParse(_in)
        test = testing_class(_in)
        while _in.this:
            if main.is_a():
                main.parse_format(_in)
            elif sepa.is_a():
                sepa.parse_format(main)
            elif _in.this == "$":
                _in.step()
                main.add_variable(
                    test.parse_format(testing_what))
            else:
                _in.parse_format(main)
        main.combine_all()
        state, line = main.get_string()
        if state is TXT.remove:
            return None
        else:
            return line
    # ---- DateFormat self-test: known/unknown year, month, day ----
    from gramps.gen.lib.date import Date
    y_or_n = ()
    date_to_test = Date()
    def date_set():
        # Set only the components whose index appears in y_or_n.
        date_to_test.set_yr_mon_day(
            1970 if 0 in y_or_n else 0,
            9 if 1 in y_or_n else 0,
            3 if 2 in y_or_n else 0
            )
    #print date_to_test
    line_in = "<Z>$(yyy) <a>$(<Z>Mm)<b>$(mm){<c>$(d)}{<d>$(yyyy)<e>}<f>$(yy)"
    consume_str = ConsumableString(line_in)
    print(line_in)
    print("#None are known")
    tmp = main_level_test(consume_str, DateFormat, date_to_test)
    print(tmp)
    print("Good" if tmp == " " else "!! bad !!")
    print()
    print()
    print("#One is known")
    answer = []
    for y_or_n in combinations(3, 1):
        date_set()
        consume_str = ConsumableString(line_in)
        tmp = main_level_test(consume_str, DateFormat, date_to_test)
        print(tmp)
        answer.append(tmp)
    print("Good" if answer == [
        "1970 d1970f70",
        " a99b09",
        " c3"
        ] else "!! bad !!")
    print()
    print()
    print("#Two are known")
    answer = []
    for y_or_n in combinations(3, 2):
        date_set()
        consume_str = ConsumableString(line_in)
        tmp = main_level_test(consume_str, DateFormat, date_to_test)
        print(tmp)
        answer.append(tmp)
    print("Good" if answer == [
        "1970 a99b09d1970f70",
        "1970 c3d1970f70",
        " a99b09c3"
        ] else "!! bad !!")
    print()
    print()
    print("#All are known")
    answer = []
    y_or_n = (0, 1, 2)
    date_set()
    consume_str = ConsumableString(line_in)
    tmp = main_level_test(consume_str, DateFormat, date_to_test)
    print(tmp)
    answer.append(tmp)
    print("Good" if answer == ["1970 a99b09c3d1970f70"
        ] else "!! bad !!")
    # NOTE(review): this exit makes the name and place tests below
    # unreachable -- looks like a debugging leftover.
    import sys
    sys.exit()
    print()
    print()
    print("=============")
    print("=============")
    # ---- NameFormat self-test (dead code: sys.exit() above) ----
    from gramps.gen.lib.name import Name
    y_or_n = ()
    name_to_test = Name()
    def name_set():
        #code = "tfcnxslg"
        # Fill only the name parts whose index appears in y_or_n.
        name_to_test.set_call_name("Bob" if 0 in y_or_n else "")
        name_to_test.set_title("Dr." if 1 in y_or_n else "")
        name_to_test.set_first_name("Billy" if 2 in y_or_n else "")
        name_to_test.set_nick_name("Buck" if 3 in y_or_n else "")
        name_to_test.set_suffix("IV" if 4 in y_or_n else "")
        #now can we put something in for the last name?
        name_to_test.set_family_nick_name("The Clubs" if 5 in y_or_n else "")
    line_in = "{$(c)$(t)<1>{<2>$(f)}{<3>$(n){<0> <0>}<4>$(x)}$(s)<5>$(l)<6>$(g)<0>"
    consume_str = ConsumableString(line_in)
    print()
    print()
    print(line_in)
    print("#None are known")
    tmp = main_level_test(consume_str, NameFormat, name_to_test)
    print(tmp)
    print("Good" if tmp == None else "!! bad !!")
    print()
    print()
    print("#Two are known")
    answer = []
    for y_or_n in combinations(6, 2):
        name_set()
        consume_str = ConsumableString(line_in)
        tmp = main_level_test(consume_str, NameFormat, name_to_test)
        print(tmp)
        answer.append(tmp)
    print("Good" if answer == [
        "BobDr.4Bob",
        "Bob2Billy4Bob",
        "Bob3Buck4Bob",
        "Bob4BobIV",
        "Bob4BobThe Clubs",
        "Dr.2Billy4Billy",
        "Dr.3Buck",
        "Dr.1IV",
        "Dr.6The Clubs",
        "Billy3Buck4Billy",
        "Billy4BillyIV",
        "Billy4BillyThe Clubs",
        "BuckIV",
        "BuckThe Clubs",
        "IV6The Clubs"
        ] else "!! bad !!")
    print()
    print()
    print("#All are known")
    y_or_n = (0, 1, 2, 3, 4, 5)
    name_set()
    consume_str = ConsumableString(line_in)
    answer = main_level_test(consume_str, NameFormat, name_to_test)
    print(answer)
    print("Good" if answer == "BobDr.2Billy3Buck4BobIV6The Clubs" \
        else "!! bad !!")
from gramps.gen.lib.place import Place
y_or_n = ()
place_to_test = Place()
def place_set():
#code = "elcuspnitxy"
main_loc = place_to_test.get_main_location()
main_loc.set_street(
"Lost River Ave." if 0 in y_or_n else ""
)
main_loc.set_locality(
"Second district" if 1 in y_or_n else ""
)
main_loc.set_city(
"Arco" if 2 in y_or_n else ""
)
main_loc.set_county(
"Butte" if 3 in y_or_n else ""
)
main_loc.set_state(
"Idaho" if 4 in y_or_n else ""
)
main_loc.set_postal_code(
"83213" if 5 in y_or_n else ""
)
main_loc.set_country(
"USA" if 6 in y_or_n else ""
)
main_loc.set_parish(
"St Anns" if 7 in y_or_n else ""
)
place_to_test.set_title(
"Atomic City" if 8 in y_or_n else ""
)
place_to_test.set_longitude(
"N43H38'5\"N" if 9 in y_or_n else ""
)
place_to_test.set_latitude(
"W113H18'5\"W" if 10 in y_or_n else ""
)
#code = "txy"
line_in = "$(e)<1>{<2>$(l) <3> $(c)<4><0><5>{$(s)<6>$(p)<7>" + \
"{<1>$(n)<2>}<3>$(i<0>)<4>}<5>$(t)<6>$(x)<7>}<8>$(y)"
consume_str = ConsumableString(line_in)
print()
print()
print(line_in)
print("#None are known")
tmp = main_level_test(consume_str, PlaceFormat, place_to_test)
print(tmp)
print("Good" if tmp == "" else "!! bad !!")
print()
print()
print("#Three are known (string lengths only)")
answer = []
for y_or_n in combinations(11, 4):
place_set()
consume_str = ConsumableString(line_in)
tmp = main_level_test(consume_str, PlaceFormat, place_to_test)
#print tmp
answer.append(len(tmp))
print(answer)
print("Good" if answer == [38, 44, 44, 42, 46, 50, 49, 50, 40, 40, 38, 42,
46, 45, 46, 46, 44, 48, 52, 51, 52, 44, 48, 52, 51, 52, 46, 50, 49, 50,
54, 53, 54, 57, 58, 57, 28, 28, 26, 30, 34, 33, 34, 34, 32, 36, 40, 39,
40, 32, 36, 40, 39, 40, 34, 38, 37, 38, 42, 41, 42, 45, 46, 45, 30, 28,
32, 36, 35, 36, 28, 32, 36, 35, 36, 30, 34, 33, 34, 38, 37, 38, 41, 42,
41, 34, 38, 42, 41, 42, 36, 40, 39, 40, 44, 43, 44, 47, 48, 47, 36, 40,
39, 40, 44, 43, 44, 47, 48, 47, 42, 41, 42, 45, 46, 45, 49, 50, 49, 53,
28, 28, 26, 30, 34, 33, 34, 34, 32, 36, 40, 39, 40, 32, 36, 40, 39, 40,
34, 38, 37, 38, 42, 41, 42, 45, 46, 45, 30, 28, 32, 36, 35, 36, 28, 32,
36, 35, 36, 30, 34, 33, 34, 38, 37, 38, 41, 42, 41, 34, 38, 42, 41, 42,
36, 40, 39, 40, 44, 43, 44, 47, 48, 47, 36, 40, 39, 40, 44, 43, 44, 47,
48, 47, 42, 41, 42, 45, 46, 45, 49, 50, 49, 53, 19, 17, 21, 25, 24, 25,
17, 21, 25, 24, 25, 19, 23, 22, 23, 27, 26, 27, 30, 31, 30, 23, 27, 31,
30, 31, 25, 29, 28, 29, 33, 32, 33, 36, 37, 36, 25, 29, 28, 29, 33, 32,
33, 36, 37, 36, 31, 30, 31, 34, 35, 34, 38, 39, 38, 42, 19, 23, 27, 26,
27, 21, 25, 24, 25, 29, 28, 29, 32, 33, 32, 21, 25, 24, 25, 29, 28, 29,
32, 33, 32, 27, 26, 27, 30, 31, 30, 34, 35, 34, 38, 27, 31, 30, 31, 35,
34, 35, 38, 39, 38, 33, 32, 33, 36, 37, 36, 40, 41, 40, 44, 33, 32, 33,
36, 37, 36, 40, 41, 40, 44, 38, 39, 38, 42, 46] else "!! bad !!")
| Forage/Gramps | gramps/plugins/lib/libsubstkeyword.py | Python | gpl-2.0 | 48,355 |
#!/usr/bin/env python
from __future__ import division
import sys, os
import numpy as np
import readnew
from glob import glob
#import re
import yaml
import os.path
import time # Need to wait some time if file is being written
# Example: /home/jordan/sad-monte-carlo/
filename_location = sys.argv[1]
# Example: data/samc-1e4-256-cpp-reference-lndos.dat
reference = sys.argv[2]
# Used for where we save the data.: s000/periodic-ww1.50-ff0.17-N256
filebase = sys.argv[3]
# The number to divide moves by! N is added back in comparison-plot
N = int(sys.argv[4])
# Energy range
Smin = int(sys.argv[5])
Smax = int(sys.argv[6])
# Are you comparing to a yaml reference?
yamlRef = bool(sys.argv[7])
filename = sys.argv[8:]
print(('filenames are ', filename))
for f in filename:
name = '%s.yaml' % (f)
print(('trying filename ', name))
while not os.path.exists(filename_location + name):
print('I am waiting for file to be written.')
time.sleep(30)
# Read YAML file
if os.path.isfile(filename_location + name):
with open(filename_location + name, 'r') as stream:
yaml_data = yaml.load(stream)
else:
raise ValueError("%s isn't a file!" % (filename_location + name))
#print(data_loaded)
data = yaml_data
data['bins']['histogram'] = np.array(data['bins']['histogram'])
data['bins']['lnw'] = np.array(data['bins']['lnw'])
data['movies']['energy']
minyaml = data['movies']['energy'].index(-Smax)
maxyaml = data['movies']['energy'].index(-Smin)
#print(data['bins']['lnw'])
moves = data['moves']
data['movies']['entropy'] = np.array(data['movies']['entropy'])
lndos = data['movies']['entropy']
N_save_times = len(data['movies']['entropy'])
ref = reference
if ref[:len('data/')] != 'data/':
ref = 'data/' + ref
maxref = Smax #int(readnew.max_entropy_state(ref))
minref = Smin # int(readnew.min_important_energy(ref))
n_energies = int(minref - maxref+1)
#print maxref, minref
try:
eref, lndosref, Nrt_ref = readnew.e_lndos_ps(ref)
except:
eref, lndosref = readnew.e_lndos(ref)
errorinentropy = np.zeros(N_save_times)
maxerror = np.zeros(N_save_times)
for i in range(0, N_save_times):
# below just set average S equal between lndos and lndosref
if yamlRef:
# if using yaml as a reference the range is from 0 to len while for C++ the range is
# from maxref to minref + 1
norm_factor = np.mean(lndos[i][maxyaml:minyaml+1]) - np.mean(lndosref[0:(minyaml+1-maxyaml)])
doserror = lndos[i][maxyaml:minyaml+1][::-1] - lndosref[0:(minyaml+1-maxyaml)] - norm_factor
else:
norm_factor = np.mean(lndos[i][maxyaml:minyaml+1]) - np.mean(lndosref[maxref:minref+1])
doserror = lndos[i][maxyaml:minyaml+1][::-1] - lndosref[maxref:minref+1] - norm_factor
errorinentropy[i] = np.sum(abs(doserror))/len(doserror)
maxerror[i] = np.amax(doserror) - np.amin(doserror)
# remove N from moves in yaml file because N is added back in the
# comparison-plot script
moves = list(map(int, data['movies']['time']))
moves = [x / N for x in moves]
errorinentropy = errorinentropy[:len(moves)]
maxerror = maxerror[:len(moves)]
dirname = 'data/comparison/%s-%s' % (filebase, name.replace('.yaml', ''))
print('saving to', dirname)
try:
os.mkdir(dirname)
except OSError:
pass
else:
print(("Successfully created the directory %s " % dirname))
np.savetxt('%s/errors.txt' %(dirname),
np.c_[moves, errorinentropy, maxerror],
fmt = ('%.4g'),
delimiter = '\t',
header = 'iterations\t errorinentropy\t maxerror\t(generated with python %s' % ' '.join(sys.argv))
# The following is intended for testing whether there is a
# systematic error in any of our codes.
#np.savetxt('%s/error-vs-energy.txt' %(dirname),
#np.c_[eref, doserror],
#fmt = ('%.4g'),
#delimiter = '\t', header = 'E\t Serror')
| droundy/deft | papers/histogram/figs/yaml-comparison.py | Python | gpl-2.0 | 4,167 |
# -*- coding: utf-8 -*-
from distutils.core import setup
from distutils.command.build import build as _build
from distutils.command.install import install as _install
from distutils.command.install_data import install_data as _install_data
from distutils.command.sdist import sdist as _sdist
from distutils.extension import Extension
import os
import subprocess
# If src/compizconfig.pyx exists, build using Cython
if os.path.exists ("src/compizconfig.pyx"):
    from Cython.Distutils import build_ext
    ext_module_src = "src/compizconfig.pyx"
else: # Otherwise build directly from C source
    from distutils.command.build_ext import build_ext
    ext_module_src = "src/compizconfig.c"

# Read the version string, tolerating a "VERSION=x.y" style file.
# Fix: use a context manager so the file handle is closed promptly
# (it was previously left open for the life of the process).
with open ("VERSION", "r") as version_file:
    version = version_file.read ().strip ()
if "=" in version:
    version = version.split ("=")[1]
def pkgconfig(*packages, **kw):
    """Run pkg-config for *packages* and translate its output flags into
    distutils Extension keyword arguments.

    -I -> include_dirs, -L -> library_dirs, -l -> libraries; unless the
    COMPIZ_DISABLE_RPATH environment variable is set to "1", each -L
    path is also recorded as a runtime library dir (-R/rpath).
    Keyword arguments passed in are extended in place and returned.
    """
    flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries', '-R': 'runtime_library_dirs'}
    cmd = ['pkg-config', '--libs', '--cflags']
    tokens = subprocess.Popen (cmd + list(packages), stdout=subprocess.PIPE).communicate()[0].split ()
    for t in tokens:
        prefix = t[:2]
        if prefix == '-L':
            kw.setdefault (flag_map.get ("-L"), []).append (t[2:])
            # Bug fix: this used "is" (object identity) to compare with the
            # string literal "1", which only worked by interning accident.
            if os.getenv ("COMPIZ_DISABLE_RPATH") != "1":
                kw.setdefault (flag_map.get ("-R"), []).append (t[2:])
        elif prefix == '-I':
            kw.setdefault (flag_map.get ("-I"), []).append (t[2:])
        elif prefix == '-l':
            kw.setdefault (flag_map.get ("-l"), []).append (t[2:])
    return kw
# Absolute path of the VERSION file.
# NOTE(review): VERSION_FILE is never used below -- confirm it can go.
VERSION_FILE = os.path.join (os.path.dirname (__file__), "VERSION")
# Probe pkg-config for libcompizconfig; empty output means the .pc file
# is not installed or not on the search path.
pkgconfig_libs = subprocess.Popen (["pkg-config", "--libs", "libcompizconfig"], stdout=subprocess.PIPE, stderr=open(os.devnull, 'w')).communicate ()[0]
if len (pkgconfig_libs) == 0:  # bug fix: was "is 0" (identity comparison)
    print ("CompizConfig Python [ERROR]: No libcompizconfig.pc found in the pkg-config search path")
    print ("Ensure that libcompizonfig is installed or libcompizconfig.pc is in your $PKG_CONFIG_PATH")
    exit (1)
# First path fragment after the leading "-L" of the pkg-config output.
# NOTE(review): "libs" is also unused below -- confirm it can go.
libs = pkgconfig_libs[2:].split (" ")[0]
INSTALLED_FILES = "installed_files"
class install (_install):
    """Standard distutils install that additionally records the list of
    installed files (made relative to root/prefix) in INSTALLED_FILES,
    so the custom "uninstall" command can remove them later.

    NOTE(review): Python-2 era code (xrange, bare except, "file"
    shadowing the builtin); the handle is only closed on success.
    """
    def run (self):
        _install.run (self)
        outputs = self.get_outputs ()
        length = 0
        if self.root:
            length += len (self.root)
        if self.prefix:
            length += len (self.prefix)
        if length:
            # Strip the root/prefix so the stored paths are relocatable.
            for counter in xrange (len (outputs)):
                outputs[counter] = outputs[counter][length:]
        data = "\n".join (outputs)
        try:
            file = open (INSTALLED_FILES, "w")
        except:
            self.warn ("Could not write installed files list %s" % \
                       INSTALLED_FILES)
            return
        file.write (data)
        file.close ()
class install_data (_install_data):
    """install_data variant that chmods every installed data file to
    mode 0644 (rw-r--r--)."""
    def run (self):
        # Bug fix: the stat constants were never imported anywhere in
        # this file, so every chmod raised NameError, which the old bare
        # "except" silently converted into a warning for every file.
        from stat import S_IRUSR, S_IWUSR, S_IRGRP, S_IROTH
        def chmod_data_file (file):
            try:
                os.chmod (file, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH)
            except OSError:
                self.warn ("Could not chmod data file %s" % file)
        _install_data.run (self)
        # Explicit loop instead of map() so it also runs where map is lazy.
        for output in self.get_outputs ():
            chmod_data_file (output)
class uninstall (_install):
    """Remove every file recorded in INSTALLED_FILES by a previous
    "install" run, re-applying the original root/prefix.

    Best-effort: failures to read the list or unlink a file only warn.
    NOTE(review): Python-2 era code (xrange, bare except, "file"
    shadowing the builtin).
    """
    def run (self):
        try:
            file = open (INSTALLED_FILES, "r")
        except:
            self.warn ("Could not read installed files list %s" % \
                       INSTALLED_FILES)
            return
        files = file.readlines ()
        file.close ()
        prepend = ""
        if self.root:
            prepend += self.root
        if self.prefix:
            prepend += self.prefix
        if len (prepend):
            # Re-attach root/prefix that "install" stripped off.
            for counter in xrange (len (files)):
                files[counter] = prepend + files[counter].rstrip ()
        for file in files:
            print ("Uninstalling %s" % file)
            try:
                os.unlink (file)
            except:
                self.warn ("Could not remove file %s" % file)
class sdist (_sdist):
    """sdist variant that ships the generated C source instead of the
    Cython .pyx, so building from the tarball needs no Cython."""
    def run (self):
        # Build C file
        if os.path.exists ("src/compizconfig.pyx"):
            from Cython.Compiler.Main import compile as cython_compile
            cython_compile ("src/compizconfig.pyx")
        # Run regular sdist
        _sdist.run (self)
    def add_defaults (self):
        _sdist.add_defaults (self)
        # Remove pyx source and add c source
        if os.path.exists ("src/compizconfig.pyx"):
            self.filelist.exclude_pattern ("src/compizconfig.pyx")
            self.filelist.append ("src/compizconfig.c")
# Package metadata; the custom cmdclass entries wire in the
# install/uninstall/sdist variants defined above.
setup (
  name = "compizconfig-python",
  version = version,
  description = "CompizConfig Python",
  url = "http://www.compiz.org/",
  license = "GPL",
  maintainer = "Guillaume Seguin",
  maintainer_email = "guillaume@segu.in",
  cmdclass = {"uninstall" : uninstall,
              "install" : install,
              "install_data" : install_data,
              "build_ext" : build_ext,
              "sdist" : sdist},
  ext_modules=[
    Extension ("compizconfig", [ext_module_src],
               **pkgconfig("libcompizconfig"))
  ]
)
| hedmo/compizconfig-python | setup.py | Python | gpl-2.0 | 5,125 |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.12.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x04\x0a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd9\x02\x15\
\x16\x11\x2c\x9d\x48\x83\xbb\x00\x00\x03\x8a\x49\x44\x41\x54\x48\
\xc7\xad\x95\x4b\x68\x5c\x55\x18\xc7\x7f\xe7\xdc\x7b\x67\xe6\xce\
\x4c\x66\x26\x49\xd3\x24\x26\xa6\xc6\xf8\x40\x21\xa5\x04\xb3\x28\
\xda\x98\x20\xa5\x0b\xad\x55\xa8\x2b\xc5\x50\x1f\xa0\x6e\x34\x2b\
\x45\x30\x14\x02\xba\x52\x69\x15\x17\x66\x63\x45\x97\x95\xa0\xad\
\x0b\xfb\xc0\x06\x25\xb6\x71\x61\x12\x41\x50\xdb\x2a\x21\xd1\xe2\
\x24\xf3\x9e\xc9\xcc\xbd\xe7\x1c\x17\x35\x43\x1e\x33\x21\xb6\xfd\
\x56\x87\xf3\x9d\xfb\xfb\x1e\xf7\xff\x9d\x23\x8c\x31\x43\x95\xf4\
\x85\x1e\x3f\x3b\x35\xac\xfd\xcc\x43\xdc\xa4\x49\x3b\xfe\x9d\x1d\
\xdb\x7b\x22\x90\x78\xf8\xb2\x28\xa7\xbe\x7d\xc1\x4b\x9d\x79\xdf\
\x18\x15\xe5\x16\x99\x10\x56\xde\x69\xdc\x3f\x22\xfd\xec\xd4\xf0\
\xad\x04\x03\x18\xa3\xa2\x7e\x76\x6a\x58\xde\x68\x2b\xb4\x36\xf8\
\xbe\xc6\x18\x53\xdb\xef\xe7\xfa\xec\xed\x67\x63\x10\x42\x00\xf0\
\xfb\xd5\x65\x2a\x15\x45\xc7\x6d\x0d\x00\xc4\xa2\xc1\xaa\x6f\x0d\
\x3e\x6c\xab\xc2\x1c\x56\xa4\x77\x4b\xb0\xf2\x35\x15\x5f\x21\x85\
\xe0\xc8\x6b\x5f\x92\x2d\x37\x33\x39\xf9\x03\x27\x8e\x1f\xa2\xf7\
\xbe\x9d\x04\x1c\x0b\x37\xe4\xac\xff\xa6\x30\x87\xbd\xba\x00\x6a\
\x06\x79\xe5\xf5\xaf\x89\xd9\x92\xc5\xcc\x0a\xd9\x7c\x19\xcf\xe9\
\xe2\xe4\xa9\x2f\x78\x7c\xff\x01\x72\x85\x0a\x2b\x65\x1f\xa5\x4c\
\xb5\xb2\x55\x16\x80\xbd\x31\xda\xda\x20\x1f\x7d\x3e\xcd\xc2\xfd\
\x59\xa6\x93\x39\x92\xd1\x22\xea\x9b\x16\xce\x9d\x3f\xce\xe0\x83\
\x03\x24\x82\x59\x3a\xdb\x7b\x88\xc7\x82\x68\x63\x58\xc9\xcc\x62\
\x8c\x21\x18\xb0\x6a\xc3\x37\x06\x49\x16\xff\x24\x6b\xa5\x49\xbb\
\x25\xbc\xa2\xa6\x21\xbb\x40\x7f\xdf\x00\x83\xbd\x01\x8e\x3c\xd5\
\x45\xd7\x8e\x6b\x9c\x9c\x98\x25\x1a\xb6\xe8\xbe\x3d\xc2\xdd\x77\
\x44\x48\xc4\x1c\x22\xe1\xeb\x58\x59\xaf\xcf\xd3\x33\x29\x2e\x34\
\x2d\x91\x93\x3e\xbe\x34\x78\x01\xc5\xe2\x61\xc5\xae\x72\x8e\x70\
\xc8\xc2\x0d\x5a\xbc\xf5\xee\x2f\x9c\xfa\x3e\x86\x69\x7a\x8e\xcf\
\x26\xe6\xf9\x63\xa1\x44\xa1\xa4\xd0\xda\x6c\x0d\x2f\x15\x7c\xb4\
\x67\x28\x59\x0a\xcf\xd6\x54\xe2\x06\x13\x87\x2b\x6f\x68\xa6\x27\
\xaf\x31\x32\x36\xc7\xb2\x7f\x17\xef\x7d\x7c\x8c\x33\x67\xcf\x12\
\x70\x24\x4a\x69\xd6\x6a\x46\xd6\xd3\x70\x72\xa9\x82\x67\x34\x45\
\xad\x28\xdb\x1a\x15\x34\x98\xff\x46\xed\xef\x37\x0d\x99\xbf\x4a\
\x3c\x30\x38\xc0\xc8\x4b\xaf\x92\x5a\x9c\xe2\xe0\x23\x6d\x74\xb4\
\xba\x84\x5d\x0b\x29\x45\x7d\xb8\x94\x82\x96\xb6\x10\xf3\xc5\x12\
\x2a\xef\x53\x11\x1a\x63\xad\x3f\x93\x19\x85\xf1\xb1\x77\x58\x5a\
\xf8\x99\x97\x9f\xe9\xa6\x75\x47\x90\xc6\xb8\x43\xd8\xb5\xb6\xce\
\xfc\xfa\xfd\x00\xfb\x3e\xf4\xc8\x05\x35\xba\x5e\xeb\x46\x21\xf9\
\xcf\x0a\xa9\x8c\x87\xe3\x48\xdc\x90\xb5\x6e\x98\x6a\xaa\x65\xf2\
\x52\x92\x43\x2f\x5e\xc2\x8c\x02\x1a\x10\xf5\x07\xac\xc3\x75\x70\
\x83\x92\x80\xb3\xf9\xd0\x26\xf8\x8f\xb3\x29\xc6\x3e\xb8\x8c\x19\
\x35\x75\x6b\x7b\x7e\x3c\xca\x45\x0c\x7e\x49\x31\xf4\x58\x3b\xf7\
\xf6\x34\x90\x88\x39\x04\x1c\x59\x1f\xfe\xdb\xd5\x3c\x5f\x9d\x4b\
\x32\xfd\x44\xb2\xba\xd7\xfa\xb6\x60\xcf\xde\x16\xdc\x90\x45\x4c\
\x4a\x2a\x9e\x62\xfe\x4e\xc5\xc8\xc1\x4e\xda\x76\x86\xe8\xe9\x0a\
\xe3\xd8\x92\x58\xd4\xc6\xb2\x44\x6d\x78\x2a\x53\xe1\xca\x7c\x99\
\x63\x5d\xbf\x56\x9d\xbd\x9f\x44\x18\x7a\xba\x95\x27\x0f\xb4\xd3\
\xdc\x18\xc0\xf3\x0d\x52\x40\xd8\xb5\xb0\xa4\x20\x14\xb2\x70\x6c\
\x81\x63\xcb\xaa\x42\xd6\xfd\xb7\xf4\xec\xa3\x06\xa0\x50\x52\xd8\
\x4e\x1b\x7e\x4a\xd3\x31\xf9\x29\xcf\xfe\xd4\x49\x7f\x5f\x13\xfb\
\xfa\x9b\x71\x43\x92\x58\xd4\x21\x18\x90\xac\xde\xb0\x42\x50\x13\
\x58\x33\xf3\x88\x6b\xa1\xfd\x65\x96\xf2\x79\xc6\x43\x7b\xd8\x75\
\x38\xcc\x3d\xdd\xd1\xaa\xcf\x71\xe4\xff\x7f\x91\x56\x33\xaf\xea\
\x37\xe7\xa1\x94\x21\x16\xb5\xd1\x06\x2c\x29\x36\xf5\x72\x9b\x96\
\x95\xc0\xc4\xda\x9d\x78\x83\x43\x53\x22\x80\x65\x09\x1c\xfb\x86\
\xc1\x00\xe7\x25\x70\x14\x48\x6f\x1e\x22\x51\xe3\x75\xd9\xb6\xa5\
\x81\xa3\x32\xb1\xfb\xf4\x0c\x30\xb8\xb1\x82\x9b\xb0\x09\x60\x30\
\xb1\xfb\xf4\xcc\xbf\xa0\xe9\x6e\xae\x5a\xdf\x4b\x81\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x06\
\x07\x5c\x76\xa2\
\x00\x6f\
\x00\x65\x00\x71\x00\x5f\x00\x74\x00\x62\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x26\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x26\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x69\x23\xc2\x96\x6e\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| UdK-VPT/Open_eQuarter | oeq_tb/resources.py | Python | gpl-2.0 | 5,957 |
import numpy as np

# Read a questions file with a fixed per-question layout ("test.dat")
# and write a shuffled copy ("test2.dat"): the question order is
# shuffled (except the last question, kept in place) and the answer
# alternatives inside each question are shuffled.
#
# NOTE(review): nlines=6 gives each question only 6 lines, yet the
# alternatives are sliced as q[4:9] (up to 5 lines) and the tail as
# q[-2:]; with 6-line questions those slices overlap, so lines 5-6 are
# written twice per question.  Looks inconsistent with the comment
# about five alternatives -- confirm the real file format.

nlines = 6       # lines per question block
nquestions = 16  # number of question blocks in the file


def _write_shuffled(question, out):
    """Shuffle the alternatives of one question block and write it out."""
    alts = question[4:9]
    np.random.shuffle(alts)
    block = np.concatenate([question[:4], alts, question[-2:]])
    for line in block:
        out.write(str(line) + "\n")


# Context managers: the original only closed the handles on success.
with open("test.dat") as src, open("test2.dat", "w") as dst:
    lines = src.read().split("\n")
    questions = [lines[nlines * i:nlines * (i + 1)] for i in range(nquestions)]
    # Keep the last question in place; shuffle the rest.
    # (Same np.random call order as before, so seeded runs match.)
    last = questions[-1]
    body = questions[:-1]
    np.random.shuffle(body)
    for question in body:
        _write_shuffled(question, dst)
    _write_shuffled(last, dst)
| saangel/randomcoding | QuestionShuffling.py | Python | gpl-2.0 | 678 |
# This is STMcmc, for super tree mcmc.
# Started 18 March 2011, first commit 22 March 2011.
import pf,func
from Var import var
import math,random,string,sys,time,copy,os,cPickle,types,glob
import numpy as np
from Glitch import Glitch
from TreePartitions import TreePartitions
from Constraints import Constraints
from Tree import Tree
import datetime
import itertools
try:
import bitarray
except ImportError:
pass
def choose(n, k):
    """Binomial coefficient C(n, k).

    Multiplicative evaluation (fast way by Andrew Dalke, contrib.);
    returns 0 when k lies outside the range [0, n].
    """
    if k < 0 or k > n:
        return 0
    numerator = 1
    denominator = 1
    for step in range(1, min(k, n - k) + 1):
        numerator *= n
        denominator *= step
        n -= 1
    return numerator // denominator
# def nSplits(n):
# mySum = 0
# for k in range(2, n-1):
# mySum += choose(n-1, k)
# return mySum
def bForN(n):
    """Log of the number of unrooted binary tree topologies for n taxa.

    Computes sum_{k=4..n} log(2k - 5), i.e. log((2n - 5)!!), in log
    space so it stays finite for large n; returns 0.0 for n <= 3.
    (Same accumulation order as the original, so results are
    bit-identical.)
    """
    if n <= 3:
        return 0.0
    total = 0.0
    for k in range(4, n + 1):
        total += math.log((2 * k) - 5)
    return total
def BS2009_Eqn30_ZTApprox(n, beta, cT):
    """Log-space approximation of a tree-space partition function.

    Presumably Eqn 30 of Bryant & Steel (2009) -- confirm the citation.
    n: number of taxa; beta: inverse-temperature-like parameter;
    cT: a tree-count parameter (assumption: 2 <= cT <= n/2, per the
    accuracy note below -- confirm).
    """
    # This log version of this function differs from from the non-log
    # version (in log(result)) by at most 6.82e-13 for n up to 150,
    # over a wide range of beta (0.001 -- 1000) and cT (2 -- n/2)
    myLambda = cT/(2.0*n)
    tester = 0.5 * math.log((n - 3.)/myLambda)
    epsilon = math.exp(-2. * beta)
    bigANEpsilon = 1 + (((2. * n) - 3.) * epsilon) + (2. * ((n * n) - (4. * n) - 6.) * epsilon * epsilon)
    termA = math.log(bigANEpsilon + 6 * cT * epsilon * epsilon)
    if beta < tester:
        # Small-beta regime: take the larger of the two approximations.
        termB = -(2. * beta) * (n - 3.) + (myLambda * (math.exp(2. * beta) - 1.))
        termB += bForN(n)
        if termA > termB:
            return termA
        else:
            return termB
    else:
        return termA
def popcountA(k, nBits):
    """Count the set bits of *k* within bit positions 0 .. nBits-1.

    Bits above position nBits-1 are ignored, matching the original
    loop-based implementation (which also returned 0 for negative k).
    """
    if k < 0:
        return 0
    mask = (1 << nBits) - 1
    return bin(k & mask).count("1")
def bitReduce(bk, txBits, lLen, sLen, allOnes):
    """Project split key *bk* (over lLen taxa) onto the taxon subset
    flagged in *txBits* (which has sLen bits set).

    Bits of bk at positions kept by txBits are repacked contiguously.
    The result is then canonicalised so its lowest bit is 0, by
    complementing against *allOnes* (the sLen-bit all-ones mask) when
    necessary.  Returns (reduced key, popcount after normalisation).
    """
    reduced = 0
    out_pos = 0
    n_set = 0
    for src_pos in range(lLen):
        bit = 1 << src_pos
        if not (bit & txBits):
            continue  # taxon not in the subset: drop this bit
        if bit & bk:
            reduced |= 1 << out_pos
            n_set += 1
        out_pos += 1
    if reduced & 1:
        # Canonical form: the first retained taxon is always a 0.
        reduced = allOnes ^ reduced
        n_set = sLen - n_set
    return reduced, n_set
if 0: # test bitReduce
    # Stand-alone smoke test for bitReduce(), disabled by default.
    # NOTE(review): uses func.getSplitStringFromKey(), which is
    # presumably imported elsewhere in this module -- confirm before
    # enabling this block.
    sk = 6 # always at least 2 bits, even
    txBits = 30
    lLen = 5
    sLen = 4
    allOnes = 15
    print " sk: %3i %s" % (sk, func.getSplitStringFromKey(sk, lLen))
    print "taxBits: %3i %s" % (txBits, func.getSplitStringFromKey(txBits, lLen))
    rsk, popcount = bitReduce(sk, txBits, lLen, sLen, allOnes)
    print " rsk: %3i %s" % (rsk, func.getSplitStringFromKey(rsk, sLen))
    print " popcount %i" % popcount
    # expected output:
    # sk: 6 .**..
    # taxBits: 30 .****
    # rsk: 12 ..**
    # popcount 2
def maskedSymmetricDifference(skk, skSet, taxBits, longLen, shortLen, allOnes):
    """Symmetric difference (RF distance) of a masked supertree vs an input tree.

    skk is the list of longLen-bit internal split keys from the current
    supertree; skSet is the set of shortLen-bit split keys of an input
    tree, whose taxa are masked by taxBits.  Each supertree split is
    reduced onto the input tree's taxa with bitReduce(); trivial reduced
    splits (fewer than 2, or more than shortLen - 2, set bits) are
    discarded, and the symmetric difference with skSet is computed.

    Returns (symmetricDifference, nCherries), where nCherries is the
    number of cherries in the reduced supertree splits (needed for the
    Z_T approximation in BS2009_Eqn30_ZTApprox()).
    """
    if 0:
        print "-" * 50
        print "skk (skk_ppy1 from the current supertree)"
        for sk in skk:
            print func.getSplitStringFromKey(sk, longLen)
        print "skSet (from input tree)"
        for sk in skSet:
            print func.getSplitStringFromKey(sk, shortLen)
        print "taxBits:", taxBits, func.getSplitStringFromKey(taxBits, longLen)
    newSkk = []
    for sk in skk:
        reducedSk, popcount = bitReduce(sk, taxBits, longLen, shortLen, allOnes)
        if 0:
            print "taxBits: %s " % func.getSplitStringFromKey(taxBits, longLen),
            print "%4i %s " % (sk, func.getSplitStringFromKey(sk, longLen)),
            print "%4i %s %i" % (reducedSk, func.getSplitStringFromKey(reducedSk, shortLen), popcount)
        # Drop reduced splits that are trivial (uninformative) on the
        # input tree's taxon set.
        if popcount <= 1 or popcount >= (shortLen - 1):
            pass
        else:
            newSkk.append(reducedSk)
    # Duplicates can arise from the reduction, so use a set.
    newSkkSet = set(newSkk)
    #print newSkkSet, skSet
    #print "reduced supertree splits = newSkkSet = %s" % newSkkSet
    ret = len(newSkkSet.symmetric_difference(skSet))
    #print "symmetric difference %i" % ret
    # Count cherries among the reduced supertree splits: popcount of 2,
    # or of shortLen - 2 (the complement side).
    nCherries = 0
    for sk in newSkkSet:
        popcount = popcountA(sk, shortLen)
        if popcount == 2:
            nCherries += 1
        if popcount == (shortLen - 2): # not "elif", because they might both be True
            nCherries += 1
    #print "nCherries %i" % nCherries
    return ret, nCherries
def slowQuartetDistance(st, inputTree):
    """Quartet distance between the supertree st and inputTree.

    A dupe of st is pruned down to the taxa of inputTree, and the
    'scqdist' topologyDistance between the two trees is returned.
    """
    prunedSt = st.dupe()
    doomed = [lf for lf in prunedSt.iterLeavesNoRoot()
              if lf.name not in inputTree.taxNames]
    for lf in doomed:
        prunedSt.removeNode(lf)
    return prunedSt.topologyDistance(inputTree, metric='scqdist')
class STChain(object):
    def __init__(self, aSTMcmc):
        """One sampling chain of a supertree MCMC (STMcmc) run.

        Dupes the master tree into curTree and propTree, then computes
        the starting log likelihood under the model named by
        aSTMcmc.modelName -- one of the SR2008_rf variants (with the
        RF calculator chosen by aSTMcmc.stRFCalc), SPA, or QPA.
        """
        gm = ['STChain.__init__()']
        self.stMcmc = aSTMcmc
        self.tempNum = -1 # 'temp'erature, not 'temp'orary
        self.curTree = aSTMcmc.tree.dupe()
        self.propTree = aSTMcmc.tree.dupe()
        self.logProposalRatio = 0.0
        self.logPriorRatio = 0.0
        # The fastReducedRF calculator; only made if stRFCalc asks for it.
        self.frrf = None
        self.nInTreeSplits = 0
        if self.stMcmc.modelName.startswith('SR2008_rf'):
            self.curTree.beta = self.stMcmc.beta
            self.propTree.beta = self.stMcmc.beta
            # Three interchangeable RF calculators.
            if self.stMcmc.stRFCalc == 'purePython1':
                self.getTreeLogLike_ppy1()
            elif self.stMcmc.stRFCalc == 'fastReducedRF':
                self.startFrrf()
                self.getTreeLogLike_fastReducedRF()
            elif self.stMcmc.stRFCalc == 'bitarray':
                self.setupBitarrayCalcs()
                self.getTreeLogLike_bitarray()
            self.curTree.logLike = self.propTree.logLike
        elif self.stMcmc.modelName.startswith('SPA'):
            self.curTree.spaQ= self.stMcmc.spaQ
            self.propTree.spaQ = self.stMcmc.spaQ
            for t in self.stMcmc.trees:
                self.nInTreeSplits += len(t.splSet)
            #print "Got nInTreeSplits %s" % self.nInTreeSplits
            self.setupBitarrayCalcs()
            self.getTreeLogLike_spa_bitarray()
            self.curTree.logLike = self.propTree.logLike
        elif self.stMcmc.modelName.startswith('QPA'):
            self.curTree.spaQ= self.stMcmc.spaQ
            self.propTree.spaQ = self.stMcmc.spaQ
            # Each set of 4 taxa has 3 possible quartet topologies.
            self.nPossibleQuartets = choose(self.stMcmc.tree.nTax, 4) * 3
            self.getTreeLogLike_qpa_slow()
            self.curTree.logLike = self.propTree.logLike
        else:
            gm.append('Unknown modelName %s' % self.stMcmc.modelName)
            raise Glitch, gm
        if 0:
            print "STChain init()"
            self.curTree.draw()
            print "logLike is %f" % self.curTree.logLike
    def getTreeLogLike_qpa_slow(self):
        """Log like of propTree under the QPA (quartet) model, slowly.

        Builds the set of quartets implied by the supertree's internal
        splits, then scores each input tree quartet: log(q) if the
        supertree has it, log(r) if not.
        """
        gm = ["STChain.getTreeLogLike_qpa_slow()"]
        if self.propTree.spaQ > 1. or self.propTree.spaQ <= 0.0:
            gm.append("bad propTree.spaQ value %f" % self.propTree.spaQ)
            raise Glitch, gm
        # Make internal-node split keys as the union of the children's keys.
        for n in self.propTree.iterInternalsPostOrder():
            if n == self.propTree.root:
                break
            n.stSplitKey = n.leftChild.stSplitKey
            p = n.leftChild.sibling
            while p:
                n.stSplitKey |= p.stSplitKey # "or", in-place
                p = p.sibling
        self.propTree.skk = [n.stSplitKey for n in self.propTree.iterInternalsNoRoot()]
        # Each split induces quartets: 2 taxa from one side ("downs") and
        # 2 from the other ("ups"), stored as a canonically-ordered 4-tuple.
        self.propTree.qSet = set()
        for sk in self.propTree.skk:
            ups = [txBit for txBit in self.propTree.taxBits if (sk & txBit)]
            downs = [txBit for txBit in self.propTree.taxBits if not (sk & txBit)]
            for down in itertools.combinations(downs, 2):
                if down[0] > down[1]:
                    down = (down[1], down[0])
                for up in itertools.combinations(ups, 2):
                    if up[0] > up[1]:
                        up = (up[1], up[0])
                    # The pair with the smaller first taxon bit goes first.
                    if down[0] < up[0]:
                        self.propTree.qSet.add(down+up)
                    else:
                        self.propTree.qSet.add(up+down)
        #print self.propTree.qSet
        self.propTree.nQuartets = len(self.propTree.qSet)
        if self.propTree.nQuartets:
            q = self.propTree.spaQ / self.propTree.nQuartets
            R = 1. - self.propTree.spaQ
            r = R / (self.nPossibleQuartets - self.propTree.nQuartets)
            logq = math.log(q)
        else:
            # No supertree quartets (eg a star tree).  logq is not made,
            # but that is safe: nothing can be in an empty qSet below.
            R = 1.
            r = R / self.nPossibleQuartets
        logr = math.log(r)
        self.propTree.logLike = 0.0
        for it in self.stMcmc.trees:
            for qu in it.qSet:
                if qu in self.propTree.qSet:
                    self.propTree.logLike += logq
                else:
                    self.propTree.logLike += logr
    def getTreeLogLike_spa_bitarray(self):
        """Log like of propTree under the SPA model, using bitarrays.

        For each input tree, the supertree's splits are masked down to
        the input tree's taxa; trivial and duplicate masked splits are
        dropped; then each input tree split contributes log(q) if the
        reduced supertree has it, or log(r) if not.
        """
        gm = ["STChain.getTreeLogLike_spa_bitarray"]
        if self.propTree.spaQ > 1. or self.propTree.spaQ <= 0.0:
            gm.append("bad propTree.spaQ value %f" % self.propTree.spaQ)
            raise Glitch, gm
        # slowCheck cross-checks the bitarray arithmetic against
        # Tree.makeSplitKeys(); debugging only.
        slowCheck = False
        if slowCheck:
            slowCheckLogLike = 0.0
            for it in self.stMcmc.trees:
                it.makeSplitKeys()
                it.skk = [n.br.splitKey for n in it.iterInternalsNoRoot()]
        self.propTree.logLike = 0.0
        for it in self.stMcmc.trees:
            if 0:
                print "-" * 50
                it.draw()
                print "baTaxBits %s" % it.baTaxBits
                print "firstTax at %i" % it.firstTax
            if slowCheck:
                # Make a pruned dupe of the supertree with only it's taxa.
                stDupe = self.propTree.dupe()
                toRemove = []
                for n in stDupe.iterLeavesNoRoot():
                    if n.name not in it.taxNames:
                        toRemove.append(n)
                for n in toRemove:
                    stDupe.removeNode(n)
                stDupe.taxNames = it.taxNames
                stDupe.makeSplitKeys(makeNodeForSplitKeyDict=True)
            # No need to consider (masked) splits with less than two
            # 1s or more than it.nTax - 2 1s.
            upperGood = it.nTax - 2
            relevantStSplits = []
            for n in self.propTree.iterInternalsNoRoot():
                # Choose which spl (spl or spl2) based on it.firstTax)
                if n.ss.spl[it.firstTax]:
                    n.ss.theSpl = n.ss.spl
                else:
                    n.ss.theSpl = n.ss.spl2
                n.ss.maskedSplitWithTheFirstTaxOne = n.ss.theSpl & it.baTaxBits
                n.ss.onesCount = n.ss.maskedSplitWithTheFirstTaxOne.count()
                if 0:
                    print "bigT node %i" % n.nodeNum
                    print " theSpl is %s" % n.ss.theSpl
                    print " maskedSplitWithTheFirstTaxOne %s" % n.ss.maskedSplitWithTheFirstTaxOne
                    print " onesCount %i" % n.ss.onesCount
                    if n.ss.onesCount >= 2 and n.ss.onesCount <= upperGood:
                        print " -> relevant"
                    else:
                        print " -> not relevant"
                if n.ss.onesCount >= 2 and n.ss.onesCount <= upperGood:
                    relevantStSplits.append(n.ss)
            # Masking can make different supertree splits identical; keep
            # only the first of each.
            nonRedundantStSplits = []
            for ss in relevantStSplits:
                alreadyIn = False
                for ssB in nonRedundantStSplits:
                    if ss.maskedSplitWithTheFirstTaxOne == ssB.maskedSplitWithTheFirstTaxOne:
                        alreadyIn = True
                        break
                if alreadyIn == False:
                    nonRedundantStSplits.append(ss)
            if 0:
                for ss in relevantStSplits:
                    ss.dump()
                print "There are %i relevant splits in the st for this it." % len(relevantStSplits)
                for ss in nonRedundantStSplits:
                    ss.dump()
                print "There are %i non-redundant splits in the st for this it." % len(nonRedundantStSplits)
            S_st = len(nonRedundantStSplits)   # S_st is the number of splits in the reduced supertree
            if slowCheck:
                #stDupe.draw()
                #print "the drawing above is stDupe"
                slowCheckS_st = len([n for n in stDupe.iterInternalsNoRoot()])
                assert S_st == slowCheckS_st
            S = 2**(it.nTax - 1) - (it.nTax + 1)  # S is the number of possible splits in an it-sized tree
            #print "S=%i, S_st=%i" % (S, S_st)
            if S_st:
                q = self.propTree.spaQ / S_st
                R = 1. - self.propTree.spaQ
                r = R/(S - S_st)
                #print "q=%f" % q
                logq = math.log(q)
            else:
                # No reduced supertree splits; logq is not made, but that
                # is safe -- the dict lookup below can then never hit.
                R = 1.
                r = R/S
            #print "r=%f" % r
            logr = math.log(r)
            # for ss in nonRedundantStSplits:
            #     ss.bytes = ss.maskedSplitWithTheFirstTaxOne.tobytes()
            #     ret = ss.bytes in it.splSet
            #     if ret:
            #         print "  iT has reduced split %s" % ss.bytes
            #         self.propTree.logLike += logq
            #     else:
            #         print "  iT does not have reduced split %s" % ss.bytes
            #         self.propTree.logLike += logr
            # Index the reduced supertree splits by their byte string, then
            # score each input tree split.
            mySSForBytesDict = {}
            for ss in nonRedundantStSplits:
                ss.bytes = ss.maskedSplitWithTheFirstTaxOne.tobytes()
                mySSForBytesDict[ss.bytes] = ss
            for spl in it.splSet:
                ret = mySSForBytesDict.get(spl)
                if ret:
                    #print "  st has reduced split %s" % spl
                    self.propTree.logLike += logq
                else:
                    #print "  st does not have reduced split %s" % spl
                    self.propTree.logLike += logr
            if slowCheck:
                for sk in it.skk:
                    ret = stDupe.nodeForSplitKeyDict.get(sk)
                    if ret:
                        slowCheckLogLike += logq
                    else:
                        slowCheckLogLike += logr
                myDiff = self.propTree.logLike - slowCheckLogLike
                if math.fabs(myDiff) > 1.e-12:
                    gm.append("Bad like calc. slowCheck %f, bitarray %f, diff %g" % (
                        slowCheckLogLike, self.propTree.logLike, myDiff))
                    raise Glitch, gm
def setupBitarrayCalcs(self):
# Prepare self.propTree (ie bigT). First make n.stSplitKeys. These are temporary.
for n in self.propTree.iterPostOrder():
if n == self.propTree.root:
break
if n.isLeaf:
spot = self.stMcmc.taxNames.index(n.name)
self.stMcmc.tBits[spot] = True
n.stSplitKey = bitarray.bitarray(self.stMcmc.tBits)
self.stMcmc.tBits[spot] = False
else:
n.stSplitKey = n.leftChild.stSplitKey.copy()
p = n.leftChild.sibling
while p:
n.stSplitKey |= p.stSplitKey # "or", in-place
p = p.sibling
# Next transfer the internal node split keys to BigTSplitStuff objects
for n in self.propTree.iterInternalsNoRoot():
n.ss = BigTSplitStuff()
n.ss.spl = n.stSplitKey
n.ss.spl2 = n.ss.spl.copy()
n.ss.spl2.invert()
# This next one will be empty, not used immediately, but will
# be used after supertree rearrangements.
self.propTree.root.ss = BigTSplitStuff()
def refreshBitarrayPropTree(self):
# Refresh self.propTree (ie bigT) after a topology change.
for n in self.propTree.iterPostOrder():
if n == self.propTree.root:
break
if n.isLeaf:
pass
else:
n.stSplitKey = n.leftChild.stSplitKey.copy()
p = n.leftChild.sibling
while p:
n.stSplitKey |= p.stSplitKey # "or", in-place
p = p.sibling
# Next transfer the internal node split keys to BigTSplitStuff objects
for n in self.propTree.iterInternalsNoRoot():
n.ss.spl = n.stSplitKey
n.ss.spl2 = n.ss.spl.copy()
n.ss.spl2.invert()
def startFrrf(self):
# if using self.stMcmc.stRFCalc= 'fastReducedRF'
self.frrf = self.stMcmc.Frrf(len(self.stMcmc.taxNames))
self.bigTr = self.frrf.setBigT(len(self.propTree.nodes), self.propTree.nTax, self.propTree.postOrder)
for n in self.propTree.nodes:
if n.parent:
self.bigTr.setParent(n.nodeNum, n.parent.nodeNum)
if n.leftChild:
self.bigTr.setLeftChild(n.nodeNum, n.leftChild.nodeNum)
else:
self.bigTr.setNodeTaxNum(n.nodeNum, self.stMcmc.taxNames.index(n.name))
if n.sibling:
self.bigTr.setSibling(n.nodeNum, n.sibling.nodeNum)
if 1:
for t in self.stMcmc.trees:
tr = self.frrf.appendInTree(len(t.nodes), t.nTax, t.postOrder)
for n in t.nodes:
if n.parent:
tr.setParent(n.nodeNum, n.parent.nodeNum)
if n.leftChild:
tr.setLeftChild(n.nodeNum, n.leftChild.nodeNum)
else:
tr.setNodeTaxNum(n.nodeNum, self.stMcmc.taxNames.index(n.name))
if n.sibling:
tr.setSibling(n.nodeNum, n.sibling.nodeNum)
self.frrf.setInTreeTaxBits()
self.frrf.setInTreeInternalBits()
self.frrf.maybeFlipInTreeBits()
self.frrf.setBigTInternalBits()
#self.frrf.dump()
    def getTreeLogLike_ppy1(self):
        """Log like of propTree under SR2008_rf models, in pure python.

        For each input tree, the masked RF distance from the supertree
        is computed, multiplied by beta, and -- for the _aZ variants --
        the approximate log(Z_T) normalizing constant is subtracted.
        """
        gm = ['STChain.getTreeLogLike_pp1']
        self.propTree.makeSplitKeys()
        self.propTree.skk = [n.br.splitKey for n in self.propTree.iterInternalsNoRoot()]
        self.propTree.logLike = 0.0
        for t in self.stMcmc.trees:
            # Get the distance
            thisDist = None
            if self.stMcmc.modelName.startswith('SR2008_rf'):
                thisDist, nCherries = maskedSymmetricDifference(self.propTree.skk, t.skSet,
                                                                t.taxBits, self.stMcmc.nTax, t.nTax, t.allOnes)
            else:
                raise Glitch, "STChain.getTreeLogLike_ppy1() unknown model '%s'" % self.stMcmc.modelName
            # Now multiply by beta, and do approximate Z_T
            assert thisDist != None
            beta_distance = self.propTree.beta * thisDist
            if self.stMcmc.modelName == 'SR2008_rf_ia':
                # "ia" -- ignore the normalizing constant entirely.
                self.propTree.logLike -= beta_distance
            elif self.stMcmc.modelName.startswith('SR2008_rf_aZ'):
                log_approxZT = BS2009_Eqn30_ZTApprox(t.nTax, self.propTree.beta, nCherries)
                if 0:
                    # Testing, testing ... exact values for a specific
                    # 6-taxon, beta=0.1 case.
                    assert self.propTree.beta == 0.1
                    assert t.nTax == 6
                    if nCherries == 2:
                        log_approxZT = 4.13695897651 # exact
                    elif nCherries == 3:
                        log_approxZT = 4.14853562562
                self.propTree.logLike -= log_approxZT
                self.propTree.logLike -= beta_distance
            else:
                gm.append("Unknown modelName %s" % self.stMcmc.modelName)
                raise Glitch, gm
def getTreeLogLike_fastReducedRF(self):
slowCheck = False
if slowCheck:
self.getTreeLogLike_ppy1()
savedLogLike = self.propTree.logLike
self.frrf.wipeBigTPointers()
for n in self.propTree.nodes:
if n.parent:
self.bigTr.setParent(n.nodeNum, n.parent.nodeNum)
if n.leftChild:
self.bigTr.setLeftChild(n.nodeNum, n.leftChild.nodeNum)
#else:
# bigTr.setNodeTaxNum(n.nodeNum, tNames.index(n.name))
if n.sibling:
self.bigTr.setSibling(n.nodeNum, n.sibling.nodeNum)
self.frrf.setBigTInternalBits()
if self.stMcmc.modelName == 'SR2008_rf_ia':
sd = self.frrf.getSymmDiff()
self.propTree.logLike = -sd * self.propTree.beta
elif self.stMcmc.modelName.startswith('SR2008_rf_aZ'):
self.propTree.logLike = self.frrf.getLogLike(self.propTree.beta)
if slowCheck:
if self.propTree.logLike != savedLogLike:
gm = ['STChain.getTreeLogLike_fastReducedRF()']
gm.append("Slow likelihood %f" % savedLogLike)
gm.append("Fast likelihood %f" % self.propTree.logLike)
raise Glitch, gm
def getTreeLogLike_bitarray(self):
self.propTree.logLike = 0.0
slowCheck = False
if slowCheck:
self.propTree.makeSplitKeys()
self.propTree.skk = [n.br.splitKey for n in self.propTree.iterInternalsNoRoot()]
for t in self.stMcmc.trees:
if 0:
print "-" * 50
t.draw()
print "baTaxBits %s" % t.baTaxBits
print "firstTax at %i" % t.firstTax
usables = [] # splitStuff objects with onesCount >= 2 and <= t.nTax = 2
# No need to consider (masked) splits with less than two
# 1s or more than nTax - 2 1s. The nTax depends on the
# input tree.
upperGood = t.nTax - 2
for n in self.propTree.iterInternalsNoRoot():
# Choose which spl (spl or spl2) based on t.firstTax)
if n.ss.spl[t.firstTax]:
n.ss.theSpl = n.ss.spl
else:
n.ss.theSpl = n.ss.spl2
n.ss.maskedSplitWithTheFirstTaxOne = n.ss.theSpl & t.baTaxBits
n.ss.onesCount = n.ss.maskedSplitWithTheFirstTaxOne.count()
if 0:
print "bigT node %i" % n.nodeNum
print " theSpl is %s" % n.ss.theSpl
print " maskedSplitWithTheFirstTaxOne %s" % n.ss.maskedSplitWithTheFirstTaxOne
print " onesCount %i" % n.ss.onesCount
if n.ss.onesCount >= 2 and n.ss.onesCount <= upperGood:
print " -> used"
else:
print " -> not used"
if n.ss.onesCount >= 2 and n.ss.onesCount <= upperGood:
usables.append(n.ss)
usablesDict = {}
for usable in usables:
usable.bytes = usable.maskedSplitWithTheFirstTaxOne.tobytes()
usablesDict[usable.bytes] = usable
splSet = set() # bytes, for RF calculation
for usable in usables:
# splSet.add(n.ss.maskedSplitWithTheFirstTaxOne.tobytes())
splSet.add(usable.bytes)
thisBaRF = len(splSet.symmetric_difference(t.splSet))
if slowCheck: # with purePython1
thisPPyRF, thisPPyNCherries = maskedSymmetricDifference(self.propTree.skk, t.skSet,
t.taxBits, self.stMcmc.nTax, t.nTax, t.allOnes)
if thisBaRF != thisPPyRF:
raise Glitch, "bitarray and purePython1 RF calcs differ."
beta_distance = self.propTree.beta * thisBaRF
if self.stMcmc.modelName == 'SR2008_rf_ia':
self.propTree.logLike -= beta_distance
elif self.stMcmc.modelName.startswith('SR2008_rf_aZ'):
nCherries = 0
for ba in splSet:
theSS = usablesDict[ba]
#theSS.dump()
if theSS.onesCount == 2:
nCherries += 1
if theSS.onesCount == upperGood:
nCherries += 1
if slowCheck:
if nCherries != thisPPyNCherries:
raise Glitch, "bitarray and purePython1 nCherries calcs differ."
log_approxZT = BS2009_Eqn30_ZTApprox(t.nTax, self.propTree.beta, nCherries)
self.propTree.logLike -= log_approxZT
self.propTree.logLike -= beta_distance
else:
gm.append("Unknown model %s" % self.stMcmc.modelName)
raise Glitch, gm
    def proposePolytomy(self, theProposal):
        """Propose adding or deleting an internal edge (Lewis et al style).

        On a star tree only add-edge is possible; on a fully-resolved
        tree only delete-edge; otherwise one of the two is chosen with
        equal probability.  Sets theProposal.doAbort when a delete is
        wanted but no candidate node exists.
        """
        theProposal.doAbort = False
        dbug = False
        if dbug:
            #print "proposePolytomy() starting with this tree ..."
            #self.propTree.draw(width=80, addToBrLen=0.2)
            print "j There are %i internal nodes." % self.propTree.nInternalNodes
            if self.propTree.nInternalNodes == 1:
                print "-> so its a star tree -> proposeDeleteEdge is not possible."
            elif self.propTree.nInternalNodes == self.propTree.nTax - 2:
                print "-> so its a fully-resolved tree, so proposeAddEdge is not possible."
        if self.propTree.nInternalNodes == 1: # a star tree
            self.proposeAddEdge(theProposal)
        elif self.propTree.nInternalNodes == self.propTree.nTax - 2:
            # Fully resolved, so only delete-edge is possible.
            candidateNodes = self._getCandidateNodesForDeleteEdge()
            if candidateNodes:
                self.proposeDeleteEdge(theProposal, candidateNodes)
            else:
                #gm = ["proposePolytomy()"]
                #gm.append("The tree is fully resolved, so I can't proposeAddEdge()")
                #gm.append("But there are no suitable nodes to remove.")
                #raise Glitch, gm
                theProposal.doAbort = True
                self.curTree._nInternalNodes = self.propTree._nInternalNodes
                return
        else:
            r = random.random()
            #r = 0.4
            if r < 0.5:
                self.proposeAddEdge(theProposal)
            else:
                candidateNodes = self._getCandidateNodesForDeleteEdge()
                if candidateNodes:
                    self.proposeDeleteEdge(theProposal, candidateNodes)
                else:
                    # Nothing deletable; fall back to an add.
                    self.proposeAddEdge(theProposal)
        #if self.mcmc.constraints:
        #    print "checkSplitKeys() at the end of polytomy"
        #    self.propTree.checkSplitKeys()
    def proposeAddEdge(self, theProposal):
        """Propose resolving a polytomy by adding a new internal edge.

        A polytomy is chosen at random; a random subset (>= 2) of its
        children is moved onto a newly created node; and the Hastings
        ratio and prior ratio (following Lewis, Holder and Holsinger)
        are set on self.logProposalRatio and self.logPriorRatio.
        """
        gm = ["STChain.proposeAddEdge()"]
        #print "proposeAddEdge() here"
        dbug = False
        pTree = self.propTree
        if 0:
            print "proposeAddEdge(), starting with this tree ..."
            pTree.draw()
            print "k There are %i internal nodes." % pTree.nInternalNodes
            print "root is node %i" % pTree.root.nodeNum
        allPolytomies = []
        for n in pTree.iterInternalsNoRoot():
            if n.getNChildren() > 2:
                allPolytomies.append(n)
        # The root is a polytomy only if it has more than 3 children.
        if pTree.root.getNChildren() > 3:
            allPolytomies.append(pTree.root)
        theChosenPolytomy = random.choice(allPolytomies)
        # We want to choose one of the possible ways to add a node.  See
        # Lewis et al page 246, left top.  "The number of distinct ways of
        # dividing k edges into two groups, making sure that at least 3
        # edges are attached to each node afterwards, is 2^{k-1} - k - 1".
        # For non-root polytomies (with 3 or more children), it is
        # straightforward, but for root polytomies (ie with 4 or more
        # children) it is different.  I think in the case of root
        # polytomies that they will be equivalent to non-root polytomies
        # if I arbitrarily consider one randomly chosen child node to
        # take the role that the parent takes in the non-root-polytomies.
        # So a 4-child root will be considered to have a parent-like node
        # and 3 children.
        if theChosenPolytomy != pTree.root:
            nChildren = theChosenPolytomy.getNChildren()
            k = nChildren + 1
            childrenNodeNums = pTree.getChildrenNums(theChosenPolytomy)
        else:
            # Its the root.  So we say that a random child takes the role
            # of the "parent", for purposes of these calculations.
            nChildren = theChosenPolytomy.getNChildren() - 1 # n - 1 children
            k = nChildren + 1
            childrenNodeNums = pTree.getChildrenNums(theChosenPolytomy) # Yes, all children.
        nPossibleWays = math.pow(2, k-1) - k - 1
        if dbug:
            print "These nodes are polytomies: %s" % [n.nodeNum for n in allPolytomies]
            print "We randomly choose to do node %i" % theChosenPolytomy.nodeNum
            print "It has %i children, so k=%i, so there are %i possible ways to add a node." % (
                nChildren, k, nPossibleWays)
        # We want to choose one of the possible ways to add a node, but we
        # want to choose it randomly.  I'll describe it for the case with
        # nChildren=5, so k is 6.  We know already that there are
        # nPossibleWays=25 different ways to add a node.  The complication
        # is that we could make a new group of 2, 3, or 4 nInNewGroup, and it will be
        # different numbers of possible ways in each.  The numbers of each are given by
        # func.nChoosek(), so there are 10 ways to make a group of 2 from 5
        # children, 10 ways to make a group of 3 from 5 children, and 5
        # ways to make a group of 4 from 5 children.  So thats [10, 10,
        # 5], which sums to 25 (nPossibleWays).  So we can make a
        # cumulative sum list ie [10, 20, 25], and use it to choose one
        # group randomly.
        nChooseKs = []
        for i in range(2, nChildren):
            nChooseKs.append(func.nChooseK(nChildren, i))
        cumSum = [nChooseKs[0]]
        for i in range(len(nChooseKs))[1:]:
            cumSum.append(nChooseKs[i] + cumSum[i-1])
        ran = random.randrange(nPossibleWays)
        for i in range(len(cumSum)):
            if ran < cumSum[i]:
                break
        nInNewGroup = i + 2
        # Ok, so we have decided that of the nChildren of
        # theChosenPolytomy, we will make a new node with a group of
        # nInNewGroup of them.  For that, we can use random.sample().
        newChildrenNodeNums = random.sample(childrenNodeNums, nInNewGroup)
        if dbug:
            print "The nChooseKs are %s" % nChooseKs
            print "The cumSum is %s" % cumSum
            print "Since there are nPossibleWays=%i, we choose a random number from 0-%i" % (
                nPossibleWays, nPossibleWays-1)
            print "->We chose a random number: %i" % ran
            print "So we choose the group at index %i, which means nInNewGroup=%i" % (i, nInNewGroup)
            print "So we make a new node with newChildrenNodeNums %s" % newChildrenNodeNums
        #sys.exit()
        # Choose to add a node between theChosenPolytomy and the first in
        # the list of newChildrenNodeNums.  The node that we add will be
        # chosen from pTree.nodes for the first node where both the parent
        # and the leftChild are None.
        firstNode = pTree.nodes[newChildrenNodeNums[0]]
        for newNode in pTree.nodes:
            if not newNode.parent and not newNode.leftChild:
                break
        #print "Got newNode = %i" % newNode.nodeNum
        # Add the newNode between theChosenPolytomy and firstNode
        newNode.parent = theChosenPolytomy
        newNode.leftChild = firstNode
        firstNode.parent = newNode
        if theChosenPolytomy.leftChild == firstNode:
            theChosenPolytomy.leftChild = newNode
        else:
            # Find the sibling that points at firstNode and re-point it.
            oldCh = theChosenPolytomy.leftChild
            while oldCh.sibling != firstNode:
                oldCh = oldCh.sibling
            oldCh.sibling = newNode
        if firstNode.sibling:
            newNode.sibling = firstNode.sibling
            firstNode.sibling = None
        pTree.setPreAndPostOrder()
        pTree._nInternalNodes += 1
        if 0:
            #pTree.setPreAndPostOrder()
            pTree.draw()
        # Move the rest of the chosen children onto the new node.
        for nodeNum in newChildrenNodeNums[1:]:
            n = pTree.pruneSubTreeWithoutParent(nodeNum)
            pTree.reconnectSubTreeWithoutParent(n, newNode)
        # Calculate the rawSplitKey and splitKey.
        # if self.mcmc.constraints:
        #     children = [n for n in newNode.iterChildren()]
        #     x = children[0].br.rawSplitKey
        #     for n in children[1:]:
        #         y = n.br.rawSplitKey
        #         x = x | y  # '|' is bitwise "OR".
        #     newNode.br.rawSplitKey = x
        #     if 1 & newNode.br.rawSplitKey: # Ie "Does rawSplitKey contain a 1?" or "Is rawSplitKey odd?"
        #         if self.mcmc.constraints:
        #             newNode.br.splitKey = self.mcmc.constraints.allOnes ^ newNode.br.rawSplitKey # "^" is xor, a bit-flipper.
        #         else:
        #             allOnes = 2L**(self.propTree.nTax) - 1
        #             newNode.br.splitKey = allOnes ^ newNode.br.rawSplitKey
        #     else:
        #         newNode.br.splitKey = newNode.br.rawSplitKey
        # Its a newly-added node, possibly in a new context.  We need to
        # deal with model stuff if it isHet.  The model.isHet if any part
        # isHet.
        if dbug:
            pTree.setPreAndPostOrder()
            pTree.draw()
        # Now the Hastings ratio.  First calculate gamma_B.  If the
        # current tree is a star tree (nInternalNodes == 1) and the
        # proposed tree is not fully resolved (ie is less than
        # len(self.propTree.nodes) - 2), then gamma_B is 0.5.
        if (self.curTree.nInternalNodes == 1) and (pTree.nInternalNodes < (len(pTree.nodes) - 2)):
            gamma_B = 0.5
        # If the proposed tree is fully resolved and the current tree is not the star tree
        elif (pTree.nInternalNodes == (len(pTree.nodes) - 2)) and (self.curTree.nInternalNodes > 1):
            gamma_B = 2.0
        else:
            gamma_B = 1.0
        # n_e is number of internal edges present before the Add-edge move.  That would be self.curTree.nInternalNodes - 1
        n_e = float(self.curTree.nInternalNodes - 1)
        # n_p is the number of polytomies present before the move, len(allPolytomies)
        n_p = float(len(allPolytomies))
        hastingsRatio = (gamma_B * n_p * float(nPossibleWays)) / (1.0 + n_e)
        if dbug:
            print "The new node is given a random branch length of %f" % newNode.br.len
            print "For the Hastings ratio ..."
            print "gamma_B is %.1f" % gamma_B
            print "n_e is %.0f" % n_e
            print "k is (still) %i, and (2^{k-1} - k - 1) = nPossibleWays is still %i" % (k, nPossibleWays)
            print "n_p = %.0f is the number of polytomies present before the move." % n_p
            print "So the hastings ratio is %f" % hastingsRatio
        self.logProposalRatio = math.log(hastingsRatio)
        if 0:
            priorRatio = self.mcmc.tunings.brLenPriorLambda * math.exp(- self.mcmc.tunings.brLenPriorLambda * newNode.br.len)
            if dbug:
                print "The self.mcmc.tunings.brLenPriorLambda is %f" % self.mcmc.tunings.brLenPriorLambda
                print "So the prior ratio is %f" % priorRatio
            self.logPriorRatio = math.log(priorRatio)
            # The Jacobian
            jacobian = 1.0 / (self.mcmc.tunings.brLenPriorLambda * math.exp(- self.mcmc.tunings.brLenPriorLambda * newNode.br.len))
            self.logJacobian = math.log(jacobian)
            print "logPriorRatio = %f, logJacobian = %f" % (self.logPriorRatio, self.logJacobian)
        # Here I pull a fast one, as explained in Lewis et al.  The
        # priorRatio and the Jacobian terms cancel out.  So the logs might
        # as well be zeros.
        self.logPriorRatio = 0.0
        #self.logJacobian = 0.0
        # That was easy, wasn't it?
        if self.stMcmc.tunings.doPolytomyResolutionClassPrior:
            # We are gaining a node.  So the prior ratio is T_{n,m + 1} /
            # (T_{n,m} * C) .  We have the logs, and the result is the
            # log.
            if 0:
                print "-" * 30
                print 'curTree.nInternalNodes', self.curTree.nInternalNodes
                print 'pTree.nInternalNodes', pTree.nInternalNodes
                print 'logBigT[curTree.nInternalNodes]', theProposal.logBigT[self.curTree.nInternalNodes]
                #print math.exp(theProposal.logBigT[self.curTree.nInternalNodes])
                print 'C ', self.stMcmc.tunings.polytomyPriorLogBigC
                print 'logBigT[pTree.nInternalNodes]', theProposal.logBigT[pTree.nInternalNodes]
                #print math.exp(theProposal.logBigT[pTree.nInternalNodes])
                print "-" * 30
            self.logPriorRatio = (theProposal.logBigT[self.curTree.nInternalNodes] -
                                  (self.stMcmc.tunings.polytomyPriorLogBigC +
                                   theProposal.logBigT[pTree.nInternalNodes]))
        else:
            if self.stMcmc.tunings.polytomyPriorLogBigC:
                self.logPriorRatio = -self.stMcmc.tunings.polytomyPriorLogBigC
            else:
                self.logPriorRatio = 0.0
        #print "gaining a node, m %2i->%2i.  logPriorRatio is %f" % (self.curTree.nInternalNodes,
        #                                                            pTree.nInternalNodes, self.logPriorRatio)
def _getCandidateNodesForDeleteEdge(self):
pTree = self.propTree
nodesWithInternalEdges = [n for n in pTree.iterInternalsNoRoot()]
# Remove any that might violate constraints.
# if self.mcmc.constraints:
# nodesToRemove = []
# for n in nodesWithInternalEdges:
# if n.br.splitKey in self.mcmc.constraints.constraints:
# nodesToRemove.append(n)
# for n in nodesToRemove:
# nodesWithInternalEdges.remove(n)
return nodesWithInternalEdges
    def proposeDeleteEdge(self, theProposal, candidateNodes):
        """Propose deleting an internal edge, merging its node into the parent.

        A node is chosen at random from candidateNodes, its children are
        re-attached to its parent (creating or enlarging a polytomy),
        and the Hastings ratio and prior ratio (following Lewis, Holder
        and Holsinger) are set on self.logProposalRatio and
        self.logPriorRatio.
        """
        dbug = False
        pTree = self.propTree
        #print "doing proposeDeleteEdge()"
        if 0:
            print "proposeDeleteEdge(), starting with this tree ..."
            pTree.draw()
            print "m There are %i internal nodes (before deleting the edge)." % pTree.nInternalNodes
        if not candidateNodes:
            raise Glitch, "proposeDeleteEdge() could not find a good node to attempt to delete."
        theChosenNode = random.choice(candidateNodes)
        if dbug:
            print "There are %i candidateNodes." % len(candidateNodes)
            print "node nums %s" % [n.nodeNum for n in candidateNodes]
            print "Randomly choose node %s" % theChosenNode.nodeNum
        # Splice theChosenNode's children into its place among the
        # parent's children, then wipe it.
        theNewParent = theChosenNode.parent
        theRightmostChild = theChosenNode.rightmostChild()
        theLeftSib = theChosenNode.leftSibling()
        if theLeftSib:
            theLeftSib.sibling = theChosenNode.leftChild
        else:
            theNewParent.leftChild = theChosenNode.leftChild
        for n in theChosenNode.iterChildren():
            n.parent = theNewParent
        theRightmostChild.sibling = theChosenNode.sibling
        theChosenNode.wipe()
        pTree.setPreAndPostOrder()
        pTree._nInternalNodes -= 1
        #print pTree.preOrder
        #if dbug:
        #    pTree.draw()
        # Hastings ratio.  First calculate the gamma_D.  If the current
        # tree is fully resolved and the proposed tree is not the star
        # tree, then gamma_D is 0.5
        if (self.curTree.nInternalNodes == len(pTree.nodes) - 2) and pTree.nInternalNodes != 1:
            gamma_D = 0.5
        # If the proposed tree is the star tree and the current tree is not fully resolved
        elif (self.curTree.nInternalNodes < len(pTree.nodes) - 2) and pTree.nInternalNodes == 1:
            gamma_D = 2.
        else:
            gamma_D = 1.
        # n_e is the number of internal edges in existence before the move, which would be nInternalNodes - 1
        n_e = float(self.curTree.nInternalNodes - 1)
        # nStar_p is the number of polytomies in the tree after the move.
        nStar_p = 0
        for n in pTree.iterInternalsNoRoot():
            if n.getNChildren() > 2:
                nStar_p += 1
        if pTree.root.getNChildren() > 3:
            nStar_p += 1
        nStar_p = float(nStar_p)
        # kStar is the number of edges emanating from the polytomy created (or enlarged) by the move.
        kStar = theNewParent.getNChildren()
        if theNewParent.parent:
            kStar += 1
        hastingsRatio = (gamma_D * n_e) / (nStar_p * (2**(kStar - 1) - kStar - 1))
        self.logProposalRatio = math.log(hastingsRatio)
        if 0:
            # Now the prior ratio.  The prior probability density f(nu) for a
            # branch length is lambda * exp(-lambda * nu).  To a first
            # approximation, with equal priors on topologies, the prior ratio
            # is 1/f(nu)
            priorRatio = 1.0/(self.mcmc.tunings.brLenPriorLambda * math.exp(- self.mcmc.tunings.brLenPriorLambda * theChosenNode.br.len))
            if dbug:
                print "The self.mcmc.tunings.brLenPriorLambda is %f" % self.mcmc.tunings.brLenPriorLambda
                print "So the prior ratio is %f" % priorRatio
            self.logPriorRatio = math.log(priorRatio)
            # The Jacobian
            jacobian = self.mcmc.tunings.brLenPriorLambda * math.exp(- self.mcmc.tunings.brLenPriorLambda * theChosenNode.br.len)
            self.logJacobian = math.log(jacobian)
            print "logPriorRatio = %f, logJacobian = %f" % (self.logPriorRatio, self.logJacobian)
        # Here I pull a fast one, as explained in Lewis et al.  The
        # priorRatio and the Jacobian terms cancel out.  So the logs might
        # as well be zeros.
        self.logPriorRatio = 0.0
        #self.logJacobian = 0.0
        # That was easy, wasn't it?
        if self.stMcmc.tunings.doPolytomyResolutionClassPrior:
            # We are losing a node.  So the prior ratio is (T_{n,m} * C) /
            # T_{n,m - 1}.  We have the logs, and the result is the log.
            if 0:
                print "-" * 30
                print 'curTree.nInternalNodes', self.curTree.nInternalNodes
                print 'pTree.nInternalNodes', pTree.nInternalNodes
                print 'logBigT[curTree.nInternalNodes]', theProposal.logBigT[self.curTree.nInternalNodes]
                #print math.exp(theProposal.logBigT[self.curTree.nInternalNodes])
                print 'C ', self.stMcmc.tunings.polytomyPriorLogBigC
                print 'logBigT[pTree.nInternalNodes]', theProposal.logBigT[pTree.nInternalNodes]
                #print math.exp(theProposal.logBigT[pTree.nInternalNodes])
                print "-" * 30
            self.logPriorRatio = ((theProposal.logBigT[self.curTree.nInternalNodes] +
                                   self.stMcmc.tunings.polytomyPriorLogBigC) -
                                  theProposal.logBigT[pTree.nInternalNodes])
        else:
            if self.stMcmc.tunings.polytomyPriorLogBigC:
                self.logPriorRatio = self.stMcmc.tunings.polytomyPriorLogBigC
            else:
                self.logPriorRatio = 0.0
        #print " losing a node, m %2i->%2i.  logPriorRatio is %f" % (self.curTree.nInternalNodes,
        #                                                            pTree.nInternalNodes, self.logPriorRatio)
def propose(self, theProposal):
gm = ['STChain.propose()']
#print "propose() About to propose %s" % theProposal.name
if theProposal.name == 'nni':
#self.proposeNni(theProposal)
self.propTree.nni() # this does setPreAndPostOrder()
if theProposal.doAbort:
pass
#else:
# if not self.propTree.preAndPostOrderAreValid: # not needed
# self.propTree.setPreAndPostOrder()
elif theProposal.name == 'spr':
self.propTree.randomSpr()
if theProposal.doAbort:
pass
else:
if not self.propTree.preAndPostOrderAreValid:
self.propTree.setPreAndPostOrder()
elif theProposal.name == 'SR2008beta_uniform':
mt = self.propTree.beta
# Slider proposal
mt += (random.random() - 0.5) * theProposal.tuning
# Linear reflect
isGood = False
myMIN = 1.e-10
myMAX = 1.e+10
while not isGood:
if mt < myMIN:
mt = (myMIN - mt) + myMIN
elif mt > myMAX:
mt = myMAX - (mt - myMAX)
else:
isGood = True
self.propTree.beta = mt
self.logProposalRatio = 0.0
self.logPriorRatio = 0.0
elif theProposal.name == 'spaQ_uniform':
mt = self.propTree.spaQ
#originally = mt
# Slider proposal
mt += (random.random() - 0.5) * theProposal.tuning
# Linear reflect
isGood = False
myMIN = 1.e-10
myMAX = 1.
while not isGood:
if mt < myMIN:
mt = (myMIN - mt) + myMIN
elif mt > myMAX:
mt = myMAX - (mt - myMAX)
else:
isGood = True
self.propTree.spaQ = mt
self.logProposalRatio = 0.0
self.logPriorRatio = 0.0
#print "proposing mt from %.3f to %.3f, diff=%g" % (originally, mt, mt-originally)
elif theProposal.name == 'polytomy':
self.proposePolytomy(theProposal)
if not self.propTree.preAndPostOrderAreValid:
self.propTree.setPreAndPostOrder()
#self.propTree.draw()
else:
gm.append('Unlisted proposal.name=%s Fix me.' % theProposal.name)
raise Glitch, gm
#return 0.0
if theProposal.doAbort:
return 0.0
else:
#print "...about to calculate the likelihood of the propTree. Model %s" % self.stMcmc.modelName
if self.stMcmc.modelName.startswith('SR2008_rf'):
if self.stMcmc.stRFCalc == 'fastReducedRF':
self.getTreeLogLike_fastReducedRF()
elif self.stMcmc.stRFCalc == 'purePython1':
self.getTreeLogLike_ppy1()
elif self.stMcmc.stRFCalc == 'bitarray':
self.refreshBitarrayPropTree()
self.getTreeLogLike_bitarray()
elif self.stMcmc.modelName == 'SPA':
self.refreshBitarrayPropTree()
self.getTreeLogLike_spa_bitarray()
elif self.stMcmc.modelName == 'QPA':
self.getTreeLogLike_qpa_slow()
else:
gm.append('Unknown model %s' % self.stMcmc.modelName)
raise Glitch, gm
#if theProposal.name == 'polytomy':
#print "propTree logLike is %f, curTree logLike is %f" % (
# self.propTree.logLike, self.curTree.logLike)
#myDist = self.propTree.topologyDistance(self.curTree)
#print "myDist %2i, propTree.logLike %.3f curTree.logLike %.3f " % (myDist, self.propTree.logLike, self.curTree.logLike)
logLikeRatio = self.propTree.logLike - self.curTree.logLike
#print logLikeRatio
#logLikeRatio = 0.0
theSum = logLikeRatio + self.logProposalRatio + self.logPriorRatio
#theSum = self.logProposalRatio + self.logPriorRatio
#if theProposal.name == 'polytomy':
# print "%f %f %f %f" % (theSum, logLikeRatio, self.logProposalRatio, self.logPriorRatio)
return theSum
    def gen(self, aProposal):
        """Do one generation of this chain with the given proposal.

        doAborts mean that it was not a valid generation -- the proposal
        was neither accepted nor rejected, and is not counted.
        """
        gm = ['STChain.gen()']
        acceptMove = False
        #print "Doing %s" % aProposal.name
        # pRet is the log of the Metropolis-Hastings sum (logLikeRatio +
        # logProposalRatio + logPriorRatio), or 0.0 on an abort.
        pRet = self.propose(aProposal)
        #print "pRet = %.6f" % pRet,
        if not aProposal.doAbort:
            if pRet < -100.0: # math.exp(-100.) is 3.7200759760208361e-44
                r = 0.0
            elif pRet >= 0.0:
                r = 1.0
            else:
                r = math.exp(pRet)
            if r == 1.0:
                acceptMove = True
            elif random.random() < r:
                acceptMove = True
        #if aProposal.name == 'polytomy':
        #print "acceptMove = %s" % acceptMove
        #print "------------"
        #print "  %6.0f" % pRet
        # Debugging block, normally off ('if 0').
        if 0 and acceptMove:
            d1 = self.propTree.topologyDistance(self.curTree, metric='scqdist')
            d2 = self.stMcmc.tree.topologyDistance(self.propTree, metric='scqdist')
            print " %6.0f %5i %5i %5s" % (pRet, d1, d2, acceptMove)
        # Aborted proposals do not get here, so they are not counted.
        aProposal.nProposals[self.tempNum] += 1
        if acceptMove:
            aProposal.accepted = True
            aProposal.nAcceptances[self.tempNum] += 1
        #if not aProposal.doAbort:
        # Sync the loser tree (b) to the winner (a): if the move was
        # accepted, copy propTree onto curTree, and vice versa if not.
        if acceptMove:
            a = self.propTree
            b = self.curTree
        else:
            a = self.curTree
            b = self.propTree
        if aProposal.name in ['nni', 'spr', 'polytomy']:
            b.logLike = a.logLike
            a.copyToTree(b)
        elif aProposal.name in ['SR2008beta_uniform']:
            b.logLike = a.logLike
            b.beta = a.beta
        elif aProposal.name in ['spaQ_uniform']:
            b.logLike = a.logLike
            b.spaQ = a.spaQ
        else:
            gm.append('Unlisted proposal.name = %s  Fix me.' % aProposal.name)
            raise Glitch, gm
# Relative weight multipliers used when computing proposal probabilities.
fudgeFactor = {'local': 1.5}
class STMcmcTunings(object):
def __init__(self):
object.__setattr__(self, 'chainTemp', 0.15) # was 0.2
object.__setattr__(self, 'nni', None)
object.__setattr__(self, 'spr', None)
object.__setattr__(self, 'SR2008beta_uniform', 0.2)
object.__setattr__(self, 'spaQ_uniform', 0.1)
object.__setattr__(self, 'doPolytomyResolutionClassPrior', False)
object.__setattr__(self, 'polytomyPriorLogBigC', 0.0)
def __setattr__(self, item, val):
#print "Got request to set %s to %s" % (item, val)
if item in self.__dict__.keys():
# Here is where I should do the sanity checking of the new vals. Some day.
#print " Setting tuning '%s' to %s" % (item, val)
object.__setattr__(self, item, val)
else:
print self.dump()
gm = ["\nSTMcmcTunings.__setattr__()"]
gm.append("Can't set tuning '%s'-- no such tuning." % item)
raise Glitch, gm
def reprString(self, advice=True):
lst = ["\nSTMcmc.tunings:"]
spacer = ' ' * 4
lst.append("%s%20s: %s" % (spacer, 'chainTemp', self.chainTemp))
lst.append("%s%20s: %s" % (spacer, 'nni', self.nni))
lst.append("%s%20s: %s" % (spacer, 'spr', self.spr))
lst.append("%s%20s: %s" % (spacer, 'SR2008beta_uniform', self.SR2008beta_uniform))
lst.append("%s%20s: %s" % (spacer, 'spaQ_uniform', self.spaQ_uniform))
return string.join(lst, '\n')
def dump(self):
print self.reprString()
def __repr__(self):
return self.reprString()
class STMcmcProposalProbs(dict):
"""User-settable relative proposal probabilities.
An instance of this class is made as STMcmc.prob, where you can
do, for example,
yourSTMcmc.prob.nni = 2.0
These are relative proposal probs, that do not sum to 1.0, and
affect the calculation of the final proposal probabilities (ie the
kind that do sum to 1). It is a relative setting, and the default
is 1.0. Setting it to 0 turns it off. For small
probabilities, setting it to 2.0 doubles it. For bigger
probabilities, setting it to 2.0 makes it somewhat bigger.
Check the effect that it has by doing a
yourSTMcmc.writeProposalIntendedProbs()
which prints out the final calculated probabilities.
"""
def __init__(self):
object.__setattr__(self, 'nni', 1.0)
object.__setattr__(self, 'spr', 1.0)
object.__setattr__(self, 'SR2008beta_uniform', 1.0)
object.__setattr__(self, 'spaQ_uniform', 1.0)
object.__setattr__(self, 'polytomy', 0.0)
def __setattr__(self, item, val):
# complaintHead = "\nSTMcmcProposalProbs.__setattr__()"
gm = ["\nSTMcmcProposalProbs(). (set %s to %s)" % (item, val)]
theKeys = self.__dict__.keys()
if item in theKeys:
try:
val = float(val)
if val < 1e-9:
val = 0
object.__setattr__(self, item, val)
except:
gm.append("Should be a float. Got '%s'" % val)
raise Glitch, gm
else:
self.dump()
gm.append(" Can't set '%s'-- no such proposal." % item)
raise Glitch, gm
def reprString(self):
stuff = ["\nUser-settable relative proposal probabilities, from yourMcmc.prob"]
stuff.append(" To change it, do eg ")
stuff.append(" yourMcmc.prob.comp = 0.0 # turns comp proposals off")
stuff.append(" Current settings:")
theKeys = self.__dict__.keys()
theKeys.sort()
for k in theKeys:
stuff.append(" %20s: %s" % (k, getattr(self, k)))
return string.join(stuff, '\n')
def dump(self):
print self.reprString()
def __repr__(self):
return self.reprString()
class STProposal(object):
def __init__(self, theSTMcmc=None):
self.name = None
self.stMcmc = theSTMcmc # reference loop!
self.nChains = theSTMcmc.nChains
self.pNum = -1
self.mtNum = -1
self.weight = 1.0
self.nProposals = [0] * self.nChains
self.nAcceptances = [0] * self.nChains
self.accepted = 0
self.doAbort = False
self.nAborts = [0] * self.nChains
def dump(self):
print "proposal name=%-10s pNum=%2i, mtNum=%2i, weight=%5.1f, tuning=%7.2f" % (
'%s,' % self.name, self.pNum, self.mtNum, self.weight, self.tuning)
print " nProposals by temperature: %s" % self.nProposals
print " nAcceptances by temperature: %s" % self.nAcceptances
def _getTuning(self):
if self.name in ['nni', 'spr', 'SR2008beta_uniform', 'spaQ_uniform']:
#print "getting tuning for %s, returning %f" % (self.name, getattr(self.mcmc.tunings, self.name))
#print self.stMcmc.tunings
return getattr(self.stMcmc.tunings, self.name)
else:
return None
def _setTuning(self, whatever):
raise Glitch, "Can't set tuning this way."
def _delTuning(self):
raise Glitch, "Can't del tuning."
tuning = property(_getTuning, _setTuning, _delTuning)
class BigTSplitStuff(object):
    """An organizer for splits on STMcmc.tree (ie bigT) internal nodes.

    Only for use with the bitarray-based calculations.  All fields start
    out as None and are filled in elsewhere.
    """

    def __init__(self):
        for attrName in ('spl', 'spl2', 'theSpl',
                         'maskedSplitWithFirstTaxOne', 'onesCount', 'bytes'):
            setattr(self, attrName, None)

    def dump(self):
        print("ss: spl=%s, spl2=%s, masked=%s, onesCount=%s" % (
            self.spl, self.spl2, self.maskedSplitWithFirstTaxOne, self.onesCount))
class STMcmc(object):
"""An MCMC for making supertrees from a set of input trees.
This week, it implements the Steel and Rodrigo 2008 model, with the
alpha calculation using the approximation in Bryant and Steel 2009.
**Arguments**
inTrees
A list of p4 tree objects. You could just use ``var.trees``.
modelName
The SR2008 models implemented here are based on the Steel and
Rodrigo 2008 description of a likelihood model, "Maximum
likelihood supertrees" Syst. Biol. 57(2):243--250, 2008. At
the moment, they are all SR2008_rf, meaning that they use
Robinson-Foulds distances.
SR2008_rf_ia
Here 'ia' means 'ignore alpha'. The alpha values are not
calculated at all, as they are presumed (erroneously, but
not too badly) to cancel out.
SR2008_rf_aZ
This uses the approximation for Z_T = alpha^{-1} as described
in Equation 30 in the Bryant and Steel paper "Computing the
distribution of a tree metric" in IEEE/ACM Transactions on
computational biology and bioinformatics, VOL. 6, 2009.
SR2008_rf_aZ_fb
This is as SR2008_rf_aZ above, but additionally it allows
beta to be a free parameter, and it is sampled. Samples
are written to mcmc_prams* files.
beta
This only applies to SR2008. The beta is the weight as
given in Steel and Rodrigo 2008. By default it is 1.0.
stRFCalc
There are three ways to calculate the RF distances and
likelihood, for these SR2008_rf models above --- all giving
the same answer.
1. purePython1. Slow.
2. bitarray, using the bitarray module. About twice as fast
as purePython1
3. fastReducedRF, written in C++ using boost and ublas.
About 10 times faster than purePython1, but perhaps a bit
of a bother to get going. It needs the fastReducedRF
module, included in the p4 source code.
It is under control of the argument stRFCalc, which can be one
of 'purePython1', 'bitarray', and 'fastReducedRF'. By default
it is purePython1, so you may want to at least install
bitarray.
runNum
You may want to do more than one 'run' in the same directory,
to facilitate convergence testing. The first runNum would be
0, and samples, likelihoods, and checkPoints are written to
files with that number.
sampleInterval
Interval at which the chain is sampled, including writing a tree,
and the logLike. Plan to get perhaps 1000 samples; so if you are
planning to make a run of 10000 generations then you might set
sampleInterval=10.
checkPointInterval
Interval at which checkpoints are made. If set to None (the
default) it means don't make checkpoints. My taste is to aim to
make perhaps 2 to 4 per run. So if you are planning to start out
with a run of 10000 generations, you could set
checkPointInterval=5000, which will give you 2 checkpoints. See
more about checkpointing below.
To prepare for a run, instantiate an Mcmc object, for example::
m = STMcmc(treeList, modelName='SR2008_rf_aZ_fb', stRFCalc='fastReducedRF', sampleInterval=10)
To start it running, do this::
# Tell it the number of generations to do
m.run(10000)
As it runs, it saves trees and likelihoods at sampleInterval
intervals (actually whenever the current generation number is
evenly divisible by the sampleInterval).
**CheckPoints**
Whenever the current generation number is evenly divisible by the
checkPointInterval it will write a checkPoint file. A checkPoint
file is the whole MCMC, pickled. Using a checkPoint, you can
re-start an STMcmc from the point you left off. Or, in the event
of a crash, you can restart from the latest checkPoint. But the
most useful thing about them is that you can query checkPoints to
get information about how the chain has been running, and about
convergence diagnostics.
In order to restart the MCMC from the end of a previous run::
# read the last checkPoint file
m = func.unPickleStMcmc(0) # runNum 0
m.run(20000)
    It's that easy if your previous run finished properly. However, if
your previous run has crashed and you want to restart it from a
checkPoint, then you will need to repair the sample output files
to remove samples that were taken after the last checkPoint, but
before the crash. Fix the trees, likelihoods, prams, and sims.
(You probably do not need to beware of confusing gen (eg 9999) and
gen+1 (eg 10000) issues.) When you remove trees from the tree
files be sure to leave the 'end;' at the end-- p4 needs it, and
will deal with it.
The checkPoints can help with convergence testing. To help with
that, you can use the STMcmcCheckPointReader class. It will print
out a table of average standard deviations of split supports
between 2 runs, or between 2 checkPoints from the same run. It
will print out tables of proposal acceptances to show whether they
change over the course of the MCMC.
**Making a consensus tree**
See :class:`TreePartitions`.
"""
    def __init__(self, inTrees, bigT=None, modelName='SR2008_rf_aZ', beta=1.0, spaQ=0.5, stRFCalc='purePython1', runNum=0, sampleInterval=100, checkPointInterval=None):
        """Set up the STMcmc: check args, check files, and pre-digest the input trees."""
        gm = ['STMcmc.__init__()']

        # Sanity-check the input trees, and the supertree if given.
        assert inTrees
        for t in inTrees:
            assert isinstance(t, Tree)
        if bigT:
            assert isinstance(bigT, Tree)
            assert bigT.taxNames
            bigT.stripBrLens()
            for n in bigT.iterInternalsNoRoot():
                n.name = None

        goodModelNames = ['SR2008_rf_ia', 'SR2008_rf_aZ', 'SR2008_rf_aZ_fb', 'SPA', 'QPA']
        if modelName not in goodModelNames:
            gm.append("Arg modelName '%s' is not recognized. " % modelName)
            gm.append("Good modelNames are %s" % goodModelNames)
            raise Glitch, gm
        self.modelName = modelName
        self.stRFCalc = None

        # SR2008 models need a float beta, fully bifurcating input trees,
        # and a recognized RF calculation method.
        if modelName.startswith("SR2008"):
            try:
                fBeta = float(beta)
            except ValueError:
                gm.append("Arg beta (%s) should be a float" % beta)
                raise Glitch, gm
            self.beta = fBeta
            for t in inTrees:
                if t.isFullyBifurcating():
                    pass
                else:
                    gm.append("At the moment STMcmc wants trees that are fully bifurcating.")
                    raise Glitch, gm
            goodSTRFCalcNames = ['purePython1', 'bitarray', 'fastReducedRF']
            if stRFCalc not in goodSTRFCalcNames:
                gm.append("Arg stRFCalc '%s' is not recognized. " % modelName)
                gm.append("Good stRFCalc names are %s" % goodSTRFCalcNames)
                raise Glitch, gm
            self.stRFCalc = stRFCalc

        if modelName in ['SPA', 'QPA']:
            try:
                fspaQ = float(spaQ)
            except ValueError:
                gm.append("Arg spaQ (%s) should be a float" % spaQ)
                raise Glitch, gm
            self.spaQ = fspaQ

        nChains = 1 # mcmcmc is off, temporarily
        try:
            nChains = int(nChains)
        except (ValueError,TypeError):
            gm.append("nChains should be an int, 1 or more. Got %s" % nChains)
            raise Glitch, gm
        if nChains < 1:
            gm.append("nChains should be an int, 1 or more. Got %s" % nChains)
            raise Glitch, gm
        self.nChains = nChains
        self.chains = []
        self.gen = -1
        self.startMinusOne = -1
        self.constraints = None
        self.simulate = None

        try:
            runNum = int(runNum)
        except (ValueError, TypeError):
            gm.append("runNum should be an int, 0 or more. Got %s" % runNum)
            raise Glitch, gm
        if runNum < 0:
            gm.append("runNum should be an int, 0 or more. Got %s" % runNum)
            raise Glitch, gm
        self.runNum = runNum

        # Check that we are not going to over-write good stuff
        ff = os.listdir(os.getcwd())
        hasPickle = False
        for fName in ff:
            if fName.startswith("mcmc_checkPoint_%i." % self.runNum):
                hasPickle = True
                break
        if hasPickle:
            gm.append("runNum is set to %i" % self.runNum)
            gm.append("There is at least one mcmc_checkPoint_%i.xxx file in this directory." % self.runNum)
            gm.append("This is a new STMcmc, and I am refusing to over-write exisiting files.")
            gm.append("Maybe you want to re-start from the latest mcmc_checkPoint_%i file?" % self.runNum)
            gm.append("Otherwise, get rid of the existing mcmc_xxx_%i.xxx files and start again." % self.runNum)
            raise Glitch, gm

        if var.strictRunNumberChecking:
            # We want to start runs with number 0, so if runNum is more than that, check that there are other runs.
            if self.runNum > 0:
                for runNum2 in range(self.runNum):
                    hasTrees = False
                    for fName in ff:
                        if fName.startswith("mcmc_trees_%i" % runNum2):
                            hasTrees = True
                            break
                    if not hasTrees:
                        gm.append("runNum is set to %i" % self.runNum)
                        gm.append("runNums should go from zero up.")
                        gm.append("There are no mcmc_trees_%i.nex files to show that run %i has been done." % (runNum2, runNum2))
                        gm.append("Set the runNum to that, first.")
                        raise Glitch, gm

        self.sampleInterval = sampleInterval
        self.checkPointInterval = checkPointInterval

        # Proposal bookkeeping, filled in by _makeProposals().
        self.proposals = []
        self.proposalsHash = {}
        self.propWeights = []
        self.cumPropWeights = []
        self.totalPropWeights = 0.0

        self.treePartitions = None
        self.likesFileName = "mcmc_likes_%i" % runNum
        self.treeFileName = "mcmc_trees_%i.nex" % runNum
        self.pramsFileName = "mcmc_prams_%i" % runNum
        self.writePrams = False
        if self.modelName in ['SR2008_rf_aZ_fb', "SPA", "QPA"]:
            self.writePrams = True

        self.lastTimeCheck = None

        if self.nChains > 1:
            self.swapMatrix = []
            for i in range(self.nChains):
                self.swapMatrix.append([0] * self.nChains)
        else:
            self.swapMatrix = None

        self.tunings = STMcmcTunings()
        self.prob = STMcmcProposalProbs()
        if self.modelName in ['SPA', 'QPA']:
            self.prob.polytomy = 1.0
            self.prob.spr = 0.0

        # Zap internal node names
        # for n in aTree.root.iterInternals():
        #    if n.name:
        #        n.name = None

        # The supertree taxNames are either taken from bigT, or are the
        # sorted union of the leaf names in the input trees.
        if not bigT:
            allNames = []
            for t in inTrees:
                t.unsorted_taxNames = [n.name for n in t.iterLeavesNoRoot()]
                allNames += t.unsorted_taxNames # Efficient?  Probably does not matter.
            self.taxNames = list(set(allNames))
            self.taxNames.sort() # not needed, but nice for debugging
        else:
            for t in inTrees:
                t.unsorted_taxNames = [n.name for n in t.iterLeavesNoRoot()]
            self.taxNames = bigT.taxNames
        #print self.taxNames
        self.nTax = len(self.taxNames)

        # For SPA (or bitarray-based SR2008), pre-compute per-input-tree
        # bitarray taxon masks and split sets.
        if self.modelName in ['SPA'] or self.stRFCalc == 'bitarray':
            #print "self.taxNames = ", self.taxNames
            for t in inTrees:
                #print "-" * 50
                #t.draw()
                sorted_taxNames = []
                t.baTaxBits = []
                for tNum in range(self.nTax):
                    tN = self.taxNames[tNum]
                    if tN in t.unsorted_taxNames:
                        sorted_taxNames.append(tN)
                        t.baTaxBits.append(True)
                    else:
                        t.baTaxBits.append(False)
                t.taxNames = sorted_taxNames
                t.baTaxBits = bitarray.bitarray(t.baTaxBits)
                t.firstTax = t.baTaxBits.index(1)
                #print "intree baTaxBits is %s" % t.baTaxBits
                #print "intree firstTax is %i" % t.firstTax

                # Can't use Tree.makeSplitKeys(), unfortunately.  So
                # make split keys here.  STMcmc.tBits is only used for
                # the leaves, here and in
                # STChain.setupBitarrayCalcs(), and there only once,
                # during STChain.__init__().  So probably does not
                # need to be an instance attribute.  Maybe delete?
                self.tBits = [False] * self.nTax
                for n in t.iterPostOrder():
                    if n == t.root:
                        break
                    if n.isLeaf:
                        spot = self.taxNames.index(n.name)
                        self.tBits[spot] = True
                        n.stSplitKey = bitarray.bitarray(self.tBits)
                        self.tBits[spot] = False
                    else:
                        n.stSplitKey = n.leftChild.stSplitKey.copy()
                        p = n.leftChild.sibling
                        while p:
                            n.stSplitKey |= p.stSplitKey # "or", in-place
                            p = p.sibling
                    #print "setting node %i stSplitKey to %s" % (n.nodeNum, n.stSplitKey)
                t.splSet = set()
                for n in t.iterInternalsNoRoot():
                    if not n.stSplitKey[t.firstTax]: # make sure splitKey[firstTax] is a '1'
                        n.stSplitKey.invert()
                        n.stSplitKey &= t.baTaxBits # 'and', in-place
                        #print "inverting and and-ing node %i stSplitKey to %s" % (n.nodeNum, n.stSplitKey)
                    t.splSet.add(n.stSplitKey.tobytes()) # bytes so that I can use it as a set element

        # For QPA, pre-compute per-input-tree long-int taxon bits and the
        # set of quartets (as 4-tuples of taxon bits) induced by each tree.
        if self.modelName in ['QPA']:
            for t in inTrees:
                sorted_taxNames = []
                t.taxBits = []
                for tNum in range(self.nTax):
                    tN = self.taxNames[tNum]
                    if tN in t.unsorted_taxNames:
                        sorted_taxNames.append(tN)
                        t.taxBits.append(1L << tNum)
                    else:
                        t.taxBits.append(0)
                t.taxNames = sorted_taxNames
                #print "intree taxBits is %s" % t.taxBits

                # Can't use Tree.makeSplitKeys(), unfortunately.  So
                # make split keys here.  STMcmc.tBits is only used for
                # the leaves, here and in
                # STChain.setupBitarrayCalcs(), and there only once,
                # during STChain.__init__().  So probably does not
                # need to be an instance attribute.  Maybe delete?
                #self.tBits = [False] * self.nTax
                for n in t.iterPostOrder():
                    if n == t.root:
                        break
                    if n.isLeaf:
                        spot = self.taxNames.index(n.name)
                        #self.tBits[spot] = True
                        n.stSplitKey = 1L << spot
                        #self.tBits[spot] = False
                    else:
                        n.stSplitKey = n.leftChild.stSplitKey
                        p = n.leftChild.sibling
                        while p:
                            n.stSplitKey |= p.stSplitKey # "or", in-place
                            p = p.sibling
                    #print "setting node %i stSplitKey to %s" % (n.nodeNum, n.stSplitKey)
                # t.splSet = set()
                # for n in t.iterInternalsNoRoot():
                #     if not n.stSplitKey[t.firstTax]: # make sure splitKey[firstTax] is a '1'
                #         n.stSplitKey.invert()
                #         n.stSplitKey &= t.baTaxBits # 'and', in-place
                #         #print "inverting and and-ing node %i stSplitKey to %s" % (n.nodeNum, n.stSplitKey)
                #     t.splSet.add(n.stSplitKey.tobytes()) # bytes so that I can use it as a set element
                t.skk = [n.stSplitKey for n in t.iterInternalsNoRoot()]
                t.qSet = set()
                for sk in t.skk:
                    # Each split induces quartets: two taxa from each side,
                    # stored as a canonically-ordered 4-tuple.
                    ups = [txBit for txBit in t.taxBits if (sk & txBit)]
                    downs = [txBit for txBit in t.taxBits if not (sk & txBit)]
                    for down in itertools.combinations(downs, 2):
                        if down[0] > down[1]:
                            down = (down[1], down[0])
                        for up in itertools.combinations(ups, 2):
                            if up[0] > up[1]:
                                up = (up[1], up[0])
                            if down[0] < up[0]:
                                t.qSet.add(down+up)
                            else:
                                t.qSet.add(up+down)
                #print t.qSet
                t.nQuartets = len(t.qSet)

        self.trees = inTrees
        # The supertree: bigT if given, else a random starting tree.
        if bigT:
            self.tree = bigT
        else:
            self.tree = func.randomTree(taxNames=self.taxNames, name='stTree', randomBrLens=False)

        # For the long-int based RF calculations, pre-compute taxBits and
        # split key sets on each input tree.
        if self.stRFCalc in ['purePython1', 'fastReducedRF']:
            for t in inTrees:
                sorted_taxNames = []
                t.taxBits = 0L
                for tNum in range(self.nTax):
                    tN = self.taxNames[tNum]
                    if tN in t.unsorted_taxNames:
                        sorted_taxNames.append(tN)
                        adder = 1L << tNum
                        t.taxBits += adder
                t.taxNames = sorted_taxNames
                t.allOnes = 2L**(t.nTax) - 1
                t.makeSplitKeys()
                t.skSet = set([n.br.splitKey for n in t.iterInternalsNoRoot()])

        if self.stRFCalc in ['purePython1', 'fastReducedRF']:
            self.tree.makeSplitKeys()

        self.Frrf = None
        if self.stRFCalc == 'fastReducedRF':
            try:
                import fastReducedRF
                self.Frrf = fastReducedRF.Frrf
                import pyublas # not explicitly used--but makes converters available
            except ImportError:
                gm.append("var.stRFCalc is set to 'fastReducedRF', but I could not import")
                gm.append("at least one of fastReducedRF or pyublas.")
                gm.append("Make sure they are installed.")
                raise Glitch, gm

        # For QPA, also pre-compute the quartet set of the supertree itself.
        if self.modelName in ['QPA']:
            self.tree.taxBits = [1L << i for i in range(self.tree.nTax)]
            t = self.tree
            for n in t.iterPostOrder():
                if n == t.root:
                    break
                if n.isLeaf:
                    spot = self.taxNames.index(n.name)
                    n.stSplitKey = 1L << spot
                else:
                    n.stSplitKey = n.leftChild.stSplitKey
                    p = n.leftChild.sibling
                    while p:
                        n.stSplitKey |= p.stSplitKey # "or", in-place
                        p = p.sibling
            t.skk = [n.stSplitKey for n in t.iterInternalsNoRoot()]
            t.qSet = set()
            for sk in t.skk:
                ups = [txBit for txBit in t.taxBits if (sk & txBit)]
                downs = [txBit for txBit in t.taxBits if not (sk & txBit)]
                for down in itertools.combinations(downs, 2):
                    assert down[0] < down[1] # probably not needed
                    for up in itertools.combinations(ups, 2):
                        assert up[0] < up[1] # probably not needed
                        if down[0] < up[0]:
                            t.qSet.add(down+up)
                        else:
                            t.qSet.add(up+down)
            #print t.qSet
            t.nQuartets = len(t.qSet)

        print "Initializing STMcmc"
        print "%-10s: %s" % ('modelName', modelName)
        if self.modelName.startswith("SR2008"):
            print "%-10s: %s" % ('stRFCalc', self.stRFCalc)
        print "%-10s: %s" % ('inTrees', len(self.trees))
        print "%-10s: %s" % ('nTax', self.nTax)
def _makeProposals(self):
"""Make proposals for the STMcmc."""
gm = ['STMcmc._makeProposals()']
# nni
if self.prob.nni:
p = STProposal(self)
p.name = 'nni'
p.weight = self.prob.nni # * (len(self.tree.nodes) - 1) * fudgeFactor['nni']
self.proposals.append(p)
if self.prob.spr:
p = STProposal(self)
p.name = 'spr'
p.weight = self.prob.spr # * (len(self.tree.nodes) - 1) * fudgeFactor['nni']
self.proposals.append(p)
if self.modelName in ['SR2008_rf_aZ_fb']:
if self.prob.SR2008beta_uniform:
p = STProposal(self)
p.name = 'SR2008beta_uniform'
p.weight = self.prob.SR2008beta_uniform # * (len(self.tree.nodes) - 1) * fudgeFactor['nni']
self.proposals.append(p)
#object.__setattr__(self.tuningsUsage, 'local', p)
if self.modelName in ['SPA', 'QPA']:
if self.prob.spaQ_uniform:
p = STProposal(self)
p.name = 'spaQ_uniform'
p.weight = self.prob.spaQ_uniform # * (len(self.tree.nodes) - 1) * fudgeFactor['nni']
self.proposals.append(p)
#object.__setattr__(self.tuningsUsage, 'local', p)
if self.prob.polytomy:
p = STProposal(self)
p.name = 'polytomy'
p.weight = self.prob.polytomy
self.proposals.append(p)
if not self.proposals:
gm.append("No proposals?")
raise Glitch, gm
self.propWeights = []
for p in self.proposals:
self.propWeights.append(p.weight)
self.cumPropWeights = [self.propWeights[0]]
for i in range(len(self.propWeights))[1:]:
self.cumPropWeights.append(self.cumPropWeights[i - 1] + self.propWeights[i])
self.totalPropWeights = sum(self.propWeights)
if self.totalPropWeights < 1e-9:
gm.append("No proposal weights?")
raise Glitch, gm
for p in self.proposals:
self.proposalsHash[p.name] = p
def _refreshProposalProbsAndTunings(self):
"""Adjust proposals after a restart."""
gm = ['STMcmc._refreshProposalProbsAndTunings()']
for p in self.proposals:
# nni
if p.name == 'nni':
#p.weight = self.prob.local * (len(self.tree.nodes) - 1) * fudgeFactor['local']
p.weight = self.prob.nni
self.propWeights = []
for p in self.proposals:
self.propWeights.append(p.weight)
self.cumPropWeights = [self.propWeights[0]]
for i in range(len(self.propWeights))[1:]:
self.cumPropWeights.append(self.cumPropWeights[i - 1] + self.propWeights[i])
self.totalPropWeights = sum(self.propWeights)
if self.totalPropWeights < 1e-9:
gm.append("No proposal weights?")
raise Glitch, gm
    def writeProposalAcceptances(self):
        """Pretty-print the proposal acceptances."""
        # Counts are zeroed at the start of run() and after checkPoints, so
        # there may legitimately be nothing to report.
        if (self.gen - self.startMinusOne) <= 0:
            print "\nSTMcmc.writeProposalAcceptances() There is no info in memory. "
            print " Maybe it was just emptied after writing to a checkpoint? "
            print "If so, read the checkPoint and get the proposalAcceptances from there."
        else:
            spacer = ' ' * 8
            print "\nProposal acceptances, run %i, for %i gens, from gens %i to %i, inclusive." % (
                self.runNum, (self.gen - self.startMinusOne), self.startMinusOne + 1, self.gen)
            print "%s %20s %10s %13s%8s" % (spacer, 'proposal', 'nProposals', 'acceptance(%)', 'tuning')
            # One row per proposal; trailing commas keep the fields on one line.
            for p in self.proposals:
                print "%s" % spacer,
                print "%20s" % p.name,
                print "%10i" % p.nProposals[0],

                if p.nProposals[0]: # Don't divide by zero
                    print " %5.1f " % (100.0 * float(p.nAcceptances[0]) / float(p.nProposals[0])),
                else:
                    print " - ",

                # Narrow vs wide tunings get different formats (or '-' if None).
                if p.tuning == None:
                    print " -",
                elif p.tuning < 2.0:
                    print " %5.3f" % p.tuning,
                else:
                    print "%7.1f" % p.tuning,
                print

        # # Tabulate topology changes, if any were attempted.
        # doTopol = 0
        # p = None
        # try:
        # p = self.proposalsHash['local']
        # except KeyError:
        # pass
        # if p:
        # for tNum in range(self.nChains):
        # if p.nTopologyChangeAttempts[tNum]:
        # doTopol = 1
        # break
        # if doTopol:
        # p = self.proposalsHash['local']
        # print "'Local' proposal-- attempted topology changes"
        # print "%s tempNum nProps nAccepts percent nTopolChangeAttempts nTopolChanges percent" % spacer
        # for tNum in range(self.nChains):
        # print "%s" % spacer,
        # print "%4i " % tNum,
        # print "%9i" % p.nProposals[tNum],
        # print "%8i" % p.nAcceptances[tNum],
        # print " %5.1f" % (100.0 * float(p.nAcceptances[tNum]) / float(p.nProposals[tNum])),
        # print "%20i" % p.nTopologyChangeAttempts[tNum],
        # print "%13i" % p.nTopologyChanges[tNum],
        # print " %5.1f" % (100.0 * float(p.nTopologyChanges[tNum])/float(p.nTopologyChangeAttempts[tNum]))
        # else:
        # print "%sFor the 'local' proposals, there were no attempted" % spacer
        # print "%stopology changes in any of the chains." % spacer

        # Check for aborts.
        # p = None
        # try:
        # p = self.proposalsHash['local']
        # except KeyError:
        # pass
        # if p:
        # if hasattr(p, 'nAborts'):
        # if p.nAborts[0]:
        # print "The 'local' proposal had %i aborts." % p.nAborts[0]
        # print "(Aborts might be due to brLen proposals too big or too small)"
        # if self.constraints:
        # print "(Or, more likely, due to violated constraints.)"
        # else:
        # print "The 'local' proposal had no aborts (either due to brLen proposals"
        # print "too big or too small, or due to violated constraints)."

        # for pN in ['polytomy', 'compLocation', 'rMatrixLocation', 'gdasrvLocation']:
        # p = None
        # try:
        # p = self.proposalsHash[pN]
        # except KeyError:
        # pass
        # if p:
        # if hasattr(p, 'nAborts'):
        # print "The %15s proposal had %5i aborts." % (p.name, p.nAborts[0])
    def writeSwapMatrix(self):
        """Pretty-print the mcmcmc chain-swap proposal/acceptance matrix."""
        print "\nChain swapping, for %i gens, from gens %i to %i, inclusive." % (
            (self.gen - self.startMinusOne), self.startMinusOne + 1, self.gen)
        print " Swaps are presented as a square matrix, nChains * nChains."
        print " Upper triangle is the number of swaps proposed between two chains."
        print " Lower triangle is the percent swaps accepted."
        print " The current tunings.chainTemp is %5.3f\n" % self.tunings.chainTemp

        # Header row of chain numbers, then a separator row.
        print " " * 10,
        for i in range(self.nChains):
            print "%7i" % i,
        print
        print " " * 10,
        for i in range(self.nChains):
            print " ----",
        print
        # One row per chain; trailing commas keep each row on one line.
        for i in range(self.nChains):
            print " " * 7, "%2i" % i,
            for j in range(self.nChains):
                if i < j: # upper triangle
                    print "%7i" % self.swapMatrix[i][j],
                elif i == j:
                    print " -",
                else:
                    # Lower triangle: acceptance percent; the proposal count
                    # lives in the mirrored upper-triangle cell [j][i].
                    if self.swapMatrix[j][i] == 0: # no proposals
                        print " -",
                    else:
                        print " %5.1f" % (100.0 * float(self.swapMatrix[i][j]) / float(self.swapMatrix[j][i])),
            print
def _makeChainsAndProposals(self):
"""Make chains and proposals."""
gm = ['STMcmc._makeChainsAndProposals()']
#random.seed(0)
# Make chains, if needed
if not self.chains:
self.chains = []
for chNum in range(self.nChains):
aChain = STChain(self)
aChain.tempNum = chNum # Temperature. Set this way to start, but it changes.
self.chains.append(aChain)
if not self.proposals:
self._makeProposals()
# If we are going to be doing the resolution class prior
# in the polytomy move, we want to pre-compute the logs of
# T_{n,m}. Its a vector with indices (ie m) from zero to
# nTax-2 inclusive.
# if self.proposalsHash.has_key('polytomy') and self.tunings.doPolytomyResolutionClassPrior:
# p = self.proposalsHash['polytomy']
# bigT = func.nUnrootedTreesWithMultifurcations(self.tree.nTax)
# p.logBigT = [0.0] * (self.tree.nTax - 1)
# for i in range(1, self.tree.nTax - 1):
# p.logBigT[i] = math.log(bigT[i])
# #print p.logBigT
def _setOutputTreeFile(self):
"""Setup the (output) tree file for the STMcmc."""
gm = ['STMcmc._setOutputTreeFile()']
# Write the preamble for the trees outfile.
self.treeFile = file(self.treeFileName, 'w')
self.treeFile.write('#nexus\n\n')
self.treeFile.write('begin taxa;\n')
self.treeFile.write(' dimensions ntax=%s;\n' % self.tree.nTax)
self.treeFile.write(' taxlabels')
for tN in self.tree.taxNames:
self.treeFile.write(' %s' % func.nexusFixNameIfQuotesAreNeeded(tN))
self.treeFile.write(';\nend;\n\n')
self.treeFile.write('begin trees;\n')
self.translationHash = {}
i = 1
for tName in self.tree.taxNames:
self.translationHash[tName] = i
i += 1
self.treeFile.write(' translate\n')
for i in range(self.tree.nTax - 1):
self.treeFile.write(' %3i %s,\n' % (
i + 1, func.nexusFixNameIfQuotesAreNeeded(self.tree.taxNames[i])))
self.treeFile.write(' %3i %s\n' % (
self.tree.nTax, func.nexusFixNameIfQuotesAreNeeded(self.tree.taxNames[-1])))
self.treeFile.write(' ;\n')
self.treeFile.write(' [Tree numbers are gen+1]\n')
self.treeFile.close()
def run(self, nGensToDo, verbose=True):
"""Start the STMcmc running."""
gm = ['STMcmc.run()']
#Keep track of the first gen of this call to run(), maybe restart
firstGen = self.gen + 1
if self.checkPointInterval:
# We want a couple of things:
# 1. The last gen should be on checkPointInterval. For
# example, if the checkPointInterval is 200, then doing
# 100 or 300 generations will not be allowed cuz the
# chain would continue past the checkPoint-- bad. Or if
# you re-start after 500 gens and change to a
# checkPointInterval of 200, then you won't be allowed to
# do 500 gens.
#if ((self.gen + 1) + nGensToDo) % self.checkPointInterval == 0:
if nGensToDo % self.checkPointInterval == 0:
pass
else:
gm.append("With the current settings, the last generation won't be on a checkPointInterval.")
gm.append("self.gen+1=%i, nGensToDo=%i, checkPointInterval=%i" % ((self.gen + 1),
nGensToDo, self.checkPointInterval))
raise Glitch, gm
# 2. We also want the checkPointInterval to be evenly
# divisible by the sampleInterval.
if self.checkPointInterval % self.sampleInterval == 0:
pass
else:
gm.append("The checkPointInterval (%i) should be evenly divisible" % self.checkPointInterval)
gm.append("by the sampleInterval (%i)." % self.sampleInterval)
raise Glitch, gm
if self.proposals:
# Its either a re-start, or it has been thru autoTune().
# I can tell the difference by self.gen, which is -1 after
# autoTune()
if self.gen == -1:
self._makeChainsAndProposals()
self._setOutputTreeFile()
#if self.simulate:
# self.writeSimFileHeader(self.tree)
# The probs and tunings may have been changed by the user.
self._refreshProposalProbsAndTunings()
# This stuff below should be the same as is done after pickling, see below.
self.startMinusOne = self.gen
# Start the tree partitions over.
self.treePartitions = None
# Zero the proposal counts
for p in self.proposals:
p.nProposals = [0] * self.nChains
p.nAcceptances = [0] * self.nChains
p.nTopologyChangeAttempts = [0] * self.nChains
p.nTopologyChanges = [0] * self.nChains
# Zero the swap matrix
if self.nChains > 1:
self.swapMatrix = []
for i in range(self.nChains):
self.swapMatrix.append([0] * self.nChains)
else:
self._makeChainsAndProposals()
self._setOutputTreeFile()
#if self.simulate:
# self.writeSimFileHeader(self.tree)
if verbose:
self.writeProposalIntendedProbs()
sys.stdout.flush()
coldChainNum = 0
# If polytomy is turned on, then it is possible to get a star
# tree, in which case local will not work. So if we have both
# polytomy and local proposals, we should also have brLen.
# if self.proposalsHash.has_key("polytomy") and self.proposalsHash.has_key("local"):
# if not self.proposalsHash.has_key('brLen'):
# gm.append("If you have polytomy and local proposals, you should have a brLen proposal as well.")
# gm.append("It can have a low proposal probability, but it needs to be there.")
# gm.append("Turn it on by eg yourMcmc.prob.brLen = 0.001")
# raise Glitch, gm
if self.gen > -1:
# it is a re-start, so we need to back over the "end;" in the tree files.
f2 = file(self.treeFileName, 'a+')
pos = -1
while 1:
f2.seek(pos, 2)
c = f2.read(1)
if c == ';':
break
pos -= 1
#print "pos now %i" % pos
pos -= 3 # end;
f2.seek(pos, 2)
c = f2.read(4)
#print "got c = '%s'" % c
if c != "end;":
gm.append("Mcmc.run(). Failed to find and remove the 'end;' at the end of the tree file.")
raise Glitch, gm
else:
f2.seek(pos, 2)
f2.truncate()
f2.close()
if verbose:
print
print "Re-starting the MCMC run %i from gen=%i" % (self.runNum, self.gen)
print "Set to do %i more generations." % nGensToDo
#if self.writePrams:
# if self.chains[0].curTree.model.nFreePrams == 0:
# print "There are no free prams in the model, so I am turning writePrams off."
# self.writePrams = False
sys.stdout.flush()
self.startMinusOne = self.gen
else:
if verbose:
print "Starting the MCMC %s run %i" % ((self.constraints and "(with constraints)" or ""), self.runNum)
print "Set to do %i generations." % nGensToDo
if self.writePrams:
# if self.chains[0].curTree.model.nFreePrams == 0:
# print "There are no free prams in the model, so I am turning writePrams off."
# self.writePrams = False
# else:
pramsFile = file(self.pramsFileName, 'a')
if self.modelName.startswith("SR2008"):
pramsFile.write(" genPlus1 beta\n")
elif self.modelName.startswith("SPA"):
pramsFile.write(" genPlus1 spaQ\n")
elif self.modelName.startswith("QPA"):
pramsFile.write(" genPlus1 spaQ\n")
pramsFile.close()
sys.stdout.flush()
if verbose:
print "Sampling every %i." % self.sampleInterval
if self.checkPointInterval:
print "CheckPoints written every %i." % self.checkPointInterval
if nGensToDo <= 20000:
print "One dot is 100 generations."
else:
print "One dot is 1000 generations."
sys.stdout.flush()
self.treePartitions = None
realTimeStart = time.time()
self.lastTimeCheck = time.time()
abortableProposals = ['nni', 'spr', 'polytomy']
for gNum in range(nGensToDo):
self.gen += 1
#Do an initial time estimate based on 100 gens
if nGensToDo > 100 and self.gen-firstGen == 100:
diff_secs = time.time() - realTimeStart
total_secs = (float(nGensToDo)/float(100))*float(diff_secs)
deltaTime = datetime.timedelta(seconds = int(round(total_secs)))
print "Estimated completion time: %s days, %s" % (
deltaTime.days, time.strftime("%H:%M:%S",time.gmtime(deltaTime.seconds)))
# Above is a list of proposals where it is possible to abort.
# When a gen(aProposal) is made, below, aProposal.doAbort
# might be set, in which case we want to skip it for this
# gen. But we want to start each 'for chNum' loop with
# doAborts all turned off.
for chNum in range(self.nChains):
failure = True
nAttempts = 0
while failure:
# Get the next proposal
gotIt = False
safety = 0
while not gotIt:
theRan = random.uniform(0.0, self.totalPropWeights)
for i in range(len(self.cumPropWeights)):
if theRan < self.cumPropWeights[i]:
break
aProposal = self.proposals[i]
gotIt = True
if aProposal.name == 'nni':
if self.chains[chNum].curTree.nInternalNodes == 1: # Can't do nni on a star tree.
aProposal = self.proposalsHash['polytomy']
#elif aProposal.name == 'root3':
# if self.chains[chNum].curTree.nInternalNodes == 1: # Can't do root3 on a star tree.
# gotIt = False
if aProposal.doAbort:
gotIt = False
safety += 1
if safety > 1000:
gm.append("Could not find a proposal after %i attempts." % safety)
gm.append("Possibly a programming error.")
gm.append("Or possibly it is just a pathologically frustrating Mcmc.")
raise Glitch, gm
#if gNum % 2:
# aProposal = self.proposalsHash['brLen']
#else:
# aProposal = self.proposalsHash['comp']
if 0:
print "==== gNum=%i, chNum=%i, aProposal=%s (part %i)" % (
gNum, chNum, aProposal.name, aProposal.pNum),
sys.stdout.flush()
#print gNum,
failure = self.chains[chNum].gen(aProposal) # success returns None
if 0:
if failure:
print " failure"
else:
print
nAttempts += 1
if nAttempts > 1000:
gm.append("Was not able to do a successful generation after %i attempts." % nAttempts)
raise Glitch, gm
#print " Mcmc.run(). finished a gen on chain %i" % (chNum)
for pr in abortableProposals:
if self.proposalsHash.has_key(pr):
self.proposalsHash[pr].doAbort = False
# Do swap, if there is more than 1 chain.
if self.nChains == 1:
coldChain = 0
else:
# Chain swapping stuff was lifted from MrBayes. Thanks again.
chain1,chain2 = random.sample(self.chains, 2)
# Use the upper triangle of swapMatrix for nProposed's
if chain1.tempNum < chain2.tempNum:
self.swapMatrix[chain1.tempNum][chain2.tempNum] += 1
else:
self.swapMatrix[chain2.tempNum][chain1.tempNum] += 1
lnR = (1.0 / (1.0 + (self.tunings.chainTemp * chain1.tempNum))) * chain2.curTree.logLike
lnR += (1.0 / (1.0 + (self.tunings.chainTemp * chain2.tempNum))) * chain1.curTree.logLike
lnR -= (1.0 / (1.0 + (self.tunings.chainTemp * chain1.tempNum))) * chain1.curTree.logLike
lnR -= (1.0 / (1.0 + (self.tunings.chainTemp * chain2.tempNum))) * chain2.curTree.logLike
if lnR < -100.0:
r = 0.0
elif lnR >= 0.0:
r = 1.0
else:
r = math.exp(lnR)
acceptSwap = 0
if random.random() < r:
acceptSwap = 1
if acceptSwap:
# Use the lower triangle of swapMatrix to keep track of nAccepted's
if chain1.tempNum < chain2.tempNum:
self.swapMatrix[chain2.tempNum][chain1.tempNum] += 1
else:
self.swapMatrix[chain1.tempNum][chain2.tempNum] += 1
# Do the swap
chain1.tempNum, chain2.tempNum = chain2.tempNum, chain1.tempNum
# Find the cold chain, the one where tempNum is 0
coldChainNum = -1
for i in range(len(self.chains)):
if self.chains[i].tempNum == 0:
coldChainNum = i
break
if coldChainNum == -1:
gm.append("Unable to find which chain is the cold chain. Bad.")
raise Glitch, gm
# If it is a writeInterval, write stuff
if (self.gen + 1) % self.sampleInterval == 0:
if 1:
likesFile = file(self.likesFileName, 'a')
likesFile.write('%11i %f\n' % (self.gen + 1, self.chains[coldChainNum].curTree.logLike))
likesFile.close()
treeFile = file(self.treeFileName, 'a')
treeFile.write(" tree t_%i = [&U] " % (self.gen + 1))
self.chains[coldChainNum].curTree.writeNewick(treeFile,
withTranslation=1,
translationHash=self.translationHash,
doMcmcCommandComments=False)
treeFile.close()
if self.writePrams:
pramsFile = file(self.pramsFileName, 'a')
#pramsFile.write("%12i " % (self.gen + 1))
pramsFile.write("%12i" % (self.gen + 1))
if self.modelName.startswith("SR2008"):
pramsFile.write(" %f\n" % self.chains[coldChainNum].curTree.beta)
elif self.modelName in ["SPA", "QPA"]:
pramsFile.write(" %f\n" % self.chains[coldChainNum].curTree.spaQ)
pramsFile.close()
# Do a simulation
if self.simulate:
#print "about to simulate..."
self.doSimulate(self.chains[coldChainNum].curTree)
#print "...finished simulate."
# Do other stuff.
if hasattr(self, 'hook'):
self.hook(self.chains[coldChainNum].curTree)
if 0 and self.constraints:
print "Mcmc x1c"
print self.chains[0].verifyIdentityOfTwoTreesInChain()
print "b checking curTree .."
self.chains[0].curTree.checkSplitKeys()
print "b checking propTree ..."
self.chains[0].propTree.checkSplitKeys()
print "Mcmc xxx"
# Add curTree to treePartitions
if self.treePartitions:
self.treePartitions._getSplitsFromTree(self.chains[coldChainNum].curTree)
else:
self.treePartitions = TreePartitions(self.chains[coldChainNum].curTree)
# After _getSplitsFromTree, need to follow, at some point,
# with _finishSplits(). Do that when it is pickled, or at the end of the run.
# Checking and debugging constraints
if 0 and self.constraints:
print "Mcmc x1d"
print self.chains[coldChainNum].verifyIdentityOfTwoTreesInChain()
print "c checking curTree ..."
self.chains[coldChainNum].curTree.checkSplitKeys()
print "c checking propTree ..."
self.chains[coldChainNum].propTree.checkSplitKeys()
#print "c checking that all constraints are present"
#theSplits = [n.br.splitKey for n in self.chains[0].curTree.iterNodesNoRoot()]
#for sk in self.constraints.constraints:
# if sk not in theSplits:
# gm.append("split %i is not present in the curTree." % sk)
# raise Glitch, gm
print "Mcmc zzz"
# Check that the curTree has all the constraints
if self.constraints:
splitsInCurTree = [n.br.splitKey for n in self.chains[coldChainNum].curTree.iterInternalsNoRoot()]
for sk in self.constraints.constraints:
if sk not in splitsInCurTree:
gm.append("Programming error.")
gm.append("The current tree (the last tree sampled) does not contain constraint")
gm.append("%s" % func.getSplitStringFromKey(sk, self.tree.nTax))
raise Glitch, gm
# If it is a checkPointInterval, pickle
if self.checkPointInterval and (self.gen + 1) % self.checkPointInterval == 0:
self.checkPoint()
# The stuff below needs to be done in a re-start as well. See above "if self.proposals:"
self.startMinusOne = self.gen
# Start the tree partitions over.
self.treePartitions = None
# Zero the proposal counts
for p in self.proposals:
p.nProposals = [0] * self.nChains
p.nAcceptances = [0] * self.nChains
p.nTopologyChangeAttempts = [0] * self.nChains
p.nTopologyChanges = [0] * self.nChains
p.nAborts = [0] * self.nChains
# Zero the swap matrix
if self.nChains > 1:
self.swapMatrix = []
for i in range(self.nChains):
self.swapMatrix.append([0] * self.nChains)
# Reassuring pips ...
if firstGen != self.gen: #We want to skip the first gen of every call to run()
if nGensToDo <= 20000:
if (self.gen-firstGen) % 1000 == 0:
if verbose:
deltaTime = self._doTimeCheck(nGensToDo, firstGen, 1000)
if deltaTime.days:
timeString = "%s days, %s" % (
deltaTime.days, time.strftime("%H:%M:%S",time.gmtime(deltaTime.seconds)))
else:
timeString = time.strftime("%H:%M:%S",time.gmtime(deltaTime.seconds))
print "%10i - %s" % (self.gen, timeString)
else:
sys.stdout.write(".")
sys.stdout.flush()
elif (self.gen-firstGen) % 100 == 0:
sys.stdout.write(".")
sys.stdout.flush()
else:
if (self.gen-firstGen) % 50000 == 0:
if verbose:
deltaTime = self._doTimeCheck(nGensToDo, firstGen, 50000)
if deltaTime.days:
timeString = "%s days, %s" % (
deltaTime.days, time.strftime("%H:%M:%S",time.gmtime(deltaTime.seconds)))
else:
timeString = time.strftime("%H:%M:%S",time.gmtime(deltaTime.seconds))
print "%10i - %s" % (self.gen, timeString)
else:
sys.stdout.write(".")
sys.stdout.flush()
elif (self.gen-firstGen) % 1000 == 0:
sys.stdout.write(".")
sys.stdout.flush()
# Gens finished. Clean up.
print
if verbose:
print "Finished %s generations." % nGensToDo
treeFile = file(self.treeFileName, 'a')
treeFile.write('end;\n\n')
treeFile.close()
def _doTimeCheck(self, nGensToDo, firstGen, genInterval):
    """Estimate the wall-clock time remaining for this call to run().

    Scales the time elapsed since the previous check (which covered one
    genInterval's worth of generations) up to the number of generations
    still to do.  firstGen is the first generation of this call to
    Mcmc.run(), else timing fails on restart.
    """
    checkTime = time.time()
    elapsed = checkTime - self.lastTimeCheck
    gensRemaining = nGensToDo - (self.gen - firstGen)
    projectedSecs = (float(gensRemaining) / float(genInterval)) * float(elapsed)
    self.lastTimeCheck = checkTime
    return datetime.timedelta(seconds=int(round(projectedSecs)))
def checkPoint(self):
    """Pickle a deep copy of this STMcmc to mcmc_checkPoint_<runNum>.<gen+1>.

    Unpicklable state (the frrf/bigTr objects used by the fastReducedRF
    calculator) is detached from the chains before the deepcopy and
    re-attached afterwards; open file handles are dropped from the copy.
    """
    # Maybe we should not save the inTrees? -- would make it more lightweight.
    if 0:
        # Debug dump of per-chain model info; normally disabled.
        for chNum in range(self.nChains):
            ch = self.chains[chNum]
            print "chain %i ==================" % chNum
            ch.curTree.summarizeModelThingsNNodes()
    # the Frrf object does not pickle
    savedFrrfs = []
    savedBigTrs = []
    if self.stRFCalc == 'fastReducedRF':
        for chNum in range(self.nChains):
            ch = self.chains[chNum]
            savedFrrfs.append(ch.frrf)
            ch.frrf = None
            savedBigTrs.append(ch.bigTr)
            ch.bigTr = None
    theCopy = copy.deepcopy(self)
    # Splits must be finished before the partitions are useful to a reader.
    theCopy.treePartitions._finishSplits()
    # Open file handles cannot be pickled.
    theCopy.likesFile = None
    theCopy.treeFile = None
    #theCopy.treePartitions = None # this can be the biggest part of the pickle.
    # Pickle it.
    fName = "mcmc_checkPoint_%i.%i" % (self.runNum, self.gen + 1)
    # NOTE(review): binary pickle protocol 1 written to a text-mode file;
    # fine on Unix, would corrupt on Windows -- confirm intended platforms.
    f = file(fName, 'w')
    cPickle.dump(theCopy, f, 1)
    f.close()
    if self.stRFCalc == 'fastReducedRF':
        # Re-attach the unpicklable objects to the live chains.
        for chNum in range(self.nChains):
            ch = self.chains[chNum]
            ch.frrf = savedFrrfs[chNum]
            ch.bigTr = savedBigTrs[chNum]
def writeProposalProbs(self):
"""(Another) Pretty-print the proposal probabilities.
See also STMcmc.writeProposalAcceptances().
"""
nProposals = len(self.proposals)
if not nProposals:
print "STMcmc.writeProposalProbs(). No proposals (yet?)."
return
#intended = self.propWeights[:]
#for i in range(len(intended)):
# intended[i] /= self.totalPropWeights
#if math.fabs(sum(intended) - 1.0 > 1e-15):
# raise Glitch, 'bad sum of intended proposal probs. %s' % sum(intended)
nAttained = [0] * nProposals
nAccepted = [0] * nProposals
for i in range(nProposals):
nAttained[i] = self.proposals[i].nProposals[0]
nAccepted[i] = self.proposals[i].nAcceptances[0]
sumAttained = float(sum(nAttained)) # should be zero or nGen
if not sumAttained:
print "STMcmc.writeProposalProbs(). No proposals have been made."
print "Possibly, due to it being a checkPoint interval, nProposals have all been set to zero."
return
#assert int(sumAttained) == self.gen + 1, "sumAttained is %i, should be gen+1, %i." % (
# int(sumAttained), self.gen + 1)
probAttained = []
for i in range(len(nAttained)):
probAttained.append(100.0 * float(nAttained[i]) / sumAttained)
if math.fabs(sum(probAttained) - 100.0 > 1e-13):
raise Glitch, 'bad sum of attained proposal probs. %s' % sum(probAttained)
spacer = ' ' * 4
print "\nProposal probabilities (%)"
#print "There are %i proposals" % len(self.proposals)
print "For %i gens, from gens %i to %i, inclusive." % (
(self.gen - self.startMinusOne), self.startMinusOne + 1, self.gen)
print "%2s %11s %11s %11s %10s %23s %5s %5s" % ('', 'nProposals', 'proposed(%)',
'accepted(%)', 'tuning', 'proposal', 'part', 'num')
for i in range(len(self.proposals)):
print "%2i" % i,
p = self.proposals[i]
print " %7i " % self.proposals[i].nProposals[0],
print " %5.1f " % probAttained[i],
if nAttained[i]:
print " %5.1f " % (100.0 * float(nAccepted[i]) / float(nAttained[i])),
else:
print " - ",
if p.tuning == None:
print " - ",
elif p.tuning < 2.0:
print " %7.3f " % p.tuning,
else:
print " %7.1f " % p.tuning,
print " %20s" % p.name,
if p.pNum != -1:
print " %3i " % p.pNum,
else:
print " - ",
if p.mtNum != -1:
print " %3i " % p.mtNum,
else:
print " - ",
print
def writeProposalIntendedProbs(self):
"""Tabulate the intended proposal probabilities.
"""
nProposals = len(self.proposals)
if not nProposals:
print "STMcmc.writeProposalIntendedProbs(). No proposals (yet?)."
return
intended = self.propWeights[:]
for i in range(len(intended)):
intended[i] /= self.totalPropWeights
if math.fabs(sum(intended) - 1.0 > 1e-14):
raise Glitch, 'bad sum of intended proposal probs. %s' % sum(intended)
spacer = ' ' * 4
print "\nIntended proposal probabilities (%)"
#print "There are %i proposals" % len(self.proposals)
print "%2s %11s %23s %5s %5s" % ('', 'intended(%)', 'proposal', 'part', 'num')
for i in range(len(self.proposals)):
print "%2i" % i,
p = self.proposals[i]
print " %6.2f " % (100. * intended[i]),
print " %20s" % p.name,
if p.pNum != -1:
print " %3i " % p.pNum,
else:
print " - ",
if p.mtNum != -1:
print " %3i " % p.mtNum,
else:
print " - ",
print
class STMcmcCheckPointReader(object):
    """Read in and display mcmc_checkPoint files.

    Three options--

    To read in a specific checkpoint file, specify the file name by
    fName=whatever

    To read in the most recent (by os.path.getmtime()) checkpoint
    file, say last=True

    If you specify neither of the above, it will read in all the
    checkPoint files that it finds.

    Where it looks is determined by theGlob, which by default is '*',
    ie everything in the current directory.  If you want to look
    somewhere else, you can specify eg
        theGlob='SomeWhereElse/*'
    or, if it is unambiguous, just
        theGlob='S*/*'

    So you might say
        cpr = STMcmcCheckPointReader(theGlob='*_0.*')
    to get all the checkpoints from the first run, run 0.  Then, you
    can tell the cpr object to do various things.  Eg
        cpr.writeProposalAcceptances()

    But perhaps the most powerful thing about it is that it allows
    easy access to the checkpointed Mcmc objects, in the list mm.  Eg
    to get the first one, ask for
        m = cpr.mm[0]
    and m is an STMcmc object, complete with all its records of
    proposals and acceptances and so on.  And the TreePartitions
    object.

    (Sorry! -- Lazy documentation.  See the source code for more that it can do.)
    """

    def __init__(self, fName=None, theGlob='*', last=False, verbose=True):
        # self.mm holds the unpickled STMcmc objects, one per checkpoint.
        self.mm = []
        if not fName:
            #fList = [fName for fName in os.listdir(os.getcwd()) if fName.startswith("mcmc_checkPoint")]
            #fList = glob.glob(theGlob)
            #print "Full glob = %s" % fList
            # Only files whose basename marks them as checkpoints.
            fList = [fName for fName in glob.glob(theGlob) if
                     os.path.basename(fName).startswith("mcmc_checkPoint")]
            #print fList
            if not fList:
                raise Glitch, "No checkpoints found in this directory."
            if last:
                # Find the most recent
                mostRecent = os.path.getmtime(fList[0])
                mostRecentFileName = fList[0]
                if len(fList) > 1:
                    for fName in fList[1:]:
                        mtime = os.path.getmtime(fName)
                        if mtime > mostRecent:
                            mostRecent = mtime
                            mostRecentFileName = fName
                f = file(mostRecentFileName)
                m = cPickle.load(f)
                f.close()
                self.mm.append(m)
            else:
                # get all the files
                for fName in fList:
                    f = file(fName)
                    m = cPickle.load(f)
                    f.close()
                    self.mm.append(m)
                # Order by generation, then by run number.
                self.mm = func.sortListOfObjectsOn2Attributes(self.mm, "gen", 'runNum')
        else:
            # get the file by name
            f = file(fName)
            m = cPickle.load(f)
            f.close()
            self.mm.append(m)
        if verbose:
            self.dump()

    def dump(self):
        """Print a one-line summary (index, run, gen+1) per checkpoint."""
        print "STMcmcCheckPoints (%i checkPoints read)" % len(self.mm)
        print "%12s %12s %12s %12s" % (" ", "index", "run", "gen+1")
        print "%12s %12s %12s %12s" % (" ", "-----", "---", "-----")
        for i in range(len(self.mm)):
            m = self.mm[i]
            #print " %2i run %2i, gen+1 %11i" % (i, m.runNum, m.gen+1)
            print "%12s %12s %12s %12s" % (" ", i, m.runNum, m.gen+1)

    def compareSplits(self, mNum1, mNum2, verbose=True, minimumProportion=0.1):
        """Should we be only looking at splits within the 95% ci of the topologies?

        Compares split supports between checkpoints mNum1 and mNum2 (indices
        into self.mm); returns the average standard deviation of split
        frequencies, or None if no splits exceed minimumProportion.
        """
        m1 = self.mm[mNum1]
        m2 = self.mm[mNum2]
        tp1 = m1.treePartitions
        tp2 = m2.treePartitions

        if verbose:
            print "\nSTMcmcCheckPointReader.compareSplits(%i,%i)" % (mNum1, mNum2)
            print "%12s %12s %12s %12s %12s" % ("mNum", "runNum", "start", "gen+1", "nTrees")
            for i in range(5):
                print " ---------",
            print
            for mNum in [mNum1, mNum2]:
                print " %10i " % mNum,
                m = self.mm[mNum]
                print " %10i " % m.runNum,
                print " %10i " % (m.startMinusOne + 1),
                print " %10i " % (m.gen + 1),
                #for i in m.splitCompares:
                #    print i
                print " %10i " % m.treePartitions.nTrees

        asdos = self.compareSplitsBetweenTwoTreePartitions(tp1, tp2, minimumProportion, verbose=verbose)
        if asdos == None and verbose:
            print "No splits > %s" % minimumProportion
        return asdos

    def compareSplitsBetweenTwoTreePartitions(tp1, tp2, minimumProportion, verbose=False):
        # Static helper (see the staticmethod wrapping below): returns the
        # average standard deviation of split supports between two
        # TreePartitions objects, or None if there is nothing to compare.
        ret = tp1.compareSplits(tp2, minimumProportion=minimumProportion)
        if ret != []:
            sumOfStdDevs = 0.0
            diffs = []
            if ret and len(ret):
                nSplits = len(ret)
                for i in ret:
                    #print " %.3f %.3f " % (i[2][0], i[2][1]),
                    # i[2] holds the pair of split proportions.
                    stdDev = math.sqrt(func.variance(i[2]))
                    #print "%.5f" % stdDev
                    sumOfStdDevs += stdDev
                    diffs.append(math.fabs(i[2][0] - i[2][1]))
                if verbose:
                    #print " %f " % sumOfStdDevs,
                    print " nSplits=%i, average of std devs of splits %.4f " % (nSplits, sumOfStdDevs/nSplits)
                    print " max difference %f, mean difference %f" % (max(diffs), sum(diffs)/nSplits)
                return sumOfStdDevs/nSplits
        else:
            return None
    # Old-style staticmethod declaration (pre-decorator idiom).
    compareSplitsBetweenTwoTreePartitions = staticmethod(compareSplitsBetweenTwoTreePartitions)

    def compareSplitsAll(self):
        """Pairwise-compare splits for all checkpoints; print a matrix and stats."""
        nM = len(self.mm)
        # Number of pairs in one triangle of the matrix.
        nItems = ((nM * nM) - nM)/2
        # NOTE(review): np.float is the builtin float alias, removed in
        # modern numpy -- would need np.float64 there.
        results = np.zeros((nM, nM), np.float)
        vect = np.zeros(nItems, np.float)
        vCounter = 0
        for mNum1 in range(1, nM):
            for mNum2 in range(mNum1):
                ret = self.compareSplits(mNum1, mNum2, verbose=False)
                #print "+++ ret = %s" % ret
                if ret == None:
                    ret = 0.0
                # Fill both triangles symmetrically.
                results[mNum1][mNum2] = ret
                results[mNum2][mNum1] = ret
                vect[vCounter] = ret
                vCounter += 1
                if 0:
                    print " %10i " % mNum1,
                    print " %10i " % mNum2,
                    print "%.3f" % ret
        print results
        print "For the %i values in one triangle," % nItems
        print "max = ", vect.max()
        print "min = ", vect.min()
        print "mean = ", vect.mean()
        print "var = ", vect.var()

    def writeProposalAcceptances(self):
        """Delegate to each checkpointed Mcmc in turn."""
        for m in self.mm:
            m.writeProposalAcceptances()

    def writeSwapMatrices(self):
        """Write swap matrices for checkpoints that ran more than one chain."""
        for m in self.mm:
            if m.nChains > 1:
                m.writeSwapMatrix()

    def writeProposalProbs(self):
        """Delegate to each checkpointed Mcmc in turn."""
        for m in self.mm:
            m.writeProposalProbs()
| Linhua-Sun/p4-phylogenetics | p4/STMcmc.py | Python | gpl-2.0 | 122,106 |
import Gears as gears
from .. import *
try:
from OpenGL.GL import *
from OpenGL.GLU import *
except:
print ('ERROR: PyOpenGL not installed properly.')
import random
def box():
    """Draw an axis-aligned cube spanning [-1, 1]^3 as six colored quads.

    Emits exactly the same sequence of immediate-mode GL calls as the
    original hand-unrolled version: glBegin(GL_QUADS), then for each face
    one glColor3f followed by its four glVertex3f corners, then glEnd().
    """
    faces = (
        # (r, g, b), four corner vertices, in the original winding order.
        ((0.0, 1.0, 0.0), ((1.0, 1.0, -1.0), (-1.0, 1.0, -1.0), (-1.0, 1.0, 1.0), (1.0, 1.0, 1.0))),
        ((1.0, 0.5, 0.0), ((1.0, -1.0, 1.0), (-1.0, -1.0, 1.0), (-1.0, -1.0, -1.0), (1.0, -1.0, -1.0))),
        ((1.0, 0.0, 0.0), ((1.0, 1.0, 1.0), (-1.0, 1.0, 1.0), (-1.0, -1.0, 1.0), (1.0, -1.0, 1.0))),
        ((1.0, 1.0, 0.0), ((1.0, -1.0, -1.0), (-1.0, -1.0, -1.0), (-1.0, 1.0, -1.0), (1.0, 1.0, -1.0))),
        ((0.0, 0.0, 1.0), ((-1.0, 1.0, 1.0), (-1.0, 1.0, -1.0), (-1.0, -1.0, -1.0), (-1.0, -1.0, 1.0))),
        ((1.0, 0.0, 1.0), ((1.0, 1.0, -1.0), (1.0, 1.0, 1.0), (1.0, -1.0, 1.0), (1.0, -1.0, -1.0))),
    )
    glBegin(GL_QUADS)
    for color, corners in faces:
        glColor3f(*color)
        for corner in corners:
            glVertex3f(*corner)
    glEnd()
class Flyby() :
    """Stimulus component that renders a forward fly-through of a field of
    colored boxes, compiled once into a GL display list."""
    # Keyword arguments captured at construction, applied in apply().
    args = None
    def __init__(self, **args):
        self.args = args
    def apply(self, stimulus) :
        # Forward the stored constructor kwargs to applyWithArgs.
        self.applyWithArgs(stimulus, **self.args)
    def applyWithArgs(
            self,
            stimulus,
            ) :
        # Hook this component into the stimulus: per-frame rendering plus
        # start/end lifecycle callbacks for GL resource management.
        stimulus.enableColorMode()
        stimulus.setForwardRenderingCallback(self.render)
        stimulus.registerCallback(gears.StimulusStartEvent.typeId, self.start)
        stimulus.registerCallback(gears.StimulusEndEvent.typeId, self.finish)
    def start( self, event ):
        print('hello start flyby')
        # Compile 400 boxes at random positions in [-20, 20]^3 into a
        # display list, built once here and replayed every frame in render().
        # NOTE(review): random is not seeded, so the scene differs per run.
        self.glist = glGenLists(1)
        glNewList(self.glist, GL_COMPILE)
        for i in range(0, 400) :
            glPushMatrix()
            glTranslated(
                random.uniform( a = -20, b = 20),
                random.uniform( a = -20, b = 20),
                random.uniform( a = -20, b = 20),
                )
            box()
            glPopMatrix()
        glEndList()
    def finish( self, event ):
        # Release the display list created in start().
        glDeleteLists(self.glist, 1)
    def render(self, iFrame):
        # Per-frame forward-rendering callback; iFrame drives the camera.
        glEnable(GL_DEPTH_TEST)
        glDepthMask(GL_TRUE);
        glClearColor(0.0, 0.0, 0.0, 1.0 )
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(45, 1, 0.1, 1000)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        # Camera moves forward 0.1 units per frame, looking down -z.
        gluLookAt(0, 0, 20- iFrame * 0.1, 0, 0, 19 - iFrame * 0.1, 0, 1, 0)
        # Draw two copies of the box field 40 units apart; the modulo-ish
        # offset (iFrame // 400) recycles the field for an endless fly-by.
        glTranslated(0, 0, -40 * (iFrame // 400))
        glCallList(self.glist)
        glTranslated(0, 0, -40)
        glCallList(self.glist)
        glDisable(GL_DEPTH_TEST)
        glDepthMask(GL_FALSE);
| szecsi/Gears | GearsPy/Project/Components/Forward/Flyby.py | Python | gpl-2.0 | 2,803 |
'''
Created on Apr 19, 2015
@author: bcopy
'''
import os
import cherrypy
import sys
import subprocess
import random
import time
import threading
import Queue
import tempfile
class ScriptMonitor(object):
'''
Monitors the script execution and updates result statuses
'''
def __init__(self):
self.m_processInitialized = False
def monitor(self, process):
assert isinstance(process, subprocess.Popen)
self.m_processInitialized = True
self.m_process = process
if(self.m_process.pid != None and self.m_process.poll() == None):
print "Starting raspbuggy script process output polling..."
self.m_stdoutQueue = Queue.Queue()
self.m_stderrQueue = Queue.Queue()
self.m_stdoutReader = AsynchronousFileReader(self.m_process.stdout, self.m_stdoutQueue)
self.m_stdoutReader.start()
else:
print "Raspbuggy script process startup failed."
def abort(self):
print "Starting raspbuggy script process output polling..."
if(self.m_processInitialized and self.m_process.poll() == None):
self.m_process.terminate()
self.m_processInitialized = False
def isRunning(self):
return (self.m_processInitialized and self.m_process.poll() == None)
def getStdoutQueue(self):
return self.m_stdoutQueue
def getStderrQueue(self):
return self.m_stderrQueue
class AsynchronousFileReader(threading.Thread):
    '''
    Reads a file object line by line on a dedicated thread, pushing each
    line onto a queue so another thread can consume the output without
    blocking on the read.
    '''
    def __init__(self, fd, queue):
        assert isinstance(queue, Queue.Queue)
        assert callable(fd.readline)
        threading.Thread.__init__(self)
        self._queue = queue
        self._fd = fd

    def run(self):
        '''Thread body: forward every line of the file onto the queue.'''
        while True:
            line = self._fd.readline()
            if line == '':
                # readline() returns '' only at end-of-file.
                break
            self._queue.put(line)

    def eof(self):
        '''True once the reader thread has finished and the queue is drained.'''
        return not self.is_alive() and self._queue.empty()
class RaspbuggyService(object):
def __init__(self):
self.m_scriptMonitor = None
@cherrypy.expose
@cherrypy.tools.json_out()
def ping(self):
return {"msg": "pong"}
@cherrypy.expose
@cherrypy.tools.json_out()
def status(self):
if(self.m_scriptMonitor != None):
running = self.m_scriptMonitor.isRunning()
retCode = self.m_scriptMonitor.m_process.poll()
if(retCode == None):
retCode = -1
return {"running":running,"exitCode":retCode}
else:
return {"running":False,"exitCode":-1}
@cherrypy.expose
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def execute(self):
scriptData = cherrypy.request.json
if(self.m_scriptMonitor == None):
self.m_scriptMonitor = ScriptMonitor()
if(scriptData["scriptText"] == None):
return {"success":False, "message":"Script contents undefined"}
elif(self.m_scriptMonitor.isRunning()):
return {"success":False, "message":"Script already running !"}
else:
# Write the script to a temporary file
#scriptFile = tempfile.NamedTemporaryFile(prefix='raspbuggy-script-')
scriptFile = open("/tmp/raspbuggy-script.py", "w")
scriptFile.write(scriptData["scriptText"]+"\n")
scriptFile.close()
print "Executing script "+scriptFile.name+" ..."
scriptProcess = subprocess.Popen(["python", scriptFile.name], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=128)
if(scriptProcess.pid != None):
self.m_scriptMonitor.monitor(scriptProcess)
return {"success":True, "message": "Running script (pid "+str(self.m_scriptMonitor.m_process.pid)+")"}
else:
return {"success":False, "message": "Could not start up script"}
@cherrypy.expose
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def abort(self):
return {"result":1}
@cherrypy.expose
@cherrypy.tools.json_out()
def tailStdOut(self):
return {"tail": "New line\nNew line"}
if __name__ == '__main__':
    # Static-content roots.  Each can be overridden with an environment
    # variable; otherwise a path under the current working directory is
    # used (webjar paths carry pinned versions from the build).
    WEBAPP_ROOT = os.getenv('RASPBUGGY_WEBAPP_ROOT',os.getcwd()+"/src/main/webapp")
    BLOCKLY_ROOT = os.getenv('BLOCKLY_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/blockly/b35c0fbfa2")
    BOOTSTRAP_ROOT = os.getenv('BOOTSTRAP_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/bootstrap/3.3.4")
    JQUERY_ROOT = os.getenv('JQUERY_ROOT',os.getcwd()+"/target/webjars/META-INF/resources/webjars/jquery/1.9.1")
    #print os.path.abspath(WEBAPP_ROOT)
    #print os.path.abspath(BLOCKLY_ROOT)
    # Mount the JSON service at "/" and serve each asset tree as a static
    # directory alongside it.
    cherrypy.quickstart(RaspbuggyService(), "/",
        {
            '/':
            {
                'tools.staticdir.on': True,
                'tools.staticdir.dir': os.path.abspath(WEBAPP_ROOT)
            },
            '/blockly':
            {
                'tools.staticdir.on': True,
                'tools.staticdir.dir': os.path.abspath(BLOCKLY_ROOT)
            },
            '/bootstrap':
            {
                'tools.staticdir.on': True,
                'tools.staticdir.dir': os.path.abspath(BOOTSTRAP_ROOT)
            },
            '/jquery':
            {
                'tools.staticdir.on': True,
                'tools.staticdir.dir': os.path.abspath(JQUERY_ROOT)
            }
        })
| bcopy/raspbuggy | modules/pywebide/src/main/python/raspbuggy/webide/main.py | Python | gpl-2.0 | 5,823 |
#!/usr/bin/python
#
# centos5.py - A webKickstart module to handle changes needed from
# RHEL 5 to CentOS 5 Kickstart generation.
#
# Copyright 2007 NC State University
# Written by Jack Neely <jjneely@ncsu.edu>
#
# SDG
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from baseRealmLinuxKickstart import baseRealmLinuxKickstart
class Kickstart(baseRealmLinuxKickstart):
    """CentOS 5 variant of the RHEL 5 kickstart generator.

    Identical to baseRealmLinuxKickstart except that the RHEL-only build
    steps (installation number, RHN registration) are dropped from the
    build order, since CentOS has neither.
    """

    def __init__(self, url, cfg, sc=None):
        baseRealmLinuxKickstart.__init__(self, url, cfg, sc)
        for rhelOnlyStep in (self.installationNumber, self.RHN):
            self.buildOrder.remove(rhelOnlyStep)
| jjneely/webkickstart | archive/centos5.py | Python | gpl-2.0 | 1,227 |
from django.db import models
from stdimage import StdImageField
from django.core.validators import RegexValidator
import datetime
# Year choices for model fields: (value, label) pairs from 1980 through the
# current year, inclusive.  Evaluated once at import time.
YEAR_CHOICES = [(r, r) for r in range(1980, datetime.datetime.now().year + 1)]

# Year-of-study choices for hostel-body members: (value, label) pairs.
S_CHOICE = [('1stYear', '1stYear'), ('2ndYear', '2ndYear'),
            ('3rdYear', '3rdYear'), ('4thYear', '4thYear')]
# Create your models here.
class Hostel(models.Model):
    """A hostel, keyed by its unique name."""
    # The name doubles as the primary key, so hostel names must be unique.
    HostelName = models.CharField(max_length=100, primary_key=True)
    # Short free-text category -- presumably e.g. boys'/girls'; TODO confirm values.
    HostelType = models.CharField(max_length=10)
    # Total number of seats (places) in the hostel.
    HostelSeat = models.IntegerField()
    # Banner/logo image; the 'large' rendition is 675x300 (cropped).
    HostelImage = StdImageField(upload_to='Hostels/logo/',variations={'large': (675, 300,True)})
    HostelAddress = models.CharField(max_length=200)
    HostelDescription = models.TextField()
    HostelEmail = models.EmailField()
    # Validator: optional leading '+', then 10-13 digits.
    phone_regex = RegexValidator(regex=r'^\+?1?\d{10,13}$', message="Phone number must be entered in the format: '+999999999'. Up to 13 digits allowed.")
    HostelPhoneNo = models.CharField(max_length=13,validators=[phone_regex], blank=True)
    def __str__(self):
        # Shown in the admin and shell listings.
        return self.HostelName
class HostelEvents(models.Model):
    """An event hosted by a hostel."""
    # Owning hostel (foreign key to Hostel).
    HostelName = models.ForeignKey(Hostel)
    HostelEventsName = models.CharField(max_length=100)
    HostelEventDescription = models.TextField()
    def __str__(self):
        return self.HostelEventsName
class HostelPictureGalary(models.Model):
    """A picture in a hostel's gallery."""
    # Owning hostel (foreign key to Hostel).
    HostelName = models.ForeignKey(Hostel)
    PictureName = models.CharField(max_length=100)
    # Stored under Hostels/galary/; the 'large' rendition is 675x300 (cropped).
    PictureLocation = StdImageField(upload_to='Hostels/galary/',variations={'large': (675, 300,True)})
    def __str__(self):
        return self.PictureName
class HostelBody(models.Model):
    """A member of a hostel's governing body for a given year."""
    HostelName = models.ForeignKey(Hostel)
    HostelbodyRole = models.CharField(max_length=100)
    # NOTE(review): this default is evaluated once at import time, so a
    # long-running process keeps offering the year it started in; a callable
    # default would track the current year — confirm intent.
    HostelbodyRoleYear = models.IntegerField(choices=YEAR_CHOICES, default=datetime.datetime.now().year)
    # NOTE(review): max_length=10 looks short for a full name — verify.
    PersonName = models.CharField (max_length=10)
    # NOTE(review): default 'NA' is not one of the S_CHOICE options.
    PersonYear = models.CharField (max_length=7, choices=S_CHOICE,default='NA')
    # 'thumbnail' variation: 300x200, cropped.
    PersonImage = StdImageField(upload_to='Hostels/gb/',variations={'thumbnail': (300, 200,True)})
    def __str__(self):
        return self.HostelbodyRole
| bpain2010/kgecweb | hostels/models.py | Python | gpl-2.0 | 2,053 |
#! /usr/bin/env python3
import setuptools

# Package metadata gathered in one mapping for easy inspection.
METADATA = dict(
    name='vimdecrypt',
    version='2.0',
    author='Gertjan van Zwieten',
    py_modules=['vimdecrypt'],
    scripts=['vimdecrypt'],
    test_suite='vimdecrypt',
)

setuptools.setup(**METADATA)
| gertjanvanzwieten/vimdecrypt | setup.py | Python | gpl-2.0 | 216 |
#
# Copyright (C) 2014 Tommy Winther
# http://tommy.winther.nu
#
# Modified for FTV Guide (09/2014 onwards)
# by Thomas Geppert [bluezed] - bluezed.apps@gmail.com
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this Program; see the file LICENSE.txt. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import datetime
import json
import os
import threading
import time
import xbmc
import xbmcgui
import source as src
from notification import Notification
from strings import *
import streaming
from utils import reset_playing
# Master switch for the debug() helper below.
DEBUG = False
# UI modes: programme grid, full-screen TV, and TV with on-screen display.
MODE_EPG = 'EPG'
MODE_TV = 'TV'
MODE_OSD = 'OSD'
# Kodi action ids used for navigation (values match xbmcgui's action codes).
ACTION_LEFT = 1
ACTION_RIGHT = 2
ACTION_UP = 3
ACTION_DOWN = 4
ACTION_PAGE_UP = 5
ACTION_PAGE_DOWN = 6
ACTION_SELECT_ITEM = 7
ACTION_PARENT_DIR = 9
ACTION_PREVIOUS_MENU = 10
ACTION_SHOW_INFO = 11
ACTION_NEXT_ITEM = 14
ACTION_PREV_ITEM = 15
ACTION_MOUSE_WHEEL_UP = 104
ACTION_MOUSE_WHEEL_DOWN = 105
ACTION_MOUSE_MOVE = 107
# Raw key/button codes.
KEY_NAV_BACK = 92
KEY_CONTEXT_MENU = 117
KEY_HOME = 159
KEY_ESC = 61467
# Each EPG page shows this many channel rows over a two-hour span.
CHANNELS_PER_PAGE = 8
HALF_HOUR = datetime.timedelta(minutes=30)
# ADDON presumably comes from the star-import of strings — TODO confirm.
SKIN = ADDON.getSetting('skin')
def debug(s):
    """Log *s* to Kodi's debug log when the module-wide DEBUG flag is set."""
    if not DEBUG:
        return
    xbmc.log(str(s), xbmc.LOGDEBUG)
class Point(object):
    """Mutable 2-D coordinate, initialised to the origin."""

    def __init__(self):
        self.x = 0
        self.y = 0

    def __repr__(self):
        return 'Point(x=%d, y=%d)' % (self.x, self.y)
class EPGView(object):
    """Geometry of the on-screen EPG grid; every field starts at 0 and is
    filled in from the skin's view-marker control."""

    def __init__(self):
        self.top = 0
        self.left = 0
        self.right = 0
        self.bottom = 0
        self.width = 0
        self.cellHeight = 0
class ControlAndProgram(object):
    """Pairs a grid button control with the programme it renders."""
    def __init__(self, control, program):
        # control: an xbmcgui.ControlButton placed in the EPG grid
        self.control = control
        # program: the source.Program shown by that control
        self.program = program
class TVGuide(xbmcgui.WindowXML):
    """Main EPG window: draws the programme grid, handles navigation and
    starts/stops channel playback."""
    # Control ids defined in script-tvguide-main.xml — info pane.
    C_MAIN_DATE_LONG = 3999
    C_MAIN_DATE = 4000
    C_MAIN_TITLE = 4020
    C_MAIN_TIME = 4021
    C_MAIN_DESCRIPTION = 4022
    C_MAIN_IMAGE = 4023
    C_MAIN_LOGO = 4024
    C_MAIN_TIMEBAR = 4100
    # Loading overlay.
    C_MAIN_LOADING = 4200
    C_MAIN_LOADING_PROGRESS = 4201
    C_MAIN_LOADING_TIME_LEFT = 4202
    C_MAIN_LOADING_CANCEL = 4203
    # Mouse navigation overlay buttons.
    C_MAIN_MOUSE_CONTROLS = 4300
    C_MAIN_MOUSE_HOME = 4301
    C_MAIN_MOUSE_LEFT = 4302
    C_MAIN_MOUSE_UP = 4303
    C_MAIN_MOUSE_DOWN = 4304
    C_MAIN_MOUSE_RIGHT = 4305
    C_MAIN_MOUSE_EXIT = 4306
    C_MAIN_BACKGROUND = 4600
    # EPG grid container and its sizing marker.
    C_MAIN_EPG = 5000
    C_MAIN_EPG_VIEW_MARKER = 5001
    # On-screen display shown over live TV.
    C_MAIN_OSD = 6000
    C_MAIN_OSD_TITLE = 6001
    C_MAIN_OSD_TIME = 6002
    C_MAIN_OSD_DESCRIPTION = 6003
    C_MAIN_OSD_CHANNEL_LOGO = 6004
    C_MAIN_OSD_CHANNEL_TITLE = 6005
    def __new__(cls):
        # WindowXML requires the XML layout, add-on path and skin at creation time.
        return super(TVGuide, cls).__new__(cls, 'script-tvguide-main.xml', ADDON.getAddonInfo('path'), SKIN)
    def __init__(self):
        """Initialise window state; no controls exist until onInit()."""
        super(TVGuide, self).__init__()
        self.notification = None
        self.redrawingEPG = False
        self.isClosing = False
        # Buttons currently placed in the grid, paired with their programmes.
        self.controlAndProgramList = list()
        # Control ids getControl() may legitimately fail to find.
        self.ignoreMissingControlIds = list()
        self.channelIdx = 0
        self.focusPoint = Point()
        self.epgView = EPGView()
        self.streamingService = streaming.StreamsService(ADDON)
        self.player = xbmc.Player()
        self.database = None
        # File persisting the playback state across add-on restarts.
        self.proc_file = xbmc.translatePath(os.path.join(ADDON.getAddonInfo('profile'), 'proc'))
        if not os.path.exists(self.proc_file):
            self.reset_playing()
        self.mode = MODE_EPG
        self.currentChannel = None
        # OSD only makes sense when playback stays inside this window.
        self.osdEnabled = ADDON.getSetting('enable.osd') == 'true' and ADDON.getSetting(
            'alternative.playback') != 'true'
        self.alternativePlayback = ADDON.getSetting('alternative.playback') == 'true'
        self.osdChannel = None
        self.osdProgram = None
        # find nearest half hour
        self.viewStartDate = datetime.datetime.today()
        self.viewStartDate -= datetime.timedelta(minutes=self.viewStartDate.minute % 30,
                                                 seconds=self.viewStartDate.second)
def getControl(self, controlId):
try:
return super(TVGuide, self).getControl(controlId)
except:
if controlId in self.ignoreMissingControlIds:
return None
if not self.isClosing:
self.close()
return None
def close(self):
if not self.isClosing:
self.isClosing = True
if self.player.isPlaying():
if ADDON.getSetting('background.stream') == 'false':
self.reset_playing()
self.player.stop()
if self.database:
self.database.close(super(TVGuide, self).close)
else:
super(TVGuide, self).close()
    def onInit(self):
        """Window start-up: measure the grid, restore playback state if a
        stream survived a restart, and kick off the first EPG draw."""
        is_playing, play_data = self.check_is_playing()
        self._hideControl(self.C_MAIN_MOUSE_CONTROLS, self.C_MAIN_OSD)
        self._showControl(self.C_MAIN_EPG, self.C_MAIN_LOADING)
        self.setControlLabel(self.C_MAIN_LOADING_TIME_LEFT, strings(BACKGROUND_UPDATE_IN_PROGRESS))
        self.setFocusId(self.C_MAIN_LOADING_CANCEL)
        # The invisible view marker control defines the grid's geometry.
        control = self.getControl(self.C_MAIN_EPG_VIEW_MARKER)
        if control:
            left, top = control.getPosition()
            self.focusPoint.x = left
            self.focusPoint.y = top
            self.epgView.left = left
            self.epgView.top = top
            self.epgView.right = left + control.getWidth()
            self.epgView.bottom = top + control.getHeight()
            self.epgView.width = control.getWidth()
            self.epgView.cellHeight = control.getHeight() / CHANNELS_PER_PAGE
        if is_playing and 'idx' in play_data:
            # A stream started earlier is still running: jump to its channel.
            self.viewStartDate = datetime.datetime.today()
            self.viewStartDate -= datetime.timedelta(minutes=self.viewStartDate.minute % 30,
                                                     seconds=self.viewStartDate.second)
            self.channelIdx = play_data['idx']
            if self.database and 'y' in play_data:
                self.focusPoint.y = play_data['y']
            self.onRedrawEPG(self.channelIdx, self.viewStartDate,
                             focusFunction=self._findCurrentTimeslot)
        elif self.database:
            self.onRedrawEPG(self.channelIdx, self.viewStartDate)
        else:
            # First run: create the database; it redraws via onSourceInitialized.
            try:
                self.database = src.Database()
            except src.SourceNotConfiguredException:
                self.onSourceNotConfigured()
                self.close()
                return
            self.database.initialize(self.onSourceInitialized,
                                     self.isSourceInitializationCancelled)
        self.updateTimebar()
def onAction(self, action):
debug('Mode is: %s' % self.mode)
if self.mode == MODE_TV:
self.onActionTVMode(action)
elif self.mode == MODE_OSD:
self.onActionOSDMode(action)
elif self.mode == MODE_EPG:
self.onActionEPGMode(action)
def onActionTVMode(self, action):
if action.getId() == ACTION_PAGE_UP:
self._channelUp()
elif action.getId() == ACTION_PAGE_DOWN:
self._channelDown()
elif not self.osdEnabled:
pass # skip the rest of the actions
elif action.getId() in [ACTION_PARENT_DIR, KEY_NAV_BACK, KEY_CONTEXT_MENU, ACTION_PREVIOUS_MENU]:
self.onRedrawEPG(self.channelIdx, self.viewStartDate)
elif action.getId() == ACTION_SHOW_INFO:
self._showOsd()
def onActionOSDMode(self, action):
if action.getId() == ACTION_SHOW_INFO:
self._hideOsd()
elif action.getId() in [ACTION_PARENT_DIR, KEY_NAV_BACK, KEY_CONTEXT_MENU, ACTION_PREVIOUS_MENU]:
self._hideOsd()
self.onRedrawEPG(self.channelIdx, self.viewStartDate)
elif action.getId() == ACTION_SELECT_ITEM:
if self.playChannel(self.osdChannel, self.osdProgram):
self._hideOsd()
elif action.getId() == ACTION_PAGE_UP:
self._channelUp()
self._showOsd()
elif action.getId() == ACTION_PAGE_DOWN:
self._channelDown()
self._showOsd()
elif action.getId() == ACTION_UP:
self.osdChannel = self.database.getPreviousChannel(self.osdChannel)
self.osdProgram = self.database.getCurrentProgram(self.osdChannel)
self._showOsd()
elif action.getId() == ACTION_DOWN:
self.osdChannel = self.database.getNextChannel(self.osdChannel)
self.osdProgram = self.database.getCurrentProgram(self.osdChannel)
self._showOsd()
elif action.getId() == ACTION_LEFT:
previousProgram = self.database.getPreviousProgram(self.osdProgram)
if previousProgram:
self.osdProgram = previousProgram
self._showOsd()
elif action.getId() == ACTION_RIGHT:
nextProgram = self.database.getNextProgram(self.osdProgram)
if nextProgram:
self.osdProgram = nextProgram
self._showOsd()
    def onActionEPGMode(self, action):
        """Handle input while the programme grid is shown."""
        if action.getId() in [ACTION_PARENT_DIR, KEY_NAV_BACK]:
            self.close()
            return
        # catch the ESC key
        elif action.getId() == ACTION_PREVIOUS_MENU and action.getButtonCode() == KEY_ESC:
            self.close()
            return
        elif action.getId() == ACTION_MOUSE_MOVE:
            self._showControl(self.C_MAIN_MOUSE_CONTROLS)
            return
        elif action.getId() == KEY_CONTEXT_MENU:
            if self.player.isPlaying():
                self._hideEpg()
        # Work out the on-screen point to navigate from (centre of the
        # focused grid button, or the remembered focus point).
        controlInFocus = None
        currentFocus = self.focusPoint
        try:
            controlInFocus = self.getFocus()
            if controlInFocus in [elem.control for elem in self.controlAndProgramList]:
                (left, top) = controlInFocus.getPosition()
                currentFocus = Point()
                currentFocus.x = left + (controlInFocus.getWidth() / 2)
                currentFocus.y = top + (controlInFocus.getHeight() / 2)
        except Exception:
            # Nothing focused: pick the control under the focus point (or the
            # first one) and stop — this action just restores focus.
            control = self._findControlAt(self.focusPoint)
            if control is None and len(self.controlAndProgramList) > 0:
                control = self.controlAndProgramList[0].control
            if control is not None:
                self.setFocus(control)
                return
        if action.getId() == ACTION_LEFT:
            self._left(currentFocus)
        elif action.getId() == ACTION_RIGHT:
            self._right(currentFocus)
        elif action.getId() == ACTION_UP:
            self._up(currentFocus)
        elif action.getId() == ACTION_DOWN:
            self._down(currentFocus)
        elif action.getId() == ACTION_NEXT_ITEM:
            self._nextDay()
        elif action.getId() == ACTION_PREV_ITEM:
            self._previousDay()
        elif action.getId() == ACTION_PAGE_UP:
            self._moveUp(CHANNELS_PER_PAGE)
        elif action.getId() == ACTION_PAGE_DOWN:
            self._moveDown(CHANNELS_PER_PAGE)
        elif action.getId() == ACTION_MOUSE_WHEEL_UP:
            self._moveUp(scrollEvent=True)
        elif action.getId() == ACTION_MOUSE_WHEEL_DOWN:
            self._moveDown(scrollEvent=True)
        elif action.getId() == KEY_HOME:
            # Jump back to "now", snapped to the previous half hour.
            self.viewStartDate = datetime.datetime.today()
            self.viewStartDate -= datetime.timedelta(minutes=self.viewStartDate.minute % 30,
                                                     seconds=self.viewStartDate.second)
            self.onRedrawEPG(self.channelIdx, self.viewStartDate)
        elif action.getId() in [KEY_CONTEXT_MENU, ACTION_PREVIOUS_MENU] and controlInFocus is not None:
            program = self._getProgramFromControl(controlInFocus)
            if program is not None:
                self._showContextMenu(program)
        else:
            xbmc.log('[script.ftvguide] Unhandled ActionId: ' + str(action.getId()), xbmc.LOGDEBUG)
    def onClick(self, controlId):
        """Mouse/enter activation: either a navigation button or a grid cell.

        Clicking a programme tunes to its channel; if no stream URL is known
        yet, stream detection is attempted first.
        """
        if controlId in [self.C_MAIN_LOADING_CANCEL, self.C_MAIN_MOUSE_EXIT]:
            self.close()
            return
        if self.isClosing:
            return
        if controlId == self.C_MAIN_MOUSE_HOME:
            # Back to "now", snapped to the previous half hour.
            self.viewStartDate = datetime.datetime.today()
            self.viewStartDate -= datetime.timedelta(minutes=self.viewStartDate.minute % 30,
                                                     seconds=self.viewStartDate.second)
            self.onRedrawEPG(self.channelIdx, self.viewStartDate)
            return
        elif controlId == self.C_MAIN_MOUSE_LEFT:
            self.viewStartDate -= datetime.timedelta(hours=2)
            self.onRedrawEPG(self.channelIdx, self.viewStartDate)
            return
        elif controlId == self.C_MAIN_MOUSE_UP:
            self._moveUp(count=CHANNELS_PER_PAGE)
            return
        elif controlId == self.C_MAIN_MOUSE_DOWN:
            self._moveDown(count=CHANNELS_PER_PAGE)
            return
        elif controlId == self.C_MAIN_MOUSE_RIGHT:
            self.viewStartDate += datetime.timedelta(hours=2)
            self.onRedrawEPG(self.channelIdx, self.viewStartDate)
            return
        program = self._getProgramFromControl(self.getControl(controlId))
        if program is None:
            return
        if not self.playChannel(program.channel, program):
            result = self.streamingService.detectStream(program.channel)
            if not result:
                # could not detect stream, show context menu
                self._showContextMenu(program)
            elif type(result) == str:
                # one single stream detected, save it and start streaming
                self.database.setCustomStreamUrl(program.channel, result)
                self.playChannel(program.channel, program)
            else:
                # multiple matches, let user decide
                # ChooseStreamAddonDialog is presumably defined later in this
                # file — TODO confirm.
                d = ChooseStreamAddonDialog(result)
                d.doModal()
                if d.stream is not None:
                    self.database.setCustomStreamUrl(program.channel, d.stream)
                    self.playChannel(program.channel, program)
    def _showContextMenu(self, program):
        """Open the per-programme popup menu and act on the chosen button."""
        self._hideControl(self.C_MAIN_MOUSE_CONTROLS)
        d = PopupMenu(self.database, program, not program.notificationScheduled)
        d.doModal()
        buttonClicked = d.buttonClicked
        del d
        if buttonClicked == PopupMenu.C_POPUP_REMIND:
            # Toggle the reminder and redraw so the cell colour updates.
            if program.notificationScheduled:
                self.notification.removeNotification(program)
            else:
                self.notification.addNotification(program)
            self.onRedrawEPG(self.channelIdx, self.viewStartDate)
        elif buttonClicked == PopupMenu.C_POPUP_CHOOSE_STREAM:
            d = StreamSetupDialog(self.database, program.channel)
            d.doModal()
            del d
        elif buttonClicked == PopupMenu.C_POPUP_PLAY:
            self.playChannel(program.channel, program)
        elif buttonClicked == PopupMenu.C_POPUP_CHANNELS:
            d = ChannelsMenu(self.database)
            d.doModal()
            del d
            self.onRedrawEPG(self.channelIdx, self.viewStartDate)
        elif buttonClicked == PopupMenu.C_POPUP_QUIT:
            self.close()
        elif buttonClicked == PopupMenu.C_POPUP_LIBMOV:
            xbmc.executebuiltin('ActivateWindow(Videos,videodb://movies/titles/)')
        elif buttonClicked == PopupMenu.C_POPUP_LIBTV:
            xbmc.executebuiltin('ActivateWindow(Videos,videodb://tvshows/titles/)')
        elif buttonClicked == PopupMenu.C_POPUP_VIDEOADDONS:
            xbmc.executebuiltin('ActivateWindow(Videos,addons://sources/video/)')
        elif buttonClicked == PopupMenu.C_POPUP_PLAY_BEGINNING:
            # Hand the title to plugin.video.meta for catch-up playback.
            # NOTE(review): uses Python 2's `unicode` — this module targets
            # a Python 2 Kodi interpreter.
            title = program.title.replace(" ", "%20").replace(",", "").replace(u"\u2013", "-")
            title = unicode.encode(title, "ascii", "ignore")
            if program.is_movie == "Movie":
                selection = 0
            elif program.season is not None:
                selection = 1
            else:
                selection = xbmcgui.Dialog().select("Choose media type", ["Search as Movie", "Search as TV Show"])
            if selection == 0:
                xbmc.executebuiltin("RunPlugin(plugin://plugin.video.meta/movies/play_by_name/%s/%s)" % (
                    title, program.language))
            elif selection == 1:
                if program.season and program.episode:
                    xbmc.executebuiltin("RunPlugin(plugin://plugin.video.meta/tv/play_by_name/%s/%s/%s/%s)" % (
                        title, program.season, program.episode, program.language))
                else:
                    xbmc.executebuiltin("RunPlugin(plugin://plugin.video.meta/tv/play_by_name_only/%s/%s)" % (
                        title, program.language))
def setFocusId(self, controlId):
control = self.getControl(controlId)
if control:
self.setFocus(control)
def setFocus(self, control):
debug('setFocus %d' % control.getId())
if control in [elem.control for elem in self.controlAndProgramList]:
debug('Focus before %s' % self.focusPoint)
(left, top) = control.getPosition()
if left > self.focusPoint.x or left + control.getWidth() < self.focusPoint.x:
self.focusPoint.x = left
self.focusPoint.y = top + (control.getHeight() / 2)
debug('New focus at %s' % self.focusPoint)
super(TVGuide, self).setFocus(control)
    def onFocus(self, controlId):
        """Populate the info pane for the programme whose cell got focus."""
        try:
            controlInFocus = self.getControl(controlId)
        except Exception:
            return
        program = self._getProgramFromControl(controlInFocus)
        if program is None:
            return
        title = '[B]%s[/B]' % program.title
        if program.season is not None and program.episode is not None:
            title += " [B]S%sE%s[/B]" % (program.season, program.episode)
        if program.is_movie == "Movie":
            title += " [B](Movie)[/B]"
        self.setControlLabel(self.C_MAIN_TITLE, title)
        if program.startDate or program.endDate:
            self.setControlLabel(self.C_MAIN_TIME,
                                 '[B]%s - %s[/B]' % (
                                     self.formatTime(program.startDate), self.formatTime(program.endDate)))
        else:
            self.setControlLabel(self.C_MAIN_TIME, '')
        if program.description:
            description = program.description
        else:
            description = strings(NO_DESCRIPTION)
        self.setControlText(self.C_MAIN_DESCRIPTION, description)
        if program.channel.logo is not None:
            self.setControlImage(self.C_MAIN_LOGO, program.channel.logo)
        else:
            self.setControlImage(self.C_MAIN_LOGO, '')
        if program.imageSmall is not None:
            self.setControlImage(self.C_MAIN_IMAGE, program.imageSmall)
        else:
            self.setControlImage(self.C_MAIN_IMAGE, 'tvguide-logo-epg.png')
        if ADDON.getSetting('program.background.enabled') == 'true' and program.imageLarge is not None:
            self.setControlImage(self.C_MAIN_BACKGROUND, program.imageLarge)
        # Focusing the grid while a background stream plays (and background
        # streaming is disabled) stops that stream.
        if self.player.isPlaying() and not self.osdEnabled and \
                ADDON.getSetting('background.stream') == 'false':
            self.reset_playing()
            self.player.stop()
def _left(self, currentFocus):
control = self._findControlOnLeft(currentFocus)
if control is not None:
self.setFocus(control)
elif control is None:
self.viewStartDate -= datetime.timedelta(hours=2)
self.focusPoint.x = self.epgView.right
self.onRedrawEPG(self.channelIdx, self.viewStartDate, focusFunction=self._findControlOnLeft)
def _right(self, currentFocus):
control = self._findControlOnRight(currentFocus)
if control is not None:
self.setFocus(control)
elif control is None:
self.viewStartDate += datetime.timedelta(hours=2)
self.focusPoint.x = self.epgView.left
self.onRedrawEPG(self.channelIdx, self.viewStartDate, focusFunction=self._findControlOnRight)
def _up(self, currentFocus):
currentFocus.x = self.focusPoint.x
control = self._findControlAbove(currentFocus)
if control is not None:
self.setFocus(control)
elif control is None:
self.focusPoint.y = self.epgView.bottom
self.onRedrawEPG(self.channelIdx - CHANNELS_PER_PAGE, self.viewStartDate,
focusFunction=self._findControlAbove)
def _down(self, currentFocus):
currentFocus.x = self.focusPoint.x
control = self._findControlBelow(currentFocus)
if control is not None:
self.setFocus(control)
elif control is None:
self.focusPoint.y = self.epgView.top
self.onRedrawEPG(self.channelIdx + CHANNELS_PER_PAGE, self.viewStartDate,
focusFunction=self._findControlBelow)
def _nextDay(self):
self.viewStartDate += datetime.timedelta(days=1)
self.onRedrawEPG(self.channelIdx, self.viewStartDate)
def _previousDay(self):
self.viewStartDate -= datetime.timedelta(days=1)
self.onRedrawEPG(self.channelIdx, self.viewStartDate)
def _moveUp(self, count=1, scrollEvent=False):
if scrollEvent:
self.onRedrawEPG(self.channelIdx - count, self.viewStartDate)
else:
self.focusPoint.y = self.epgView.bottom
self.onRedrawEPG(self.channelIdx - count, self.viewStartDate, focusFunction=self._findControlAbove)
def _moveDown(self, count=1, scrollEvent=False):
if scrollEvent:
self.onRedrawEPG(self.channelIdx + count, self.viewStartDate)
else:
self.focusPoint.y = self.epgView.top
self.onRedrawEPG(self.channelIdx + count, self.viewStartDate, focusFunction=self._findControlBelow)
def _channelUp(self):
channel = self.database.getNextChannel(self.currentChannel)
program = self.database.getCurrentProgram(channel)
self.playChannel(channel, program)
def _channelDown(self):
channel = self.database.getPreviousChannel(self.currentChannel)
program = self.database.getCurrentProgram(channel)
self.playChannel(channel, program)
    def playChannel(self, channel, program=None):
        """Start the stream for *channel*; returns True when a URL existed.

        plugin:// URLs are handed to Kodi's built-ins, everything else goes
        through the internal player.
        """
        self.currentChannel = channel
        wasPlaying = self.player.isPlaying()
        url = self.database.getStreamUrl(channel)
        if url:
            # Persist play state so it survives an add-on restart.
            self.set_playing()
            if str.startswith(url, "plugin://plugin.video.meta") and program is not None:
                import urllib
                title = urllib.quote(program.title)
                url += "/%s/%s" % (title, program.language)
            if url[0:9] == 'plugin://':
                if self.alternativePlayback:
                    xbmc.executebuiltin('XBMC.RunPlugin(%s)' % url)
                elif self.osdEnabled:
                    # ',1' keeps playback windowed so the OSD can overlay it.
                    xbmc.executebuiltin('PlayMedia(%s,1)' % url)
                else:
                    xbmc.executebuiltin('PlayMedia(%s)' % url)
            else:
                self.player.play(item=url, windowed=self.osdEnabled)
            if not wasPlaying:
                self._hideEpg()
            # Watch for playback ending in the background.
            threading.Timer(1, self.waitForPlayBackStopped).start()
        self.osdProgram = self.database.getCurrentProgram(self.currentChannel)
        return url is not None
def waitForPlayBackStopped(self):
for retry in range(0, 100):
time.sleep(0.1)
if self.player.isPlaying():
break
while self.player.isPlaying() and not xbmc.abortRequested and not self.isClosing:
time.sleep(0.5)
self.onPlayBackStopped()
    def _showOsd(self):
        """Fill in and display the on-screen display for the OSD programme."""
        if not self.osdEnabled:
            return
        if self.mode != MODE_OSD:
            # Entering OSD mode: start browsing from the playing channel.
            self.osdChannel = self.currentChannel
        if self.osdProgram is not None:
            self.setControlLabel(self.C_MAIN_OSD_TITLE, '[B]%s[/B]' % self.osdProgram.title)
            if self.osdProgram.startDate or self.osdProgram.endDate:
                self.setControlLabel(self.C_MAIN_OSD_TIME, '[B]%s - %s[/B]' % (
                    self.formatTime(self.osdProgram.startDate), self.formatTime(self.osdProgram.endDate)))
            else:
                self.setControlLabel(self.C_MAIN_OSD_TIME, '')
            self.setControlText(self.C_MAIN_OSD_DESCRIPTION, self.osdProgram.description)
            self.setControlLabel(self.C_MAIN_OSD_CHANNEL_TITLE, self.osdChannel.title)
            if self.osdProgram.channel.logo is not None:
                self.setControlImage(self.C_MAIN_OSD_CHANNEL_LOGO, self.osdProgram.channel.logo)
            else:
                self.setControlImage(self.C_MAIN_OSD_CHANNEL_LOGO, '')
        self.mode = MODE_OSD
        self._showControl(self.C_MAIN_OSD)
def _hideOsd(self):
self.mode = MODE_TV
self._hideControl(self.C_MAIN_OSD)
def _hideEpg(self):
self._hideControl(self.C_MAIN_EPG)
self.mode = MODE_TV
self._clearEpg()
    def onRedrawEPG(self, channelStart, startTime, focusFunction=None):
        """Rebuild the entire programme grid starting at *channelStart* /
        *startTime*, then restore focus via *focusFunction*."""
        if self.redrawingEPG or (self.database is not None and self.database.updateInProgress) or self.isClosing:
            debug('onRedrawEPG - already redrawing')
            return  # ignore redraw request while redrawing
        debug('onRedrawEPG')
        self.redrawingEPG = True
        self.mode = MODE_EPG
        self._showControl(self.C_MAIN_EPG)
        self.updateTimebar(scheduleTimer=False)
        # show Loading screen
        self.setControlLabel(self.C_MAIN_LOADING_TIME_LEFT, strings(CALCULATING_REMAINING_TIME))
        self._showControl(self.C_MAIN_LOADING)
        self.setFocusId(self.C_MAIN_LOADING_CANCEL)
        # remove existing controls
        self._clearEpg()
        try:
            self.channelIdx, channels, programs = self.database.getEPGView(channelStart, startTime,
                                                                           self.onSourceProgressUpdate,
                                                                           clearExistingProgramList=False)
        except src.SourceException:
            self.onEPGLoadError()
            return
        # Channels with no programme in this window get a placeholder cell.
        channelsWithoutPrograms = list(channels)
        # date and time row
        self.setControlLabel(self.C_MAIN_DATE, self.formatDate(self.viewStartDate, False))
        self.setControlLabel(self.C_MAIN_DATE_LONG, self.formatDate(self.viewStartDate, True))
        for col in range(1, 5):
            self.setControlLabel(4000 + col, self.formatTime(startTime))
            startTime += HALF_HOUR
        if programs is None:
            self.onEPGLoadError()
            return
        # set channel logo or text
        showLogo = ADDON.getSetting('logos.enabled') == 'true'
        for idx in range(0, CHANNELS_PER_PAGE):
            if idx >= len(channels):
                self.setControlImage(4110 + idx, ' ')
                self.setControlLabel(4010 + idx, ' ')
            else:
                channel = channels[idx]
                self.setControlLabel(4010 + idx, channel.title)
                if (channel.logo is not None and showLogo == True):
                    self.setControlImage(4110 + idx, channel.logo)
                else:
                    self.setControlImage(4110 + idx, ' ')
        for program in programs:
            idx = channels.index(program.channel)
            if program.channel in channelsWithoutPrograms:
                channelsWithoutPrograms.remove(program.channel)
            # Map the programme's time span onto grid pixels, clamped to the
            # visible two-hour window.
            startDelta = program.startDate - self.viewStartDate
            stopDelta = program.endDate - self.viewStartDate
            cellStart = self._secondsToXposition(startDelta.seconds)
            if startDelta.days < 0:
                cellStart = self.epgView.left
            cellWidth = self._secondsToXposition(stopDelta.seconds) - cellStart
            if cellStart + cellWidth > self.epgView.right:
                cellWidth = self.epgView.right - cellStart
            if cellWidth > 1:
                # Red texture marks programmes with a scheduled reminder.
                if program.notificationScheduled:
                    noFocusTexture = 'tvguide-program-red.png'
                    focusTexture = 'tvguide-program-red-focus.png'
                else:
                    noFocusTexture = 'tvguide-program-grey.png'
                    focusTexture = 'tvguide-program-grey-focus.png'
                if cellWidth < 25:
                    title = ''  # Text will overflow outside the button if it is too narrow
                else:
                    title = program.title
                control = xbmcgui.ControlButton(
                    cellStart,
                    self.epgView.top + self.epgView.cellHeight * idx,
                    cellWidth - 2,
                    self.epgView.cellHeight - 2,
                    title,
                    noFocusTexture=noFocusTexture,
                    focusTexture=focusTexture
                )
                self.controlAndProgramList.append(ControlAndProgram(control, program))
        for channel in channelsWithoutPrograms:
            idx = channels.index(channel)
            control = xbmcgui.ControlButton(
                self.epgView.left,
                self.epgView.top + self.epgView.cellHeight * idx,
                (self.epgView.right - self.epgView.left) - 2,
                self.epgView.cellHeight - 2,
                strings(NO_PROGRAM_AVAILABLE),
                noFocusTexture='tvguide-program-grey.png',
                focusTexture='tvguide-program-grey-focus.png'
            )
            program = src.Program(channel, strings(NO_PROGRAM_AVAILABLE), None, None, None)
            self.controlAndProgramList.append(ControlAndProgram(control, program))
        # add program controls
        if focusFunction is None:
            focusFunction = self._findControlAt
        focusControl = focusFunction(self.focusPoint)
        controls = [elem.control for elem in self.controlAndProgramList]
        self.addControls(controls)
        if focusControl is not None:
            debug('onRedrawEPG - setFocus %d' % focusControl.getId())
            self.setFocus(focusControl)
        self.ignoreMissingControlIds.extend([elem.control.getId() for elem in self.controlAndProgramList])
        if focusControl is None and len(self.controlAndProgramList) > 0:
            self.setFocus(self.controlAndProgramList[0].control)
        self._hideControl(self.C_MAIN_LOADING)
        self.redrawingEPG = False
def _clearEpg(self):
controls = [elem.control for elem in self.controlAndProgramList]
try:
self.removeControls(controls)
except RuntimeError:
for elem in self.controlAndProgramList:
try:
self.removeControl(elem.control)
except RuntimeError:
pass # happens if we try to remove a control that doesn't exist
del self.controlAndProgramList[:]
    def onEPGLoadError(self):
        """Tell the user the guide data failed to load, then shut down."""
        self.redrawingEPG = False
        self._hideControl(self.C_MAIN_LOADING)
        xbmcgui.Dialog().ok(strings(LOAD_ERROR_TITLE), strings(LOAD_ERROR_LINE1), strings(LOAD_ERROR_LINE2))
        self.close()
    def onSourceNotConfigured(self):
        """Tell the user no EPG source is configured, then shut down."""
        self.redrawingEPG = False
        self._hideControl(self.C_MAIN_LOADING)
        xbmcgui.Dialog().ok(strings(LOAD_ERROR_TITLE), strings(LOAD_ERROR_LINE1), strings(CONFIGURATION_ERROR_LINE2))
        self.close()
    def isSourceInitializationCancelled(self):
        # Truthy when start-up should abort: Kodi is exiting or we are closing.
        return xbmc.abortRequested or self.isClosing
def onSourceInitialized(self, success):
if success:
self.notification = Notification(self.database, ADDON.getAddonInfo('path'))
self.onRedrawEPG(0, self.viewStartDate)
    def onSourceProgressUpdate(self, percentageComplete):
        """Progress callback during guide import; updates the loading bar and
        a rough time-left estimate. Returns False to cancel the import."""
        control = self.getControl(self.C_MAIN_LOADING_PROGRESS)
        if percentageComplete < 1:
            # Import just started: remember when, for the time estimate.
            if control:
                control.setPercent(1)
            self.progressStartTime = datetime.datetime.now()
            self.progressPreviousPercentage = percentageComplete
        elif percentageComplete != self.progressPreviousPercentage:
            if control:
                control.setPercent(percentageComplete)
            self.progressPreviousPercentage = percentageComplete
            delta = datetime.datetime.now() - self.progressStartTime
            if percentageComplete < 20:
                # Too early for a reliable extrapolation.
                self.setControlLabel(self.C_MAIN_LOADING_TIME_LEFT, strings(CALCULATING_REMAINING_TIME))
            else:
                secondsLeft = int(delta.seconds) / float(percentageComplete) * (100.0 - percentageComplete)
                if secondsLeft > 30:
                    # Round down to 10s steps to avoid a jittery countdown.
                    secondsLeft -= secondsLeft % 10
                self.setControlLabel(self.C_MAIN_LOADING_TIME_LEFT, strings(TIME_LEFT) % secondsLeft)
        return not xbmc.abortRequested and not self.isClosing
def check_is_playing(self):
is_playing = self.player.isPlaying()
play_data = {}
if not self.isClosing:
f = open(self.proc_file, 'r')
data = f.read()
if len(data) > 0:
is_playing = True
play_data = json.loads(data)
f.close()
debug('[%s] Checking Play-State... is_playing: %s, data: %s '
% (ADDON.getAddonInfo('id'), str(is_playing), str(play_data)))
return is_playing, play_data
def set_playing(self):
f = open(self.proc_file, 'w')
data = {'timestamp': datetime.datetime.now().strftime('%Y%m%d%H%M%S'),
'y': self.focusPoint.y, 'idx': self.channelIdx}
f.write(json.dumps(data))
f.close()
    def reset_playing(self):
        # Delegates to utils.reset_playing(), which presumably clears the
        # persisted play-state proc file — TODO confirm in utils module.
        reset_playing()
    def onPlayBackStopped(self):
        """Called (from waitForPlayBackStopped) when playback ends: hide the
        OSD, snap the view back to "now" and clear the persisted state."""
        if not self.player.isPlaying() and not self.isClosing:
            is_playing, play_data = self.check_is_playing()
            self._hideControl(self.C_MAIN_OSD)
            self.viewStartDate = datetime.datetime.today()
            self.viewStartDate -= datetime.timedelta(minutes=self.viewStartDate.minute % 30,
                                                     seconds=self.viewStartDate.second)
            if is_playing and 'idx' in play_data:
                # Restore the grid position that was saved when playback began.
                self.viewStartDate = datetime.datetime.today()
                self.viewStartDate -= datetime.timedelta(minutes=self.viewStartDate.minute % 30,
                                                         seconds=self.viewStartDate.second)
                self.channelIdx = play_data['idx']
                if self.database and 'y' in play_data:
                    self.focusPoint.y = play_data['y']
            self.onRedrawEPG(self.channelIdx, self.viewStartDate,
                             focusFunction=self._findCurrentTimeslot)
            self.reset_playing()
def _secondsToXposition(self, seconds):
return self.epgView.left + (seconds * self.epgView.width / 7200)
def _findControlOnRight(self, point):
distanceToNearest = 10000
nearestControl = None
for elem in self.controlAndProgramList:
control = elem.control
(left, top) = control.getPosition()
x = left + (control.getWidth() / 2)
y = top + (control.getHeight() / 2)
if point.x < x and point.y == y:
distance = abs(point.x - x)
if distance < distanceToNearest:
distanceToNearest = distance
nearestControl = control
return nearestControl
def _findControlOnLeft(self, point):
distanceToNearest = 10000
nearestControl = None
for elem in self.controlAndProgramList:
control = elem.control
(left, top) = control.getPosition()
x = left + (control.getWidth() / 2)
y = top + (control.getHeight() / 2)
if point.x > x and point.y == y:
distance = abs(point.x - x)
if distance < distanceToNearest:
distanceToNearest = distance
nearestControl = control
return nearestControl
def _findControlBelow(self, point):
nearestControl = None
for elem in self.controlAndProgramList:
control = elem.control
(leftEdge, top) = control.getPosition()
y = top + (control.getHeight() / 2)
if point.y < y:
rightEdge = leftEdge + control.getWidth()
if leftEdge <= point.x < rightEdge and (
nearestControl is None or nearestControl.getPosition()[1] > top):
nearestControl = control
return nearestControl
def _findControlAbove(self, point):
nearestControl = None
for elem in self.controlAndProgramList:
control = elem.control
(leftEdge, top) = control.getPosition()
y = top + (control.getHeight() / 2)
if point.y > y:
rightEdge = leftEdge + control.getWidth()
if leftEdge <= point.x < rightEdge and (
nearestControl is None or nearestControl.getPosition()[1] < top):
nearestControl = control
return nearestControl
def _findControlAt(self, point):
for elem in self.controlAndProgramList:
control = elem.control
(left, top) = control.getPosition()
bottom = top + control.getHeight()
right = left + control.getWidth()
if left <= point.x <= right and top <= point.y <= bottom:
return control
return None
def _findCurrentTimeslot(self, point):
y = point.y
control = self.getControl(self.C_MAIN_TIMEBAR)
if control:
(x, _) = control.getPosition()
else:
x = point.x
for elem in self.controlAndProgramList:
control = elem.control
(left, top) = control.getPosition()
bottom = top + control.getHeight()
right = left + control.getWidth()
if left <= x <= right and top <= y <= bottom:
return control
return None
def _getProgramFromControl(self, control):
for elem in self.controlAndProgramList:
if elem.control == control:
return elem.program
return None
def _hideControl(self, *controlIds):
"""
Visibility is inverted in skin
"""
for controlId in controlIds:
control = self.getControl(controlId)
if control:
control.setVisible(True)
def _showControl(self, *controlIds):
"""
Visibility is inverted in skin
"""
for controlId in controlIds:
control = self.getControl(controlId)
if control:
control.setVisible(False)
def formatTime(self, timestamp):
if timestamp:
format = xbmc.getRegion('time').replace(':%S', '').replace('%H%H', '%H')
return timestamp.strftime(format)
else:
return ''
def formatDate(self, timestamp, longdate=False):
if timestamp:
if longdate == True:
format = xbmc.getRegion('datelong')
else:
format = xbmc.getRegion('dateshort')
return timestamp.strftime(format)
else:
return ''
def setControlImage(self, controlId, image):
control = self.getControl(controlId)
if control:
control.setImage(image.encode('utf-8'))
def setControlLabel(self, controlId, label):
control = self.getControl(controlId)
if control and label:
control.setLabel(label)
def setControlText(self, controlId, text):
control = self.getControl(controlId)
if control:
control.setText(text)
    def updateTimebar(self, scheduleTimer=True):
        """Move the timebar control to the current time and reschedule itself.

        @param scheduleTimer: when True, re-arms a 1-second threading.Timer so
            the bar keeps tracking the clock until the window closes.
        """
        # move timebar to current time
        timeDelta = datetime.datetime.today() - self.viewStartDate
        control = self.getControl(self.C_MAIN_TIMEBAR)
        if control:
            (x, y) = control.getPosition()
            try:
                # Sometimes raises:
                # exceptions.RuntimeError: Unknown exception thrown from the call "setVisible"
                # Bar is only visible while the view shows today (timeDelta.days == 0).
                control.setVisible(timeDelta.days == 0)
            except:
                pass
            # keep y fixed; x derives from seconds elapsed since view start
            control.setPosition(self._secondsToXposition(timeDelta.seconds), y)
        # re-arm unless Kodi is shutting down or the window is closing
        if scheduleTimer and not xbmc.abortRequested and not self.isClosing:
            threading.Timer(1, self.updateTimebar).start()
class PopupMenu(xbmcgui.WindowXMLDialog):
    """Context menu shown for a program cell in the EPG grid.

    Offers play / choose-stream / remind / channels / quit buttons for
    self.program. Except for the remove-strm shortcut, the dialog does not
    perform the actions itself: it stores the clicked control id in
    self.buttonClicked, closes, and leaves handling to the caller.
    """
    # control ids from script-tvguide-menu.xml
    C_POPUP_PLAY = 4000
    C_POPUP_CHOOSE_STREAM = 4001
    C_POPUP_REMIND = 4002
    C_POPUP_CHANNELS = 4003
    C_POPUP_QUIT = 4004
    C_POPUP_PLAY_BEGINNING = 4005
    C_POPUP_CHANNEL_LOGO = 4100
    C_POPUP_CHANNEL_TITLE = 4101
    C_POPUP_PROGRAM_TITLE = 4102
    C_POPUP_LIBMOV = 80000
    C_POPUP_LIBTV = 80001
    C_POPUP_VIDEOADDONS = 80002
    def __new__(cls, database, program, showRemind):
        # WindowXMLDialog needs the xml filename/path/skin at construction time
        return super(PopupMenu, cls).__new__(cls, 'script-tvguide-menu.xml', ADDON.getAddonInfo('path'), SKIN)
    def __init__(self, database, program, showRemind):
        """
        @type database: source.Database
        @param program:
        @type program: source.Program
        @param showRemind: True to offer "remind", False to offer "don't remind"
        """
        super(PopupMenu, self).__init__()
        self.database = database
        self.program = program
        self.showRemind = showRemind
        self.buttonClicked = None  # control id of the last activated button
    def onInit(self):
        # populate the dialog with channel/program-specific labels and state
        playControl = self.getControl(self.C_POPUP_PLAY)
        remindControl = self.getControl(self.C_POPUP_REMIND)
        channelLogoControl = self.getControl(self.C_POPUP_CHANNEL_LOGO)
        channelTitleControl = self.getControl(self.C_POPUP_CHANNEL_TITLE)
        programTitleControl = self.getControl(self.C_POPUP_PROGRAM_TITLE)
        programPlayBeginningControl = self.getControl(self.C_POPUP_PLAY_BEGINNING)
        playControl.setLabel(strings(WATCH_CHANNEL, self.program.channel.title))
        if not self.program.channel.isPlayable():
            # channel has no stream; disable play and focus the stream chooser
            playControl.setEnabled(False)
            self.setFocusId(self.C_POPUP_CHOOSE_STREAM)
        if self.database.getCustomStreamUrl(self.program.channel):
            # a custom strm is configured, so the same button reads "remove"
            chooseStrmControl = self.getControl(self.C_POPUP_CHOOSE_STREAM)
            chooseStrmControl.setLabel(strings(REMOVE_STRM_FILE))
        if self.program.channel.logo is not None:
            # show either the channel logo or its title, never both
            channelLogoControl.setImage(self.program.channel.logo)
            channelTitleControl.setVisible(False)
        else:
            channelTitleControl.setLabel(self.program.channel.title)
            channelLogoControl.setVisible(False)
        programTitleControl.setLabel(self.program.title)
        if self.program.startDate:
            remindControl.setEnabled(True)
            if self.showRemind:
                remindControl.setLabel(strings(REMIND_PROGRAM))
            else:
                remindControl.setLabel(strings(DONT_REMIND_PROGRAM))
        else:
            # no start date -> no reminder possible
            remindControl.setEnabled(False)
    def onAction(self, action):
        # any back/context action dismisses the popup
        if action.getId() in [ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, KEY_NAV_BACK, KEY_CONTEXT_MENU]:
            self.close()
            return
    def onClick(self, controlId):
        if controlId == self.C_POPUP_CHOOSE_STREAM and self.database.getCustomStreamUrl(self.program.channel):
            # shortcut: remove the custom strm mapping in-place; dialog stays open
            self.database.deleteCustomStreamUrl(self.program.channel)
            chooseStrmControl = self.getControl(self.C_POPUP_CHOOSE_STREAM)
            chooseStrmControl.setLabel(strings(CHOOSE_STRM_FILE))
            if not self.program.channel.isPlayable():
                playControl = self.getControl(self.C_POPUP_PLAY)
                playControl.setEnabled(False)
        else:
            # record the choice and let the caller act on it after close()
            self.buttonClicked = controlId
            self.close()
    def onFocus(self, controlId):
        pass
class ChannelsMenu(xbmcgui.WindowXMLDialog):
    """Dialog for toggling channel visibility and reordering channels.

    A click toggles visibility; entering "selection mode" (context/left on the
    list) lets the up/down actions move the selected channel, recomputing the
    weight of every channel after each swap. Changes are persisted only via
    the save button.
    """
    # control ids from script-tvguide-channels.xml
    C_CHANNELS_LIST = 6000
    C_CHANNELS_SELECTION_VISIBLE = 6001
    C_CHANNELS_SELECTION = 6002
    C_CHANNELS_SAVE = 6003
    C_CHANNELS_CANCEL = 6004
    def __new__(cls, database):
        # WindowXMLDialog needs the xml filename/path/skin at construction time
        return super(ChannelsMenu, cls).__new__(cls, 'script-tvguide-channels.xml', ADDON.getAddonInfo('path'), SKIN)
    def __init__(self, database):
        """
        @type database: source.Database
        """
        super(ChannelsMenu, self).__init__()
        self.database = database
        # full channel list, including hidden channels
        self.channelList = database.getChannelList(onlyVisible=False)
        self.swapInProgress = False  # guards against re-entrant swaps
        self.selectedChannel = 0     # index remembered when entering selection mode
    def onInit(self):
        self.updateChannelList()
        self.setFocusId(self.C_CHANNELS_LIST)
    def onAction(self, action):
        if action.getId() in [ACTION_PARENT_DIR, KEY_NAV_BACK]:
            self.close()
            return
        if self.getFocusId() == self.C_CHANNELS_LIST and action.getId() in [ACTION_PREVIOUS_MENU, KEY_CONTEXT_MENU,
                                                                            ACTION_LEFT]:
            # enter selection mode: remember the row and move focus to the marker button
            listControl = self.getControl(self.C_CHANNELS_LIST)
            idx = listControl.getSelectedPosition()
            self.selectedChannel = idx
            buttonControl = self.getControl(self.C_CHANNELS_SELECTION)
            buttonControl.setLabel('[B]%s[/B]' % self.channelList[idx].title)
            self.getControl(self.C_CHANNELS_SELECTION_VISIBLE).setVisible(False)
            self.setFocusId(self.C_CHANNELS_SELECTION)
        elif self.getFocusId() == self.C_CHANNELS_SELECTION and action.getId() in [ACTION_RIGHT, ACTION_SELECT_ITEM]:
            # leave selection mode without moving anything
            self.getControl(self.C_CHANNELS_SELECTION_VISIBLE).setVisible(True)
            xbmc.sleep(350)
            self.setFocusId(self.C_CHANNELS_LIST)
        elif self.getFocusId() == self.C_CHANNELS_SELECTION and action.getId() in [ACTION_PREVIOUS_MENU,
                                                                                   KEY_CONTEXT_MENU]:
            # drop the remembered channel onto the currently highlighted row
            listControl = self.getControl(self.C_CHANNELS_LIST)
            idx = listControl.getSelectedPosition()
            self.swapChannels(self.selectedChannel, idx)
            self.getControl(self.C_CHANNELS_SELECTION_VISIBLE).setVisible(True)
            xbmc.sleep(350)
            self.setFocusId(self.C_CHANNELS_LIST)
        elif self.getFocusId() == self.C_CHANNELS_SELECTION and action.getId() == ACTION_UP:
            # nudge the selected channel one row up
            listControl = self.getControl(self.C_CHANNELS_LIST)
            idx = listControl.getSelectedPosition()
            if idx > 0:
                self.swapChannels(idx, idx - 1)
        elif self.getFocusId() == self.C_CHANNELS_SELECTION and action.getId() == ACTION_DOWN:
            # nudge the selected channel one row down
            listControl = self.getControl(self.C_CHANNELS_LIST)
            idx = listControl.getSelectedPosition()
            if idx < listControl.size() - 1:
                self.swapChannels(idx, idx + 1)
    def onClick(self, controlId):
        if controlId == self.C_CHANNELS_LIST:
            # toggle visibility of the clicked channel and update its icon
            listControl = self.getControl(self.C_CHANNELS_LIST)
            item = listControl.getSelectedItem()
            channel = self.channelList[int(item.getProperty('idx'))]
            channel.visible = not channel.visible
            if channel.visible:
                iconImage = 'tvguide-channel-visible.png'
            else:
                iconImage = 'tvguide-channel-hidden.png'
            item.setIconImage(iconImage)
        elif controlId == self.C_CHANNELS_SAVE:
            # persist the (possibly reordered) list; close() runs as callback
            self.database.saveChannelList(self.close, self.channelList)
        elif controlId == self.C_CHANNELS_CANCEL:
            self.close()
    def onFocus(self, controlId):
        pass
    def updateChannelList(self):
        """Rebuild the list control from self.channelList."""
        listControl = self.getControl(self.C_CHANNELS_LIST)
        listControl.reset()
        for idx, channel in enumerate(self.channelList):
            if channel.visible:
                iconImage = 'tvguide-channel-visible.png'
            else:
                iconImage = 'tvguide-channel-hidden.png'
            item = xbmcgui.ListItem('%3d. %s' % (idx + 1, channel.title), iconImage=iconImage)
            # remember the model index on the item for onClick lookups
            item.setProperty('idx', str(idx))
            listControl.addItem(item)
    def updateListItem(self, idx, item):
        """Refresh a single list item to mirror self.channelList[idx]."""
        channel = self.channelList[idx]
        item.setLabel('%3d. %s' % (idx + 1, channel.title))
        if channel.visible:
            iconImage = 'tvguide-channel-visible.png'
        else:
            iconImage = 'tvguide-channel-hidden.png'
        item.setIconImage(iconImage)
        item.setProperty('idx', str(idx))
    def swapChannels(self, fromIdx, toIdx):
        """Swap two channels in the model, renumber weights, refresh both rows."""
        if self.swapInProgress:
            return
        self.swapInProgress = True
        c = self.channelList[fromIdx]
        self.channelList[fromIdx] = self.channelList[toIdx]
        self.channelList[toIdx] = c
        # recalculate weight
        for idx, channel in enumerate(self.channelList):
            channel.weight = idx
        listControl = self.getControl(self.C_CHANNELS_LIST)
        self.updateListItem(fromIdx, listControl.getListItem(fromIdx))
        self.updateListItem(toIdx, listControl.getListItem(toIdx))
        listControl.selectItem(toIdx)
        # brief pause so repeated up/down actions do not pile up
        xbmc.sleep(50)
        self.swapInProgress = False
class StreamSetupDialog(xbmcgui.WindowXMLDialog):
    """Dialog for assigning a stream to a channel from one of three tabs:
    a .strm file, a Kodi favourite, or a stream provided by an installed addon.

    The active tab is tracked via a hidden label control
    (C_STREAM_VISIBILITY_MARKER) because the skin inverts visibility.
    """
    # control ids from script-tvguide-streamsetup.xml
    C_STREAM_STRM_TAB = 101
    C_STREAM_FAVOURITES_TAB = 102
    C_STREAM_ADDONS_TAB = 103
    C_STREAM_STRM_BROWSE = 1001
    C_STREAM_STRM_FILE_LABEL = 1005
    C_STREAM_STRM_PREVIEW = 1002
    C_STREAM_STRM_OK = 1003
    C_STREAM_STRM_CANCEL = 1004
    C_STREAM_FAVOURITES = 2001
    C_STREAM_FAVOURITES_PREVIEW = 2002
    C_STREAM_FAVOURITES_OK = 2003
    C_STREAM_FAVOURITES_CANCEL = 2004
    C_STREAM_ADDONS = 3001
    C_STREAM_ADDONS_STREAMS = 3002
    C_STREAM_ADDONS_NAME = 3003
    C_STREAM_ADDONS_DESCRIPTION = 3004
    C_STREAM_ADDONS_PREVIEW = 3005
    C_STREAM_ADDONS_OK = 3006
    C_STREAM_ADDONS_CANCEL = 3007
    C_STREAM_VISIBILITY_MARKER = 100
    # marker values identifying the visible tab
    VISIBLE_STRM = 'strm'
    VISIBLE_FAVOURITES = 'favourites'
    VISIBLE_ADDONS = 'addons'
    def __new__(cls, database, channel):
        # WindowXMLDialog needs the xml filename/path/skin at construction time
        return super(StreamSetupDialog, cls).__new__(cls, 'script-tvguide-streamsetup.xml', ADDON.getAddonInfo('path'),
                                                     SKIN)
    def __init__(self, database, channel):
        """
        @type database: source.Database
        @type channel:source.Channel
        """
        super(StreamSetupDialog, self).__init__()
        self.database = database
        self.channel = channel
        self.player = xbmc.Player()
        self.previousAddonId = None  # last addon shown in the detail pane
        self.strmFile = None         # .strm path chosen via the browse button
        self.streamingService = streaming.StreamsService(ADDON)
    def close(self):
        # stop any running preview before tearing the dialog down
        if self.player.isPlaying():
            self.player.stop()
        super(StreamSetupDialog, self).close()
    def onInit(self):
        self.getControl(self.C_STREAM_VISIBILITY_MARKER).setLabel(self.VISIBLE_STRM)
        # favourites tab
        favourites = self.streamingService.loadFavourites()
        items = list()
        for label, value in favourites:
            item = xbmcgui.ListItem(label)
            item.setProperty('stream', value)
            items.append(item)
        listControl = self.getControl(StreamSetupDialog.C_STREAM_FAVOURITES)
        listControl.addItems(items)
        # addons tab: only addons that are actually installed
        items = list()
        for id in self.streamingService.getAddons():
            try:
                addon = xbmcaddon.Addon(id)  # raises Exception if addon is not installed
                item = xbmcgui.ListItem(addon.getAddonInfo('name'), iconImage=addon.getAddonInfo('icon'))
                item.setProperty('addon_id', id)
                items.append(item)
            except Exception:
                pass
        listControl = self.getControl(StreamSetupDialog.C_STREAM_ADDONS)
        listControl.addItems(items)
        self.updateAddonInfo()
    def onAction(self, action):
        if action.getId() in [ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, KEY_NAV_BACK, KEY_CONTEXT_MENU]:
            self.close()
            return
        elif self.getFocusId() == self.C_STREAM_ADDONS:
            # navigating the addon list: refresh the detail pane
            self.updateAddonInfo()
    def onClick(self, controlId):
        if controlId == self.C_STREAM_STRM_BROWSE:
            stream = xbmcgui.Dialog().browse(1, ADDON.getLocalizedString(30304), 'video', '.strm')
            if stream:
                self.database.setCustomStreamUrl(self.channel, stream)
                self.getControl(self.C_STREAM_STRM_FILE_LABEL).setText(stream)
                self.strmFile = stream
        elif controlId == self.C_STREAM_ADDONS_OK:
            listControl = self.getControl(self.C_STREAM_ADDONS_STREAMS)
            item = listControl.getSelectedItem()
            if item:
                stream = item.getProperty('stream')
                self.database.setCustomStreamUrl(self.channel, stream)
            self.close()
        elif controlId == self.C_STREAM_FAVOURITES_OK:
            listControl = self.getControl(self.C_STREAM_FAVOURITES)
            item = listControl.getSelectedItem()
            if item:
                stream = item.getProperty('stream')
                self.database.setCustomStreamUrl(self.channel, stream)
            self.close()
        elif controlId == self.C_STREAM_STRM_OK:
            self.database.setCustomStreamUrl(self.channel, self.strmFile)
            self.close()
        elif controlId in [self.C_STREAM_ADDONS_CANCEL, self.C_STREAM_FAVOURITES_CANCEL, self.C_STREAM_STRM_CANCEL]:
            self.close()
        elif controlId in [self.C_STREAM_ADDONS_PREVIEW, self.C_STREAM_FAVOURITES_PREVIEW, self.C_STREAM_STRM_PREVIEW]:
            if self.player.isPlaying():
                # preview running: stop it and restore the button labels
                self.player.stop()
                self.getControl(self.C_STREAM_ADDONS_PREVIEW).setLabel(strings(PREVIEW_STREAM))
                self.getControl(self.C_STREAM_FAVOURITES_PREVIEW).setLabel(strings(PREVIEW_STREAM))
                self.getControl(self.C_STREAM_STRM_PREVIEW).setLabel(strings(PREVIEW_STREAM))
                return
            # pick the stream from whichever tab is visible
            stream = None
            visible = self.getControl(self.C_STREAM_VISIBILITY_MARKER).getLabel()
            if visible == self.VISIBLE_ADDONS:
                listControl = self.getControl(self.C_STREAM_ADDONS_STREAMS)
                item = listControl.getSelectedItem()
                if item:
                    stream = item.getProperty('stream')
            elif visible == self.VISIBLE_FAVOURITES:
                listControl = self.getControl(self.C_STREAM_FAVOURITES)
                item = listControl.getSelectedItem()
                if item:
                    stream = item.getProperty('stream')
            elif visible == self.VISIBLE_STRM:
                stream = self.strmFile
            if stream is not None:
                self.player.play(item=stream, windowed=True)
                if self.player.isPlaying():
                    self.getControl(self.C_STREAM_ADDONS_PREVIEW).setLabel(strings(STOP_PREVIEW))
                    self.getControl(self.C_STREAM_FAVOURITES_PREVIEW).setLabel(strings(STOP_PREVIEW))
                    self.getControl(self.C_STREAM_STRM_PREVIEW).setLabel(strings(STOP_PREVIEW))
    def onFocus(self, controlId):
        # keep the hidden marker in sync with the focused tab
        if controlId == self.C_STREAM_STRM_TAB:
            self.getControl(self.C_STREAM_VISIBILITY_MARKER).setLabel(self.VISIBLE_STRM)
        elif controlId == self.C_STREAM_FAVOURITES_TAB:
            self.getControl(self.C_STREAM_VISIBILITY_MARKER).setLabel(self.VISIBLE_FAVOURITES)
        elif controlId == self.C_STREAM_ADDONS_TAB:
            self.getControl(self.C_STREAM_VISIBILITY_MARKER).setLabel(self.VISIBLE_ADDONS)
    def updateAddonInfo(self):
        """Refresh name/description and the stream list for the selected addon."""
        listControl = self.getControl(self.C_STREAM_ADDONS)
        selectedItem = listControl.getSelectedItem()
        if selectedItem is None:
            return
        addonId = selectedItem.getProperty('addon_id')
        if addonId == self.previousAddonId:
            return  # selection unchanged; avoid rebuilding the stream list
        self.previousAddonId = addonId
        addon = xbmcaddon.Addon(id=addonId)
        self.getControl(self.C_STREAM_ADDONS_NAME).setLabel('[B]%s[/B]' % addon.getAddonInfo('name'))
        self.getControl(self.C_STREAM_ADDONS_DESCRIPTION).setText(addon.getAddonInfo('description'))
        streams = self.streamingService.getAddonStreams(addonId)
        items = list()
        for (label, stream) in streams:
            # BUG FIX: the loop previously rebound 'item' (the selected addon item)
            # to each new ListItem, so the plugin.video.meta substitution only
            # applied to the first stream. Compare against the addon id captured
            # before the loop instead.
            if addonId == "plugin.video.meta":
                label = self.channel.title
                stream = stream.replace("<channel>", self.channel.title.replace(" ", "%20"))
            listItem = xbmcgui.ListItem(label)
            listItem.setProperty('stream', stream)
            items.append(listItem)
        streamsControl = self.getControl(StreamSetupDialog.C_STREAM_ADDONS_STREAMS)
        streamsControl.reset()
        streamsControl.addItems(items)
class ChooseStreamAddonDialog(xbmcgui.WindowXMLDialog):
    """Dialog listing addon-provided streams; the pick ends up in self.stream."""
    C_SELECTION_LIST = 1000
    def __new__(cls, addons):
        # WindowXMLDialog needs the xml filename/path/skin at construction time
        return super(ChooseStreamAddonDialog, cls).__new__(cls, 'script-tvguide-streamaddon.xml',
                                                           ADDON.getAddonInfo('path'), SKIN)
    def __init__(self, addons):
        super(ChooseStreamAddonDialog, self).__init__()
        self.addons = addons   # iterable of (addon id, label, stream url)
        self.stream = None     # chosen stream url, set by onClick
    def onInit(self):
        entries = list()
        for addonId, label, url in self.addons:
            addon = xbmcaddon.Addon(addonId)
            entry = xbmcgui.ListItem(label, addon.getAddonInfo('name'), addon.getAddonInfo('icon'))
            entry.setProperty('stream', url)
            entries.append(entry)
        listControl = self.getControl(ChooseStreamAddonDialog.C_SELECTION_LIST)
        listControl.addItems(entries)
        self.setFocus(listControl)
    def onAction(self, action):
        if action.getId() in [ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, KEY_NAV_BACK]:
            self.close()
    def onClick(self, controlId):
        if controlId == ChooseStreamAddonDialog.C_SELECTION_LIST:
            selected = self.getControl(ChooseStreamAddonDialog.C_SELECTION_LIST).getSelectedItem()
            self.stream = selected.getProperty('stream')
            self.close()
    def onFocus(self, controlId):
        pass
| JamesLinEngineer/RKMC | addons/script.ftvguide/gui.py | Python | gpl-2.0 | 60,618 |
from Tools.Profile import profile
profile("LOAD:ElementTree")
import xml.etree.cElementTree
import os
profile("LOAD:enigma_skin")
from enigma import eSize, ePoint, eRect, gFont, eWindow, eLabel, ePixmap, eWindowStyleManager, \
addFont, gRGB, eWindowStyleSkinned, getDesktop
from Components.config import ConfigSubsection, ConfigText, config
from Components.Converter.Converter import Converter
from Components.Sources.Source import Source, ObsoleteSource
from Tools.Directories import resolveFilename, SCOPE_SKIN, SCOPE_FONTS, SCOPE_CURRENT_SKIN, SCOPE_CONFIG, fileExists, SCOPE_SKIN_IMAGE
from Tools.Import import my_import
from Tools.LoadPixmap import LoadPixmap
from Components.RcModel import rc_model
from Components.SystemInfo import SystemInfo
# skin color name -> gRGB, filled by loadSingleSkinData from <colors> elements
colorNames = {}
# Predefined fonts, typically used in built-in screens and for components like
# the movie list and so.
# alias -> (face name, size, height, width); indexes 2/3 feed the 'h'/'w'
# units in parseCoordinate
fonts = {
	"Body": ("Regular", 18, 22, 16),
	"ChoiceList": ("Regular", 20, 24, 18),
}
# skin layout parameters, filled from <parameters> elements
parameters = {}
def dump(x, i=0):
print " " * i + str(x)
try:
for n in x.childNodes:
dump(n, i + 1)
except:
None
class SkinError(Exception):
	"""Raised for skin parsing/processing errors; names the active skin in str()."""
	def __init__(self, message):
		self.msg = message
	def __str__(self):
		return "{%s}: %s. Please contact the skin's author!" % (config.skin.primary_skin.value, self.msg)
dom_skins = [ ]  # (path prefix, parsed skin XML root) per loaded skin file, in load order
def addSkin(name, scope = SCOPE_SKIN):
# read the skin
filename = resolveFilename(scope, name)
if fileExists(filename):
mpath = os.path.dirname(filename) + "/"
try:
dom_skins.append((mpath, xml.etree.cElementTree.parse(filename).getroot()))
except:
print "[SKIN ERROR] error in %s" % filename
return False
else:
return True
return False
# get own skin_user_skinname.xml file, if exist
def skin_user_skinname():
	"""Return the per-skin user override filename (skin_user_<dir>.xml) in the
	config scope, or None when no such file exists."""
	primary = config.skin.primary_skin.value
	name = "skin_user_" + primary[:primary.rfind('/')] + ".xml"
	if fileExists(resolveFilename(SCOPE_CONFIG, name)):
		return name
	return None
# we do our best to always select the "right" value
# skins are loaded in order of priority: skin with
# highest priority is loaded last, usually the user-provided
# skin.
# currently, loadSingleSkinData (colors, bordersets etc.)
# are applied one-after-each, in order of ascending priority.
# the dom_skin will keep all screens in descending priority,
# so the first screen found will be used.
# example: loadSkin("nemesis_greenline/skin.xml")
# primary skin selection, with a HD -> SD fallback when PLi-HD is absent
config.skin = ConfigSubsection()
DEFAULT_SKIN = "PLi-HD/skin.xml"
# on SD hardware, PLi-HD will not be available
if not fileExists(resolveFilename(SCOPE_SKIN, DEFAULT_SKIN)):
	# in that case, fallback to Magic (which is an SD skin)
	DEFAULT_SKIN = "Magic/skin.xml"
config.skin.primary_skin = ConfigText(default=DEFAULT_SKIN)
profile("LoadSkin")
# Load skins in ascending priority order: user overrides first, then helper
# skins, then the configured primary skin, and skin_default.xml last as the
# final fallback for anything the others do not define.
res = None
name = skin_user_skinname()
if name:
	res = addSkin(name, SCOPE_CONFIG)
if not name or not res:
	addSkin('skin_user.xml', SCOPE_CONFIG)
# some boxes lie about their dimensions
addSkin('skin_box.xml')
# add optional discrete second infobar
addSkin('skin_second_infobar.xml')
display_skin_id = 1
addSkin('skin_display.xml')
addSkin('skin_text.xml')
addSkin('skin_subtitles.xml')
try:
	if not addSkin(config.skin.primary_skin.value):
		raise SkinError, "primary skin not found"
except Exception, err:
	# broken/missing primary skin: fall back to the default and persist that choice
	print "SKIN ERROR:", err
	skin = DEFAULT_SKIN
	if config.skin.primary_skin.value == skin:
		skin = 'skin.xml'
	print "defaulting to standard skin...", skin
	config.skin.primary_skin.value = skin
	addSkin(skin)
	del skin
addSkin('skin_default.xml')
profile("LoadSkinDefaultDone")
#
# Convert a string into a number. Used to convert object position and size attributes into a number
# s is the input string.
# e is the parent object size to do relative calculations on parent
# size is the size of the object size (e.g. width or height)
# font is a font object to calculate relative to font sizes
# Note some constructs for speeding up simple cases that are very common.
# Can do things like: 10+center-10w+4%
# To center the widget on the parent widget,
# but move forward 10 pixels and 4% of parent width
# and 10 character widths backward
# Multiplication, division and subexpressions are also allowed: 3*(e-c/2)
#
# Usage:  center : center the object on parent based on parent size and object size
#         e      : take the parent size/width
#         c      : take the center point of parent size/width
#         %      : take given percentage of parent size/width
#         w      : multiply by current font width
#         h      : multiply by current font height
#
def parseCoordinate(s, e, size=0, font=None):
s = s.strip()
if s == "center": # for speed, can be common case
val = (e - size)/2
elif s == '*':
return None
else:
try:
val = int(s) # for speed
except:
if 't' in s:
s = s.replace("center", str((e-size)/2.0))
if 'e' in s:
s = s.replace("e", str(e))
if 'c' in s:
s = s.replace("c", str(e/2.0))
if 'w' in s:
s = s.replace("w", "*" + str(fonts[font][3]))
if 'h' in s:
s = s.replace("h", "*" + str(fonts[font][2]))
if '%' in s:
s = s.replace("%", "*" + str(e/100.0))
try:
val = int(s) # for speed
except:
val = eval(s)
if val < 0:
return 0
return int(val) # make sure an integer value is returned
def getParentSize(object, desktop):
	"""Return the effective parent size of a widget, or the desktop size when the
	widget has no parent; an empty eSize() when nothing applies."""
	if not object:
		return eSize()
	parent = object.getParent()
	# For some widgets (e.g. ScrollLabel) the skin attributes are applied to a
	# child widget instead of the widget itself, so 'parent' here is really the
	# main widget. That main widget has no size yet (it is computed once the
	# child parses its skin attributes), which is how we detect the situation:
	# climb one more level to reach the actual parent.
	if parent and parent.size().isEmpty():
		parent = parent.getParent()
	if parent:
		return parent.size()
	if desktop:
		# widget has no parent: use the desktop size for relative coordinates
		return desktop.size()
	return eSize()
def parseValuePair(s, scale, object = None, desktop = None, size = None):
	"""Parse "x,y" into a scaled (x, y) tuple; e/c/% terms resolve against the parent."""
	x, y = s.split(',')
	parentsize = eSize()
	# only pay for the parent lookup when a relative term is actually used
	if object and any(ch in x or ch in y for ch in 'ce%'):
		parentsize = getParentSize(object, desktop)
	xval = parseCoordinate(x, parentsize.width(), size and size.width() or 0)
	yval = parseCoordinate(y, parentsize.height(), size and size.height() or 0)
	return (xval * scale[0][0] / scale[0][1], yval * scale[1][0] / scale[1][1])
def parsePosition(s, scale, object = None, desktop = None, size = None):
	"""Parse "x,y" into an ePoint, scaled and resolved against the parent."""
	return ePoint(*parseValuePair(s, scale, object, desktop, size))
def parseSize(s, scale, object = None, desktop = None):
	"""Parse "w,h" into an eSize, scaled and resolved against the parent."""
	return eSize(*parseValuePair(s, scale, object, desktop))
def parseFont(s, scale):
	"""Resolve a font alias or a "Name;size" spec into a gFont, scaling the size."""
	if s in fonts:
		name, size = fonts[s][:2]
	else:
		name, size = s.split(';')
	return gFont(name, int(size) * scale[0][0] / scale[0][1])
def parseColor(s):
	"""Parse '#aarrggbb' or a named color into a gRGB; raise SkinError when unknown."""
	if s[0] != '#':
		color = colorNames.get(s)
		if color is None:
			raise SkinError("color '%s' must be #aarrggbb or valid named color" % (s))
		return color
	return gRGB(int(s[1:], 16))
def collectAttributes(skinAttributes, node, context, skin_path_prefix=None, ignore=(), filenames=frozenset(("pixmap", "pointer", "seek_pointer", "backgroundPixmap", "selectionPixmap", "sliderPixmap", "scrollbarbackgroundPixmap"))):
	"""Collect (attribute, value) pairs from an XML node into skinAttributes.

	File-valued attributes are resolved against the skin path; position/size
	are held back and appended last (position before size) so window flags are
	applied before geometry. Values are stored UTF-8 encoded.
	"""
	# walk all attributes
	size = None
	pos = None
	font = None
	for attrib, value in node.items():
		if attrib not in ignore:
			if attrib in filenames:
				value = resolveFilename(SCOPE_CURRENT_SKIN, value, path_prefix=skin_path_prefix)
			# Bit of a hack this, really. When a window has a flag (e.g. wfNoBorder)
			# it needs to be set at least before the size is set, in order for the
			# window dimensions to be calculated correctly in all situations.
			# If wfNoBorder is applied after the size has been set, the window will fail to clear the title area.
			# Similar situation for a scrollbar in a listbox; when the scrollbar setting is applied after
			# the size, a scrollbar will not be shown until the selection moves for the first time
			if attrib == 'size':
				size = value.encode("utf-8")
			elif attrib == 'position':
				pos = value.encode("utf-8")
			elif attrib == 'font':
				# font is appended immediately but also remembered: the context
				# may need it to resolve w/h units in the position expression
				font = value.encode("utf-8")
				skinAttributes.append((attrib, font))
			else:
				skinAttributes.append((attrib, value.encode("utf-8")))
	if pos is not None:
		# let the layout context resolve both; may rewrite size as well
		pos, size = context.parse(pos, size, font)
		skinAttributes.append(('position', pos))
	if size is not None:
		skinAttributes.append(('size', size))
def morphRcImagePath(value):
	"""Swap the default remote-control images for the box-specific one."""
	if rc_model.rcIsDefault() is False:
		defaults = ('/usr/share/enigma2/skin_default/rc.png',
		            '/usr/share/enigma2/skin_default/rcold.png')
		if value in defaults:
			value = rc_model.getRcImg()
	return value
def loadPixmap(path, desktop):
	"""Load a pixmap, stripping any '#option' suffix; raise SkinError when missing."""
	path = path.split("#")[0]
	ptr = LoadPixmap(morphRcImagePath(path), desktop)
	if ptr is None:
		raise SkinError("pixmap file %s not found!" % (path))
	return ptr
class AttributeParser:
def __init__(self, guiObject, desktop, scale=((1,1),(1,1))):
self.guiObject = guiObject
self.desktop = desktop
self.scaleTuple = scale
def applyOne(self, attrib, value):
try:
getattr(self, attrib)(value)
except AttributeError:
print "[Skin] Attribute not implemented:", attrib, "value:", value
except SkinError, ex:
print "[Skin] Error:", ex
def applyAll(self, attrs):
for attrib, value in attrs:
self.applyOne(attrib, value)
def conditional(self, value):
pass
def position(self, value):
if isinstance(value, tuple):
self.guiObject.move(ePoint(*value))
else:
self.guiObject.move(parsePosition(value, self.scaleTuple, self.guiObject, self.desktop, self.guiObject.csize()))
def size(self, value):
if isinstance(value, tuple):
self.guiObject.resize(eSize(*value))
else:
self.guiObject.resize(parseSize(value, self.scaleTuple, self.guiObject, self.desktop))
def title(self, value):
self.guiObject.setTitle(_(value))
def text(self, value):
self.guiObject.setText(_(value))
def font(self, value):
self.guiObject.setFont(parseFont(value, self.scaleTuple))
def zPosition(self, value):
self.guiObject.setZPosition(int(value))
def itemHeight(self, value):
self.guiObject.setItemHeight(int(value))
def pixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setPixmap(ptr)
def backgroundPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setBackgroundPicture(ptr)
def selectionPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setSelectionPicture(ptr)
def sliderPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setSliderPicture(ptr)
def scrollbarbackgroundPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setScrollbarBackgroundPicture(ptr)
def alphatest(self, value):
self.guiObject.setAlphatest(
{ "on": 1,
"off": 0,
"blend": 2,
}[value])
def scale(self, value):
self.guiObject.setScale(1)
def orientation(self, value): # used by eSlider
try:
self.guiObject.setOrientation(*
{ "orVertical": (self.guiObject.orVertical, False),
"orTopToBottom": (self.guiObject.orVertical, False),
"orBottomToTop": (self.guiObject.orVertical, True),
"orHorizontal": (self.guiObject.orHorizontal, False),
"orLeftToRight": (self.guiObject.orHorizontal, False),
"orRightToLeft": (self.guiObject.orHorizontal, True),
}[value])
except KeyError:
print "oprientation must be either orVertical or orHorizontal!"
def valign(self, value):
try:
self.guiObject.setVAlign(
{ "top": self.guiObject.alignTop,
"center": self.guiObject.alignCenter,
"bottom": self.guiObject.alignBottom
}[value])
except KeyError:
print "valign must be either top, center or bottom!"
def halign(self, value):
try:
self.guiObject.setHAlign(
{ "left": self.guiObject.alignLeft,
"center": self.guiObject.alignCenter,
"right": self.guiObject.alignRight,
"block": self.guiObject.alignBlock
}[value])
except KeyError:
print "halign must be either left, center, right or block!"
def textOffset(self, value):
x, y = value.split(',')
self.guiObject.setTextOffset(ePoint(int(x) * self.scaleTuple[0][0] / self.scaleTuple[0][1], int(y) * self.scaleTuple[1][0] / self.scaleTuple[1][1]))
def flags(self, value):
flags = value.split(',')
for f in flags:
try:
fv = eWindow.__dict__[f]
self.guiObject.setFlag(fv)
except KeyError:
print "illegal flag %s!" % f
def backgroundColor(self, value):
self.guiObject.setBackgroundColor(parseColor(value))
def backgroundColorSelected(self, value):
self.guiObject.setBackgroundColorSelected(parseColor(value))
def foregroundColor(self, value):
self.guiObject.setForegroundColor(parseColor(value))
def foregroundColorSelected(self, value):
self.guiObject.setForegroundColorSelected(parseColor(value))
def shadowColor(self, value):
self.guiObject.setShadowColor(parseColor(value))
def selectionDisabled(self, value):
self.guiObject.setSelectionEnable(0)
def transparent(self, value):
self.guiObject.setTransparent(int(value))
def borderColor(self, value):
self.guiObject.setBorderColor(parseColor(value))
def borderWidth(self, value):
self.guiObject.setBorderWidth(int(value))
def scrollbarMode(self, value):
self.guiObject.setScrollbarMode(getattr(self.guiObject, value))
# { "showOnDemand": self.guiObject.showOnDemand,
# "showAlways": self.guiObject.showAlways,
# "showNever": self.guiObject.showNever,
# "showLeft": self.guiObject.showLeft
# }[value])
def enableWrapAround(self, value):
self.guiObject.setWrapAround(True)
def itemHeight(self, value):
self.guiObject.setItemHeight(int(value))
def pointer(self, value):
(name, pos) = value.split(':')
pos = parsePosition(pos, self.scaleTuple)
ptr = loadPixmap(name, self.desktop)
self.guiObject.setPointer(0, ptr, pos)
def seek_pointer(self, value):
(name, pos) = value.split(':')
pos = parsePosition(pos, self.scaleTuple)
ptr = loadPixmap(name, self.desktop)
self.guiObject.setPointer(1, ptr, pos)
def shadowOffset(self, value):
self.guiObject.setShadowOffset(parsePosition(value, self.scaleTuple))
def noWrap(self, value):
self.guiObject.setNoWrap(1)
def applySingleAttribute(guiObject, desktop, attrib, value, scale = ((1,1),(1,1))):
	"""Legacy single-attribute entry point; prefer applyAllAttributes."""
	AttributeParser(guiObject, desktop, scale).applyOne(attrib, value)
def applyAllAttributes(guiObject, desktop, attributes, scale):
	"""Apply a list of (attribute, value) pairs to guiObject."""
	AttributeParser(guiObject, desktop, scale).applyAll(attributes)
def loadSingleSkinData(desktop, skin, path_prefix):
"""loads skin data like colors, windowstyle etc."""
assert skin.tag == "skin", "root element in skin must be 'skin'!"
for c in skin.findall("output"):
id = c.attrib.get('id')
if id:
id = int(id)
else:
id = 0
if id == 0: # framebuffer
for res in c.findall("resolution"):
get_attr = res.attrib.get
xres = get_attr("xres")
if xres:
xres = int(xres)
else:
xres = 720
yres = get_attr("yres")
if yres:
yres = int(yres)
else:
yres = 576
bpp = get_attr("bpp")
if bpp:
bpp = int(bpp)
else:
bpp = 32
#print "Resolution:", xres,yres,bpp
from enigma import gMainDC
gMainDC.getInstance().setResolution(xres, yres)
desktop.resize(eSize(xres, yres))
if bpp != 32:
# load palette (not yet implemented)
pass
if yres >= 1080:
parameters["FileListName"] = (68,4,1000,34)
parameters["FileListIcon"] = (7,4,52,37)
parameters["FileListMultiName"] = (90,3,1000,32)
parameters["FileListMultiIcon"] = (45, 4, 30, 30)
parameters["FileListMultiLock"] = (2,0,36,36)
parameters["ChoicelistDash"] = (0,3,1000,30)
parameters["ChoicelistName"] = (68,3,1000,30)
parameters["ChoicelistIcon"] = (7,0,52,38)
parameters["PluginBrowserName"] = (180,8,38)
parameters["PluginBrowserDescr"] = (180,42,25)
parameters["PluginBrowserIcon"] = (15,8,150,60)
parameters["PluginBrowserDownloadName"] = (120,8,38)
parameters["PluginBrowserDownloadDescr"] = (120,42,25)
parameters["PluginBrowserDownloadIcon"] = (15,0,90,76)
parameters["ServiceInfo"] = (0,0,450,50)
parameters["ServiceInfoLeft"] = (0,0,450,45)
parameters["ServiceInfoRight"] = (450,0,1000,45)
parameters["SelectionListDescr"] = (45,3,1000,32)
parameters["SelectionListLock"] = (0,2,36,36)
parameters["ConfigListSeperator"] = 300
parameters["VirtualKeyboard"] = (68,68)
parameters["PartnerBoxEntryListName"] = (8,2,225,38)
parameters["PartnerBoxEntryListIP"] = (180,2,225,38)
parameters["PartnerBoxEntryListPort"] = (405,2,150,38)
parameters["PartnerBoxEntryListType"] = (615,2,150,38)
parameters["PartnerBoxTimerServicename"] = (0,0,45)
parameters["PartnerBoxTimerName"] = (0,42,30)
parameters["PartnerBoxE1TimerTime"] = (0,78,255,30)
parameters["PartnerBoxE1TimerState"] = (255,78,255,30)
parameters["PartnerBoxE2TimerTime"] = (0,78,225,30)
parameters["PartnerBoxE2TimerState"] = (225,78,225,30)
parameters["PartnerBoxE2TimerIcon"] = (1050,8,20,20)
parameters["PartnerBoxE2TimerIconRepeat"] = (1050,38,20,20)
parameters["PartnerBoxBouquetListName"] = (0,0,45)
parameters["PartnerBoxChannelListName"] = (0,0,45)
parameters["PartnerBoxChannelListTitle"] = (0,42,30)
parameters["PartnerBoxChannelListTime"] = (0,78,225,30)
parameters["HelpMenuListHlp"] = (0,0,900,42)
parameters["HelpMenuListExtHlp0"] = (0,0,900,39)
parameters["HelpMenuListExtHlp1"] = (0,42,900,30)
parameters["AboutHddSplit"] = 1
parameters["DreamexplorerName"] = (62,0,1200,38)
parameters["DreamexplorerIcon"] = (15,4,30,30)
parameters["PicturePlayerThumb"] = (30,285,45,300,30,25)
parameters["PlayListName"] = (38,2,1000,34)
parameters["PlayListIcon"] = (7,7,24,24)
parameters["SHOUTcastListItem"] = (30,27,35,96,35,33,60,32)
for skininclude in skin.findall("include"):
filename = skininclude.attrib.get("filename")
if filename:
skinfile = resolveFilename(SCOPE_CURRENT_SKIN, filename, path_prefix=path_prefix)
if not fileExists(skinfile):
skinfile = resolveFilename(SCOPE_SKIN_IMAGE, filename, path_prefix=path_prefix)
if fileExists(skinfile):
print "[SKIN] loading include:", skinfile
loadSkin(skinfile)
for c in skin.findall("colors"):
for color in c.findall("color"):
get_attr = color.attrib.get
name = get_attr("name")
color = get_attr("value")
if name and color:
colorNames[name] = parseColor(color)
#print "Color:", name, color
else:
raise SkinError("need color and name, got %s %s" % (name, color))
for c in skin.findall("fonts"):
for font in c.findall("font"):
get_attr = font.attrib.get
filename = get_attr("filename", "<NONAME>")
name = get_attr("name", "Regular")
scale = get_attr("scale")
if scale:
scale = int(scale)
else:
scale = 100
is_replacement = get_attr("replacement") and True or False
render = get_attr("render")
if render:
render = int(render)
else:
render = 0
resolved_font = resolveFilename(SCOPE_FONTS, filename, path_prefix=path_prefix)
if not fileExists(resolved_font): #when font is not available look at current skin path
skin_path = resolveFilename(SCOPE_CURRENT_SKIN, filename)
if fileExists(skin_path):
resolved_font = skin_path
addFont(resolved_font, name, scale, is_replacement, render)
#print "Font: ", resolved_font, name, scale, is_replacement
for alias in c.findall("alias"):
get = alias.attrib.get
try:
name = get("name")
font = get("font")
size = int(get("size"))
height = int(get("height", size)) # to be calculated some day
width = int(get("width", size))
global fonts
fonts[name] = (font, size, height, width)
except Exception, ex:
print "[SKIN] bad font alias", ex
for c in skin.findall("parameters"):
for parameter in c.findall("parameter"):
get = parameter.attrib.get
try:
name = get("name")
value = get("value")
parameters[name] = "," in value and map(int, value.split(",")) or int(value)
except Exception, ex:
print "[SKIN] bad parameter", ex
for c in skin.findall("subtitles"):
from enigma import eWidget, eSubtitleWidget
scale = ((1,1),(1,1))
for substyle in c.findall("sub"):
get_attr = substyle.attrib.get
font = parseFont(get_attr("font"), scale)
col = get_attr("foregroundColor")
if col:
foregroundColor = parseColor(col)
haveColor = 1
else:
foregroundColor = gRGB(0xFFFFFF)
haveColor = 0
col = get_attr("borderColor")
if col:
borderColor = parseColor(col)
else:
borderColor = gRGB(0)
borderwidth = get_attr("borderWidth")
if borderwidth is None:
# default: use a subtitle border
borderWidth = 3
else:
borderWidth = int(borderwidth)
face = eSubtitleWidget.__dict__[get_attr("name")]
eSubtitleWidget.setFontStyle(face, font, haveColor, foregroundColor, borderColor, borderWidth)
for windowstyle in skin.findall("windowstyle"):
style = eWindowStyleSkinned()
style_id = windowstyle.attrib.get("id")
if style_id:
style_id = int(style_id)
else:
style_id = 0
# defaults
font = gFont("Regular", 20)
offset = eSize(20, 5)
for title in windowstyle.findall("title"):
get_attr = title.attrib.get
offset = parseSize(get_attr("offset"), ((1,1),(1,1)))
font = parseFont(get_attr("font"), ((1,1),(1,1)))
style.setTitleFont(font);
style.setTitleOffset(offset)
#print " ", font, offset
for borderset in windowstyle.findall("borderset"):
bsName = str(borderset.attrib.get("name"))
for pixmap in borderset.findall("pixmap"):
get_attr = pixmap.attrib.get
bpName = get_attr("pos")
filename = get_attr("filename")
if filename and bpName:
png = loadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, filename, path_prefix=path_prefix), desktop)
style.setPixmap(eWindowStyleSkinned.__dict__[bsName], eWindowStyleSkinned.__dict__[bpName], png)
#print " borderset:", bpName, filename
for color in windowstyle.findall("color"):
get_attr = color.attrib.get
colorType = get_attr("name")
color = parseColor(get_attr("color"))
try:
style.setColor(eWindowStyleSkinned.__dict__["col" + colorType], color)
except:
raise SkinError("Unknown color %s" % (colorType))
#pass
#print " color:", type, color
x = eWindowStyleManager.getInstance()
x.setStyle(style_id, style)
for margin in skin.findall("margin"):
style_id = margin.attrib.get("id")
if style_id:
style_id = int(style_id)
else:
style_id = 0
r = eRect(0,0,0,0)
v = margin.attrib.get("left")
if v:
r.setLeft(int(v))
v = margin.attrib.get("top")
if v:
r.setTop(int(v))
v = margin.attrib.get("right")
if v:
r.setRight(int(v))
v = margin.attrib.get("bottom")
if v:
r.setBottom(int(v))
# the "desktop" parameter is hardcoded to the UI screen, so we must ask
# for the one that this actually applies to.
getDesktop(style_id).setMargins(r)
dom_screens = {}
def loadSkin(name, scope = SCOPE_SKIN):
 """Parse skin file *name* and register each of its <screen> elements.

 Used by plugins to add extra screen definitions at runtime.  Screens
 whose 'id' attribute does not match display_skin_id are dropped, and a
 screen name already present in dom_screens is NOT overwritten (the
 first definition wins).  Dropped elements are clear()ed to release
 their parsed XML payload.
 """
 # Now a utility for plugins to add skin data to the screens
 global dom_screens, display_skin_id
 filename = resolveFilename(scope, name)
 if fileExists(filename):
  # Remember the directory so relative pixmap paths can be resolved later.
  path = os.path.dirname(filename) + "/"
  for elem in xml.etree.cElementTree.parse(filename).getroot():
   if elem.tag == 'screen':
    name = elem.attrib.get('name', None)
    if name:
     sid = elem.attrib.get('id', None)
     if sid and (sid != display_skin_id):
      # not for this display
      elem.clear()
      continue
     if name in dom_screens:
      print "loadSkin: Screen already defined elsewhere:", name
      elem.clear()
     else:
      dom_screens[name] = (elem, path)
    else:
     # A <screen> without a name cannot be referenced; discard it.
     elem.clear()
   else:
    # Only <screen> children are meaningful here.
    elem.clear()
def loadSkinData(desktop):
 """One-shot initialisation: apply every queued skin DOM to *desktop*.

 Iterates dom_skins from lowest to highest priority (the list is built
 highest-first, hence the reverse()), loads colors/fonts/styles via
 loadSingleSkinData, and indexes every usable <screen> element into
 dom_screens.  dom_skins itself is deleted at the end since all screens
 have been extracted from it.
 """
 # Kinda hackish, but this is called once by mytest.py
 global dom_skins
 skins = dom_skins[:]
 skins.reverse()
 for (path, dom_skin) in skins:
  loadSingleSkinData(desktop, dom_skin, path)
  for elem in dom_skin:
   if elem.tag == 'screen':
    name = elem.attrib.get('name', None)
    if name:
     sid = elem.attrib.get('id', None)
     if sid and (sid != display_skin_id):
      # not for this display
      elem.clear()
      continue
     if name in dom_screens:
      # Kill old versions, save memory
      dom_screens[name][0].clear()
     # Higher-priority skins overwrite earlier definitions here.
     dom_screens[name] = (elem, path)
    else:
     # without name, it's useless!
     elem.clear()
   else:
    # non-screen element, no need for it any longer
    elem.clear()
 # no longer needed, we know where the screens are now.
 del dom_skins
class additionalWidget:
 """Plain record for an extra GUI element parsed from a skin.

 readSkin's process_elabel/process_epixmap fill in .widget (eLabel or
 ePixmap) and .skinAttributes, then append the instance to
 screen.additionalWidgets.
 """
 pass
# Class that makes a tuple look like something else. Some plugins just assume
# that size is a string and try to parse it. This class makes that work.
class SizeTuple(tuple):
 """A two-element tuple that also mimics the "x,y" string API.

 Some plugins assume a skin size/position is a string and call split()
 or strip() on it; these shims keep such plugins working when they are
 handed a tuple instead.
 """
 def split(self, *ignored):
  # str.split look-alike: a pair of stringified components.
  first, second = self[0], self[1]
  return (str(first), str(second))
 def strip(self, *ignored):
  # str.strip look-alike: render the pair as "a,b".
  return '%s,%s' % self
 def __str__(self):
  return '%s,%s' % self
class SkinContext:
 """Tracks the free rectangle (x, y, w, h) while laying out widgets.

 parse() resolves a child's position/size strings and *consumes* the
 space the child occupies, so subsequent siblings are laid out in the
 remaining area.
 """
 def __init__(self, parent=None, pos=None, size=None, font=None):
  # A child context starts from the rectangle its parent assigns to it;
  # the root context is created bare and filled in by the caller.
  # (Note: parent given but pos None leaves the attributes unset.)
  if parent is not None:
   if pos is not None:
    pos, size = parent.parse(pos, size, font)
    self.x, self.y = pos
    self.w, self.h = size
  else:
   self.x = None
   self.y = None
   self.w = None
   self.h = None
 def __str__(self):
  return "Context (%s,%s)+(%s,%s) " % (self.x, self.y, self.w, self.h)
 def parse(self, pos, size, font):
  """Return (pos, size) as SizeTuples and shrink the free rectangle.

  Keyword positions ("fill", "bottom", "top", "left", "right") dock the
  child against an edge; anything else is treated as an "x,y" pair.
  """
  if pos == "fill":
   # Child takes everything; nothing is left for siblings.
   pos = (self.x, self.y)
   size = (self.w, self.h)
   self.w = 0
   self.h = 0
  else:
   w,h = size.split(',')
   w = parseCoordinate(w, self.w, 0, font)
   h = parseCoordinate(h, self.h, 0, font)
   if pos == "bottom":
    pos = (self.x, self.y + self.h - h)
    size = (self.w, h)
    self.h -= h
   elif pos == "top":
    pos = (self.x, self.y)
    size = (self.w, h)
    self.h -= h
    self.y += h
   elif pos == "left":
    pos = (self.x, self.y)
    size = (w, self.h)
    self.x += w
    self.w -= w
   elif pos == "right":
    pos = (self.x + self.w - w, self.y)
    size = (w, self.h)
    self.w -= w
   else:
    # Explicit "x,y": resolved relative to the current free area,
    # and no space is consumed.
    size = (w, h)
    pos = pos.split(',')
    pos = (self.x + parseCoordinate(pos[0], self.w, size[0], font), self.y + parseCoordinate(pos[1], self.h, size[1], font))
  return (SizeTuple(pos), SizeTuple(size))
class SkinContextStack(SkinContext):
 """SkinContext variant that stacks children instead of aligning them.

 Keyword positions are resolved against the full area every time and
 the free rectangle is never consumed (self.x/y/w/h stay untouched),
 so siblings overlap rather than flow.
 """
 def parse(self, pos, size, font):
  if pos == "fill":
   return (SizeTuple((self.x, self.y)), SizeTuple((self.w, self.h)))
  width_s, height_s = size.split(',')
  width = parseCoordinate(width_s, self.w, 0, font)
  height = parseCoordinate(height_s, self.h, 0, font)
  if pos == "bottom":
   out_pos = (self.x, self.y + self.h - height)
   out_size = (self.w, height)
  elif pos == "top":
   out_pos = (self.x, self.y)
   out_size = (self.w, height)
  elif pos == "left":
   out_pos = (self.x, self.y)
   out_size = (width, self.h)
  elif pos == "right":
   out_pos = (self.x + self.w - width, self.y)
   out_size = (width, self.h)
  else:
   # Explicit "x,y" coordinate pair.
   out_size = (width, height)
   parts = pos.split(',')
   out_pos = (self.x + parseCoordinate(parts[0], self.w, out_size[0], font), self.y + parseCoordinate(parts[1], self.h, out_size[1], font))
  return (SizeTuple(out_pos), SizeTuple(out_size))
def readSkin(screen, skin, names, desktop):
 """Apply a skin to *screen*.

 Resolution order for the screen's XML: a named entry in dom_screens,
 a previously compiled screen.parsedSkin, then the uncompiled embedded
 screen.skin string/tuple.  The chosen XML is then walked and the
 results stored on the screen object (skinAttributes, renderer,
 additionalWidgets, onLayoutFinish applets).
 """
 if not isinstance(names, list):
  names = [names]
 # try all skins, first existing one have priority
 global dom_screens
 for n in names:
  myscreen, path = dom_screens.get(n, (None,None))
  if myscreen is not None:
   # use this name for debug output
   name = n
   break
 else:
  name = "<embedded-in-'%s'>" % screen.__class__.__name__
 # otherwise try embedded skin
 if myscreen is None:
  myscreen = getattr(screen, "parsedSkin", None)
 # try uncompiled embedded skin
 if myscreen is None and getattr(screen, "skin", None):
  skin = screen.skin
  print "[SKIN] Parsing embedded skin", name
  if (isinstance(skin, tuple)):
   # A tuple holds one candidate per display id; pick the matching one.
   for s in skin:
    candidate = xml.etree.cElementTree.fromstring(s)
    if candidate.tag == 'screen':
     sid = candidate.attrib.get('id', None)
     if (not sid) or (int(sid) == display_skin_id):
      myscreen = candidate
      break;
   else:
    print "[SKIN] Hey, no suitable screen!"
  else:
   myscreen = xml.etree.cElementTree.fromstring(skin)
  if myscreen:
   # Cache the parse so the next readSkin for this screen is cheap.
   screen.parsedSkin = myscreen
 if myscreen is None:
  print "[SKIN] No skin to read..."
  myscreen = screen.parsedSkin = xml.etree.cElementTree.fromstring("<screen></screen>")
 screen.skinAttributes = [ ]
 skin_path_prefix = getattr(screen, "skin_path", path)
 # Root context covers the whole desktop; the screen context is carved
 # out of it below.
 context = SkinContextStack()
 s = desktop.bounds()
 context.x = s.left()
 context.y = s.top()
 context.w = s.width()
 context.h = s.height()
 del s
 collectAttributes(screen.skinAttributes, myscreen, context, skin_path_prefix, ignore=("name",))
 context = SkinContext(context, myscreen.attrib.get('position'), myscreen.attrib.get('size'))
 screen.additionalWidgets = [ ]
 screen.renderer = [ ]
 visited_components = set()
 # now walk all widgets and stuff
 def process_none(widget, context):
  # Fallback for unknown tags: silently ignore them.
  pass
 def process_widget(widget, context):
  get_attr = widget.attrib.get
  # ok, we either have 1:1-mapped widgets ('old style'), or 1:n-mapped
  # widgets (source->renderer).
  wname = get_attr('name')
  wsource = get_attr('source')
  if wname is None and wsource is None:
   print "widget has no name and no source!"
   return
  if wname:
   #print "Widget name=", wname
   visited_components.add(wname)
   # get corresponding 'gui' object
   try:
    attributes = screen[wname].skinAttributes = [ ]
   except:
    raise SkinError("component with name '" + wname + "' was not found in skin of screen '" + name + "'!")
   # assert screen[wname] is not Source
   collectAttributes(attributes, widget, context, skin_path_prefix, ignore=('name',))
  elif wsource:
   # get corresponding source
   #print "Widget source=", wsource
   while True: # until we found a non-obsolete source
    # parse our current "wsource", which might specifiy a "related screen" before the dot,
    # for example to reference a parent, global or session-global screen.
    scr = screen
    # resolve all path components
    path = wsource.split('.')
    while len(path) > 1:
     scr = screen.getRelatedScreen(path[0])
     if scr is None:
      #print wsource
      #print name
      raise SkinError("specified related screen '" + wsource + "' was not found in screen '" + name + "'!")
     path = path[1:]
    # resolve the source.
    source = scr.get(path[0])
    if isinstance(source, ObsoleteSource):
     # however, if we found an "obsolete source", issue warning, and resolve the real source.
     print "WARNING: SKIN '%s' USES OBSOLETE SOURCE '%s', USE '%s' INSTEAD!" % (name, wsource, source.new_source)
     print "OBSOLETE SOURCE WILL BE REMOVED %s, PLEASE UPDATE!" % (source.removal_date)
     if source.description:
      print source.description
     wsource = source.new_source
    else:
     # otherwise, use that source.
     break
   if source is None:
    raise SkinError("source '" + wsource + "' was not found in screen '" + name + "'!")
   wrender = get_attr('render')
   if not wrender:
    raise SkinError("you must define a renderer with render= for source '%s'" % (wsource))
   # Chain the <convert> converters between the source and the renderer,
   # reusing an identical converter already attached to the source.
   for converter in widget.findall("convert"):
    ctype = converter.get('type')
    assert ctype, "'convert'-tag needs a 'type'-attribute"
    #print "Converter:", ctype
    try:
     parms = converter.text.strip()
    except:
     parms = ""
    #print "Params:", parms
    converter_class = my_import('.'.join(("Components", "Converter", ctype))).__dict__.get(ctype)
    c = None
    for i in source.downstream_elements:
     if isinstance(i, converter_class) and i.converter_arguments == parms:
      c = i
    if c is None:
     c = converter_class(parms)
     c.connect(source)
    source = c
   renderer_class = my_import('.'.join(("Components", "Renderer", wrender))).__dict__.get(wrender)
   renderer = renderer_class() # instantiate renderer
   renderer.connect(source) # connect to source
   attributes = renderer.skinAttributes = [ ]
   collectAttributes(attributes, widget, context, skin_path_prefix, ignore=('render', 'source'))
   screen.renderer.append(renderer)
 def process_applet(widget, context):
  # <applet> carries python code executed at a screen lifecycle point.
  try:
   codeText = widget.text.strip()
   widgetType = widget.attrib.get('type')
   code = compile(codeText, "skin applet", "exec")
  except Exception, ex:
   raise SkinError("applet failed to compile: " + str(ex))
  if widgetType == "onLayoutFinish":
   screen.onLayoutFinish.append(code)
  else:
   raise SkinError("applet type '%s' unknown!" % widgetType)
 def process_elabel(widget, context):
  # Decorative label with no backing component.
  w = additionalWidget()
  w.widget = eLabel
  w.skinAttributes = [ ]
  collectAttributes(w.skinAttributes, widget, context, skin_path_prefix, ignore=('name',))
  screen.additionalWidgets.append(w)
 def process_epixmap(widget, context):
  # Decorative pixmap with no backing component.
  w = additionalWidget()
  w.widget = ePixmap
  w.skinAttributes = [ ]
  collectAttributes(w.skinAttributes, widget, context, skin_path_prefix, ignore=('name',))
  screen.additionalWidgets.append(w)
 def process_screen(widget, context):
  for w in widget.getchildren():
   # 'conditional' lists component names; skip the child unless the
   # screen has at least one of them.
   conditional = w.attrib.get('conditional')
   if conditional and not [i for i in conditional.split(",") if i in screen.keys()]:
    continue
   p = processors.get(w.tag, process_none)
   try:
    p(w, context)
   except SkinError, e:
    print "[Skin] SKIN ERROR in screen '%s' widget '%s':" % (name, w.tag), e
 def process_panel(widget, context):
  # A panel either embeds a named screen by reference and/or nests its
  # own children in a fresh (stacking or flowing) layout context.
  n = widget.attrib.get('name')
  if n:
   try:
    s = dom_screens[n]
   except KeyError:
    print "[SKIN] Unable to find screen '%s' referred in screen '%s'" % (n, name)
   else:
    process_screen(s[0], context)
  layout = widget.attrib.get('layout')
  if layout == 'stack':
   cc = SkinContextStack
  else:
   cc = SkinContext
  try:
   c = cc(context, widget.attrib.get('position'), widget.attrib.get('size'), widget.attrib.get('font'))
  except Exception, ex:
   raise SkinError("Failed to create skincontext (%s,%s,%s) in %s: %s" % (widget.attrib.get('position'), widget.attrib.get('size'), widget.attrib.get('font'), context, ex) )
  process_screen(widget, c)
 # Tag -> handler dispatch for the XML walk above.
 processors = {
  None: process_none,
  "widget": process_widget,
  "applet": process_applet,
  "eLabel": process_elabel,
  "ePixmap": process_epixmap,
  "panel": process_panel
 }
 try:
  context.x = 0 # reset offsets, all components are relative to screen
  context.y = 0 # coordinates.
  process_screen(myscreen, context)
 except Exception, e:
  print "[Skin] SKIN ERROR in %s:" % name, e
 from Components.GUIComponent import GUIComponent
 nonvisited_components = [x for x in set(screen.keys()) - visited_components if isinstance(x, GUIComponent)]
 assert not nonvisited_components, "the following components in %s don't have a skin entry: %s" % (name, ', '.join(nonvisited_components))
 # This may look pointless, but it unbinds 'screen' from the nested scope. A better
 # solution is to avoid the nested scope above and use the context object to pass
 # things around.
 screen = None
 visited_components = None
| schleichdi2/openpli-e2 | skin.py | Python | gpl-2.0 | 35,598 |
# @author: Milinda Fernando
# School of Computing, University of Utah.
# generate all the slurm jobs for the sc16 poster, energy measurements,
import argparse
from subprocess import call
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog='slurm_pbs')
parser.add_argument('-p','--prefix', help='file prefix that you need to merge')
parser.add_argument('-s','--suffix',help='suffix of the file')
parser.add_argument('-n','--n',help='number of flies that you need to merge')
args=parser.parse_args()
tol_list=['0.000010','0.000100','0.001000','0.010000','0.100000','0.200000','0.300000','0.400000','0.500000']
#sendCommMap_M_tol_0.010000_npes_4096_pts_100000_ps_4096mat.csv
for tol in tol_list:
inFName=args.prefix+tol+args.suffix+'_'+args.n+'mat'+'.csv'
outFName=args.prefix+tol+args.suffix+'_'+args.n+'mat_comma'+'.csv'
fin=open(inFName,'r')
fout=open(outFName,'w')
for line in fin:
line=line.strip()
line=line.replace('\t',',')
fout.write(line+'\n')
fin.close()
fout.close()
print 'OK'
| paralab/Dendro4 | python_scripts_sc16/csv_mat.py | Python | gpl-2.0 | 1,046 |
#!/usr/bin/env python
"""
@package glider_utils
@file glider_utils.py
@author Stuart Pearce & Chris Wingard
@brief Module containing glider utiliities
"""
__author__ = 'Stuart Pearce & Chris Wingard'
__license__ = 'Apache 2.0'
import numpy as np
import warnings
#import pdb
import re
#import pygsw.vectors as gsw
class DbaDataParser(object):
    """Parse an ASCII-converted Slocum glider dba data file.

    The self-describing 'key: value' header lines are stored in
    ``hdr_dict`` and the data columns in ``data_dict``, keyed by column
    label.  Each ``data_dict`` value is a sub-dictionary with keys:

        'Name'            -- column label (same as the outer key)
        'Units'           -- unit string from the file
        'Number_of_Bytes' -- bytes used for the binary representation
        'Data'            -- numpy float array of the column values

    For 'lat'/'lon' columns an extra 'Data_deg' entry holds the values
    converted from ISO ddmm.mmm format to decimal degrees.

    Example::

        glider_data = DbaDataParser('glider_data_file.mbd')
        vn_data = glider_data.data_dict['variable_name']['Data']
    """
    def __init__(self, filename):
        self._fid = open(filename, 'r')
        self.hdr_dict = {}
        self.data_dict = {}
        # Close the file even when parsing raises (the original leaked
        # the handle on error).
        try:
            self._read_header()
            self._read_data()
        finally:
            self._fid.close()

    def _read_header(self):
        """Read the self-describing header lines into ``hdr_dict``.

        The header length defaults to 14 lines but is corrected as soon
        as the 'num_ascii_tags' entry is seen.
        """
        num_hdr_lines = 14
        header_re = re.compile(r'(.*): (.*)$')
        hdr_line = 1
        while hdr_line <= num_hdr_lines:
            match = header_re.match(self._fid.readline())
            if match:
                key = match.group(1)
                value = match.group(2).strip()
                if 'num_ascii_tags' in key:
                    num_hdr_lines = int(value)
                self.hdr_dict[key] = value
            hdr_line += 1

    def _read_data(self):
        """Read column labels, units, byte counts and the data columns."""
        column_labels = self._fid.readline().split()
        column_type = self._fid.readline().split()
        column_num_bytes = self._fid.readline().split()
        # Read every remaining row; np.array lets us slice out columns.
        data = [line.split() for line in self._fid.readlines()]
        # np.float was a deprecated alias of the builtin float and was
        # removed in NumPy 1.24; use float directly (same float64 array).
        data_array = np.array(data, dtype=float)
        # Warn if the described column count differs from what was read.
        num_columns = int(self.hdr_dict['sensors_per_cycle'])
        if num_columns != data_array.shape[1]:
            warnings.warn('Glider data file does not have the same '
                          'number of columns as described in header.\n'
                          'described %d, actual %d' % (num_columns,
                                                       data_array.shape[1])
                          )
        # Extract each column into the data dictionary.
        for ii in range(num_columns):
            units = column_type[ii]
            data_col = data_array[:, ii]
            self.data_dict[column_labels[ii]] = {
                'Name': column_labels[ii],
                'Units': units,
                'Number_of_Bytes': int(column_num_bytes[ii]),
                'Data': data_col
            }
            # Change ISO ddmm.mmm lat/lon format to decimal degrees.
            if units == 'lat' or units == 'lon':
                min_d100, deg = np.modf(data_col/100.)
                deg_col = deg + (min_d100*100.)/60.
                self.data_dict[column_labels[ii]]['Data_deg'] = deg_col
        self.data_keys = column_labels
class DataVizDataParser(DbaDataParser):
    """Parser for Data Visualizer export files.

    Shares the DbaDataParser interface (``hdr_dict``/``data_dict``
    populated at construction), but the file layout differs: one line
    naming the source file, then column labels, then units, then the
    data rows.  There is no 'key: value' header block, values are kept
    as strings, and the per-column sub-dictionaries carry no
    'Number_of_Bytes' entry.
    """
    def _read_header(self):
        # Data Visualizer files have no self-describing header block.
        pass

    def _read_data(self):
        """Read the column labels, units and data columns."""
        self._fid.readline()  # first line only names the originating file
        labels = self._fid.readline().split()
        units = self._fid.readline().split()
        rows = [line.split() for line in self._fid.readlines()]
        # Left as strings: the lat/lon columns are not plain numbers.
        table = np.array(rows)
        for idx, label in enumerate(labels):
            self.data_dict[label] = {
                'Name': label,
                'Units': units[idx],
                'Data': table[:, idx]
            }
        self.data_keys = labels
class GliderData(dict):
""" An object specifically to store Slocum glider data.
"""
def __init__():
dict.__init__ | s-pearce/glider-utilities | glider_utils/parsers/dbd_parsers.py | Python | gpl-2.0 | 6,823 |
#!/usr/bin/python
# Sync the first joystick's RetroArch button mapping into the atari800
# emulator configuration (~/.atari800.cfg) on a RetroPie system.
import os, struct, array
from fcntl import ioctl
# Defaults used when the RetroArch config does not define a binding.
SDL_JOY_0_SELECT = 8
SDL_JOY_0_START = 9
SDL_JOY_0_TRIGGER1 = 0
SDL_JOY_0_TRIGGER2 = 1
SDL_JOY_0_ASTERISK = 2
SDL_JOY_0_HASH = 3
SDL_JOY_0_SECOND_AXIS = 2
# Iterate over the joystick devices.
# print('Available devices:')
devices = sorted(os.listdir('/dev/input'))
joysticks = []
for fn in devices:
    if fn.startswith('js'):
        # print(' /dev/input/%s' % fn)
        joysticks.append("/dev/input/%s" % fn)
joysticks = sorted(joysticks)
print "First joystick is %s" % joysticks[0]
# Open the joystick device.
fn = joysticks[0]
# print('Opening %s...' % fn)
jsdev = open(fn, 'rb')
# Query the device name via the joystick ioctl (Python 2 'c' typecode).
buf = array.array('c', ['\0'] * 64)
ioctl(jsdev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
js_name = ("%s" % buf.tostring()).partition(b'\0')[0]
# print('Device name: %s' % js_name)
jsdev.close()
# RetroPie stores per-pad configs keyed by the device name without spaces.
js_cfg = "/opt/retropie/configs/all/retroarch-joypads/%s.cfg" % js_name.replace(" ", "")
print "Getting Retroarch configuration for %s" % js_cfg
# print(js_cfg)
f = open("%s" % js_cfg, "r")
content = f.read()
lines = content.split("\n")
# Each config line is 'key = "value"'; strip spaces/quotes and pick the
# bindings we care about.
for line in lines:
    if line:
        p = line.replace(" ", "").split("=")
        # print "Processing %s" % p[0]
        if p[0] == "input_select_btn":
            SDL_JOY_0_SELECT = p[1].replace('"', '')
        elif p[0] == "input_start_btn":
            SDL_JOY_0_START = p[1].replace('"', '')
        elif p[0] == "input_a_btn":
            SDL_JOY_0_TRIGGER1 = p[1].replace('"', '')
        elif p[0] == "input_b_btn":
            SDL_JOY_0_TRIGGER2 = p[1].replace('"', '')
        elif p[0] == "input_x_btn":
            SDL_JOY_0_ASTERISK = p[1].replace('"', '')
        elif p[0] == "input_y_btn":
            SDL_JOY_0_HASH = p[1].replace('"', '')
        elif p[0] == "input_r_x_minus_axis":
            # Axis values may carry a sign; the atari config wants it bare.
            SDL_JOY_0_SECOND_AXIS = p[1].replace('"', '').replace("-", "")
f.close()
atari800_cfg = "/home/pi/.atari800.cfg"
print "Updating configuration in %s with" % atari800_cfg
print "SDL_JOY_0_SELECT=%s" % SDL_JOY_0_SELECT
print "SDL_JOY_0_START=%s" % SDL_JOY_0_START
print "SDL_JOY_0_TRIGGER1=%s" % SDL_JOY_0_TRIGGER1
print "SDL_JOY_0_TRIGGER2=%s" % SDL_JOY_0_TRIGGER2
print "SDL_JOY_0_ASTERISK=%s" % SDL_JOY_0_ASTERISK
print "SDL_JOY_0_HASH=%s" % SDL_JOY_0_HASH
print "SDL_JOY_0_SECOND_AXIS=%s" % SDL_JOY_0_SECOND_AXIS
# Rewrite the atari800 config line by line, substituting only the
# SDL_JOY_0_* entries and keeping everything else untouched.
f = open("%s" % atari800_cfg, "r")
content = f.read()
f.close()
new_data = ""
lines = content.split("\n")
for line in lines:
    if line.startswith("SDL_JOY_0_SELECT"):
        line = "SDL_JOY_0_SELECT=%s" % SDL_JOY_0_SELECT
    elif line.startswith("SDL_JOY_0_START"):
        line = "SDL_JOY_0_START=%s" % SDL_JOY_0_START
    elif line.startswith("SDL_JOY_0_TRIGGER1"):
        line = "SDL_JOY_0_TRIGGER1=%s" % SDL_JOY_0_TRIGGER1
    elif line.startswith("SDL_JOY_0_TRIGGER2"):
        line = "SDL_JOY_0_TRIGGER2=%s" % SDL_JOY_0_TRIGGER2
    elif line.startswith("SDL_JOY_0_ASTERISK"):
        line = "SDL_JOY_0_ASTERISK=%s" % SDL_JOY_0_ASTERISK
    elif line.startswith("SDL_JOY_0_HASH"):
        line = "SDL_JOY_0_HASH=%s" % SDL_JOY_0_HASH
    elif line.startswith("SDL_JOY_0_SECOND_AXIS"):
        line = "SDL_JOY_0_SECOND_AXIS=%s" % SDL_JOY_0_SECOND_AXIS
    new_data += line + "\n"
# print new_data
f = open("%s" % atari800_cfg, 'w')
f.write(new_data)
f.close()
| jfroco/atari800-rpi | atari5200.py | Python | gpl-2.0 | 3,320 |
# -*- coding: utf-8 -*-
#
import uuid
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView, Response
from rest_framework.generics import (
ListAPIView, get_object_or_404, RetrieveAPIView
)
from common.permissions import IsOrgAdminOrAppUser, IsOrgAdmin
from common.utils import get_logger
from ...utils import (
AssetPermissionUtilV2
)
from ...hands import User, Asset, SystemUser
from ... import serializers
from ...models import Action
from .mixin import UserAssetPermissionMixin
logger = get_logger(__name__)
__all__ = [
'RefreshAssetPermissionCacheApi',
'UserGrantedAssetSystemUsersApi',
'ValidateUserAssetPermissionApi',
'GetUserAssetPermissionActionsApi',
]
class GetUserAssetPermissionActionsApi(UserAssetPermissionMixin,
                                       RetrieveAPIView):
    """Return the action bitmask a user is granted for one system user on one asset."""
    permission_classes = (IsOrgAdminOrAppUser,)
    serializer_class = serializers.ActionsSerializer
    def get_obj(self):
        # Subject user comes from ?user_id=; 404 when unknown.
        # (Presumably consumed by UserAssetPermissionMixin to build
        # self.util -- confirm against .mixin.)
        user_id = self.request.query_params.get('user_id', '')
        user = get_object_or_404(User, id=user_id)
        return user
    def get_object(self):
        # Target asset and system user are also taken from the query string.
        asset_id = self.request.query_params.get('asset_id', '')
        system_id = self.request.query_params.get('system_user_id', '')
        try:
            asset_id = uuid.UUID(asset_id)
            system_id = uuid.UUID(system_id)
        except ValueError:
            # NOTE(review): returning a Response from get_object() is a DRF
            # anti-pattern -- retrieve() will hand this Response to the
            # serializer; raising Http404/ValidationError would be the
            # conventional fix.  Left as-is to preserve behavior.
            return Response({'msg': False}, status=403)
        asset = get_object_or_404(Asset, id=asset_id)
        system_user = get_object_or_404(SystemUser, id=system_id)
        # Map of system_user -> granted action bits for this asset.
        system_users_actions = self.util.get_asset_system_users_with_actions(asset)
        actions = system_users_actions.get(system_user)
        return {"actions": actions}
class ValidateUserAssetPermissionApi(UserAssetPermissionMixin, APIView):
    """Answer whether a user may run `action_name` on an asset via a system user.

    Replies {'msg': True} (200) when granted; {'msg': False} (403) when the
    ids are malformed or the action is not granted.
    """
    permission_classes = (IsOrgAdminOrAppUser,)

    def get_obj(self):
        # Subject user referenced by ?user_id=; 404 when unknown.
        return get_object_or_404(
            User, id=self.request.query_params.get('user_id', ''))

    def get(self, request, *args, **kwargs):
        params = request.query_params
        action_name = params.get('action_name', '')
        try:
            asset_uuid = uuid.UUID(params.get('asset_id', ''))
            system_uuid = uuid.UUID(params.get('system_user_id', ''))
        except ValueError:
            # Malformed ids are treated as "not permitted".
            return Response({'msg': False}, status=403)
        asset = get_object_or_404(Asset, id=asset_uuid)
        system_user = get_object_or_404(SystemUser, id=system_uuid)
        granted = self.util.get_asset_system_users_with_actions(asset).get(system_user)
        allowed = action_name in Action.value_to_choices(granted)
        return Response({'msg': allowed}, status=200 if allowed else 403)
class RefreshAssetPermissionCacheApi(RetrieveAPIView):
    """Org-admin endpoint that invalidates every cached user permission tree."""
    permission_classes = (IsOrgAdmin,)
    def retrieve(self, request, *args, **kwargs):
        # Global invalidation: expires the tree cache for all users at once.
        AssetPermissionUtilV2.expire_all_user_tree_cache()
        return Response({'msg': True}, status=200)
class UserGrantedAssetSystemUsersApi(UserAssetPermissionMixin, ListAPIView):
    """List the system users granted on an asset, annotated with action bits."""
    permission_classes = (IsOrgAdminOrAppUser,)
    serializer_class = serializers.AssetSystemUserSerializer
    only_fields = serializers.AssetSystemUserSerializer.Meta.only_fields

    def get_queryset(self):
        asset = get_object_or_404(Asset, id=self.kwargs.get('asset_id'))
        granted = self.util.get_asset_system_users_with_actions(asset)
        # Attach the computed action mask to each system user object so the
        # serializer can expose it next to the regular fields.
        for system_user, actions in granted.items():
            system_user.actions = actions
        return sorted(granted, key=lambda su: su.priority)
| sdgdsffdsfff/jumpserver | apps/perms/api/user_permission/common.py | Python | gpl-2.0 | 3,904 |
"""This module contains tests that exercise the canned VMware Automate stuff."""
from textwrap import dedent
import fauxfactory
import pytest
from widgetastic.widget import View
from widgetastic_patternfly import Dropdown
from cfme import test_requirements
from cfme.common import BaseLoggedInPage
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.wait import wait_for
pytestmark = [
test_requirements.automate,
pytest.mark.meta(server_roles="+automate"),
pytest.mark.long_running,
pytest.mark.ignore_stream("upstream"),
pytest.mark.tier(3),
pytest.mark.provider(
[VMwareProvider], required_fields=[['provisioning', 'template']],
scope="module")
]
@pytest.fixture(scope="module")
def cls(domain):
    """Copy the stock ManageIQ System/Request class into *domain*; return the copy."""
    manageiq = domain.parent.instantiate(name='ManageIQ')
    system_ns = manageiq.namespaces.instantiate(name='System')
    request_cls = system_ns.classes.instantiate(name='Request')
    request_cls.copy_to(domain=domain)
    # Re-resolve the class inside the target domain so callers edit the copy.
    copied_ns = domain.namespaces.instantiate(name='System')
    return copied_ns.classes.instantiate(name='Request')
@pytest.fixture(scope="module")
def testing_group(appliance):
    """Yield a VM/Instance custom button group; delete it after the module."""
    button_groups = appliance.collections.button_groups
    label = fauxfactory.gen_alphanumeric()
    group = button_groups.create(
        text=label,
        hover=label,
        type=button_groups.VM_INSTANCE
    )
    yield group
    group.delete_if_exists()
@pytest.fixture(scope="function")
def testing_vm(setup_provider, provider):
    """Provision a throwaway VM from the provider's full_template; always clean up."""
    vm_collection = provider.appliance.provider_based_collection(provider)
    try:
        template_name = provider.data['templates']['full_template']['name']
    except KeyError:
        # Provider yaml lacks a usable full_template entry.
        pytest.skip('Unable to identify full_template for provider: {}'.format(provider))
    vm = vm_collection.instantiate(
        random_vm_name("ae-hd"), provider, template_name=template_name)
    try:
        vm.create_on_provider(find_in_cfme=True, allow_skip="default")
        yield vm
    finally:
        # Runs on every exit path, including provisioning failure.
        vm.cleanup_on_provider()
def test_vmware_vimapi_hotadd_disk(
        appliance, request, testing_group, testing_vm, domain, cls):
    """Tests hot adding a disk to vmware vm. This test exercises the `VMware_HotAdd_Disk` method,
    located in `/Integration/VMware/VimApi`
    Polarion:
        assignee: ghubale
        initialEstimate: 1/8h
        casecomponent: Automate
        caseimportance: critical
        tags: automate
        testSteps:
            1. It creates an instance in ``System/Request`` that can be accessible from eg. button
            2. Then it creates a button, that refers to the ``VMware_HotAdd_Disk`` in ``Request``.
               The button shall belong in the VM and instance button group.
            3. After the button is created, it goes to a VM's summary page, clicks the button.
            4. The test waits until the capacity of disks is raised.
    Bugzilla:
        1211627
        1311221
    """
    # Ruby method that feeds the desired disk size to VMware_HotAdd_Disk.
    meth = cls.methods.create(
        name=fauxfactory.gen_alpha(15, start="load_value_"),
        script=dedent('''\
            # Sets the capacity of the new disk.
            $evm.root['size'] = 1 # GB
            exit MIQ_OK
            '''))
    request.addfinalizer(meth.delete_if_exists)
    # Instance that calls the method and is accessible from the button
    instance = cls.instances.create(
        name=fauxfactory.gen_alpha(23, start="VMware_HotAdd_Disk_"),
        fields={
            "meth4": {'value': meth.name}, # To get the value
            "rel5": {'value': "/Integration/VMware/VimApi/VMware_HotAdd_Disk"},
        },
    )
    request.addfinalizer(instance.delete_if_exists)
    # Button that will invoke the dialog and action
    button_name = fauxfactory.gen_alphanumeric()
    button = testing_group.buttons.create(
        text=button_name,
        hover=button_name,
        system="Request",
        request=instance.name)
    request.addfinalizer(button.delete_if_exists)
    def _get_disk_capacity():
        # Re-read the VM summary page; returns the 'Total Allocation' text.
        view = testing_vm.load_details(refresh=True)
        return view.entities.summary('Datastore Allocation Summary').get_text_of('Total Allocation')
    original_disk_capacity = _get_disk_capacity()
    logger.info('Initial disk allocation: %s', original_disk_capacity)
    # Ad-hoc view exposing just the custom button group's dropdown.
    class CustomButtonView(View):
        custom_button = Dropdown(testing_group.text)
    view = appliance.browser.create_view(CustomButtonView)
    view.custom_button.item_select(button.text)
    view = appliance.browser.create_view(BaseLoggedInPage)
    view.flash.assert_no_error()
    try:
        # The hot-add is asynchronous; poll until allocation grows.
        wait_for(
            lambda: _get_disk_capacity() > original_disk_capacity, num_sec=180, delay=5)
    finally:
        logger.info('End disk capacity: %s', _get_disk_capacity())
| izapolsk/integration_tests | cfme/tests/automate/test_vmware_methods.py | Python | gpl-2.0 | 4,838 |
# coding: utf-8
'''
Created on 17/2/2015
@author: PC06
'''
from include import app
from flask.templating import render_template
from ec.edu.itsae.dao import PersonaDAO
@app.route("/")
def login():
    """Serve the login page at the site root."""
    template_name = "login.html"
    return render_template(template_name)
@app.route("/persona")
def index():
    """List all Persona records on the index page.

    Fetches every row via PersonaDAO and passes it to the template as
    ``dato``.
    """
    personas = PersonaDAO.PersonaDAO().reportarPersona()
    # Debug trace of the fetched rows; the print() call form behaves the
    # same for a single argument under both Python 2 and Python 3,
    # unlike the old `print x` statement.
    print(personas)
    return render_template("index.html", dato=personas)
"""
Helper and server modules
"""
| freevo/freevo1 | src/helpers/__init__.py | Python | gpl-2.0 | 34 |
# -*- python -*-
# Copyright (C) 2009 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/opt/codesourcery/arm-none-eabi/share/gcc-4.5.1/python'
libdir = '/opt/codesourcery/arm-none-eabi/lib/armv6-m'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
# Register the libstdc++ pretty-printers with gdb, fixing up sys.path
# first so the relocated toolchain's python dir can be imported.
if gdb.current_objfile () is not None:
    # Update module path.  We want to find the relative path from libdir
    # to pythondir, and then we want to apply that relative path to the
    # directory holding the objfile with which this file is associated.
    # This preserves relocatability of the gcc tree.

    # Do a simple normalization that removes duplicate separators.
    pythondir = os.path.normpath (pythondir)
    libdir = os.path.normpath (libdir)

    prefix = os.path.commonprefix ([libdir, pythondir])
    # In some bizarre configuration we might have found a match in the
    # middle of a directory name.
    if prefix[-1] != '/':
        prefix = os.path.dirname (prefix) + '/'

    # Strip off the prefix.
    pythondir = pythondir[len (prefix):]
    libdir = libdir[len (prefix):]

    # Compute the ".."s needed to get from libdir to the prefix.
    dotdots = ('..' + os.sep) * len (libdir.split (os.sep))

    objfile = gdb.current_objfile ().filename
    # Resolve the python dir relative to the objfile's location.
    dir = os.path.join (os.path.dirname (objfile), dotdots, pythondir)

    if not dir in sys.path:
        sys.path.insert(0, dir)

# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
| GeeteshKhatavkar/gh0st_kernel_samsung_royxx | arm-2010.09/arm-none-eabi/lib/armv6-m/libstdc++.a-gdb.py | Python | gpl-2.0 | 2,346 |
import urllib
from models.vipsubscriber import VipSubscriber
from base import BaseHandler
class Vip(BaseHandler):
    """Handler for viewing and editing a single VIP subscriber record."""

    LOCATION = "../views/vip.html"

    def GetContext(self):
        """Load the VIP identified by the ``key`` query parameter.

        Returns a template context containing ``tVip`` when a key was
        supplied, or an empty context otherwise.
        """
        tContext = {}
        # unquote() always returns a string, so truthiness covers both
        # the missing-parameter and empty-string cases.
        tVipKey = urllib.unquote(self.request.get('key'))
        if tVipKey:
            tContext['tVip'] = VipSubscriber.get(tVipKey)
        return tContext

    def PostContext(self):
        """Create or update a VIP subscriber from the POSTed form.

        With a ``key`` parameter the existing entity is loaded and
        updated; without one a fresh entity is created.  The entity is
        only saved when a non-empty forum name was submitted.
        """
        tContext = {}
        tVip = VipSubscriber()
        tVipForumName = urllib.unquote(self.request.get('forumname'))
        tVipKey = urllib.unquote(self.request.get('key'))
        if tVipKey:
            tVip = VipSubscriber.get(tVipKey)
            tContext['tVip'] = tVip
        if tVipForumName:
            tVip.subscriberForumName = tVipForumName
            tVip.put()
        return tContext
from django.contrib import admin
from edc_registration.models import RegisteredSubject
from edc_locator.admin import BaseLocatorModelAdmin
from ..forms import SubjectLocatorForm
from ..models import SubjectLocator
class SubjectLocatorAdmin(BaseLocatorModelAdmin):
    """Admin for SubjectLocator: a subject's contact/follow-up details."""

    form = SubjectLocatorForm

    # Explicit field order for the change form.
    fields = (
        'registered_subject',
        'report_datetime',
        'date_signed',
        'mail_address',
        'home_visit_permission',
        'physical_address',
        'may_follow_up',
        'subject_cell',
        'subject_cell_alt',
        'subject_phone',
        'subject_phone_alt',
        'may_call_work',
        'subject_work_place',
        'subject_work_phone',
        'may_contact_someone',
        'contact_name',
        'contact_rel',
        'contact_physical_address',
        'contact_cell',
        'contact_phone',
        'successful_mode_of_contact')

    list_display = ('may_follow_up', 'may_call_work')

    list_filter = ('may_follow_up', 'may_call_work')

    search_fields = (
        'registered_subject__subject_identifier', 'subject_cell', 'subject_cell_alt',
        'subject_phone', 'subject_phone_alt', 'subject_work_place', 'subject_work_phone')

    radio_fields = {"home_visit_permission": admin.VERTICAL,
                    "may_follow_up": admin.VERTICAL,
                    "may_call_work": admin.VERTICAL,
                    "may_contact_someone": admin.VERTICAL,
                    'successful_mode_of_contact': admin.VERTICAL}

    actions = []  # do not allow export to CSV

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Pin the registered_subject dropdown to the single subject passed
        # in the querystring (defaults to 0, i.e. an empty queryset), so
        # the locator cannot be re-pointed to another subject.
        if db_field.name == "registered_subject":
            kwargs["queryset"] = RegisteredSubject.objects.filter(id__exact=request.GET.get('registered_subject', 0))
        return super(SubjectLocatorAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)

admin.site.register(SubjectLocator, SubjectLocatorAdmin)
| botswana-harvard/bcvp | bcvp/bcvp_subject/admin/subject_locator_admin.py | Python | gpl-2.0 | 1,944 |
import glob
import os
from distutils.dep_util import newer
from distutils.core import Command
from distutils.spawn import find_executable
from distutils.util import change_root
class build_gschemas(Command):
    """Build GSettings schema files.

    Copies the gschema XML files from the source directory (the
    distribution's custom ``gschemas`` attribute) into the build tree
    at ``<build_base>/share/glib-2.0/schemas`` so that
    `install_gschemas` can install and compile them later.

    (The previous docstring was copy-pasted from a gettext build command
    and wrongly described .po/.mo message catalogs.)
    """

    description = "build gschemas used for dconf"
    user_options = []
    build_base = None

    def initialize_options(self):
        pass

    def finalize_options(self):
        # `gschemas` is a custom attribute set on the Distribution.
        self.gschemas_directory = self.distribution.gschemas
        self.set_undefined_options('build', ('build_base', 'build_base'))

    def run(self):
        # Fail early if the compiler needed at install time is missing.
        if find_executable("glib-compile-schemas") is None:
            raise SystemExit("Error: 'glib-compile-schemas' not found.")
        basepath = os.path.join(self.build_base, 'share', 'glib-2.0', 'schemas')
        self.copy_tree(self.gschemas_directory, basepath)
class install_gschemas(Command):
    """Install GSettings schema files.

    Copies the schemas built by `build_gschemas` into
    ``<install_base>/share/glib-2.0/schemas`` (honouring ``--root``)
    and runs ``glib-compile-schemas`` on the destination directory.

    (The previous docstrings were copy-pasted from a gettext install
    command and wrongly described message catalog files.)
    """

    description = "install gschema files"
    user_options = []
    skip_build = None
    build_base = None
    install_base = None
    root = None

    def initialize_options(self):
        pass

    def finalize_options(self):
        self.set_undefined_options('build', ('build_base', 'build_base'))
        self.set_undefined_options(
            'install',
            ('root', 'root'),
            ('install_base', 'install_base'),
            ('skip_build', 'skip_build'))

    def run(self):
        if not self.skip_build:
            self.run_command('build_gschemas')

        src = os.path.join(self.build_base, 'share', 'glib-2.0', 'schemas')
        dest = os.path.join(self.install_base, 'share', 'glib-2.0', 'schemas')
        # Apply the staging root (e.g. DESTDIR-style installs) if given.
        if self.root is not None:
            dest = change_root(self.root, dest)
        self.copy_tree(src, dest)
        # Compile the installed schemas so GSettings can read them.
        self.spawn(['glib-compile-schemas', dest])
__all__ = ["build_gschemas", "install_gschemas"]
| nagisa/Feeds | gdist/gschemas.py | Python | gpl-2.0 | 2,161 |
# This file is part of MyPaint.
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2019 by the MyPaint Development Team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Device specific settings and configuration"""
## Imports
from __future__ import division, print_function
import logging
import collections
import re
from lib.gettext import C_
from lib.gibindings import Gtk
from lib.gibindings import Gdk
from lib.gibindings import Pango
from lib.observable import event
import gui.application
import gui.mode
logger = logging.getLogger(__name__)
## Device prefs
# The per-device settings are stored in the prefs in a sub-dict whose
# string keys are formed from the device name and enough extra
# information to (hopefully) identify the device uniquely. Names are not
# unique, and IDs vary according to the order in which you plug devices
# in. So for now, our unique strings use a combination of the device's
# name, its source as presented by GDK, and the number of axes.
_PREFS_ROOT = "input.devices"
_PREFS_DEVICE_SUBKEY_FMT = "{name}:{source}:{num_axes}"
## Device type strings
_DEVICE_TYPE_STRING = {
Gdk.InputSource.CURSOR: C_(
"prefs: device's type label",
"Cursor/puck",
),
Gdk.InputSource.ERASER: C_(
"prefs: device's type label",
"Eraser",
),
Gdk.InputSource.KEYBOARD: C_(
"prefs: device's type label",
"Keyboard",
),
Gdk.InputSource.MOUSE: C_(
"prefs: device's type label",
"Mouse",
),
Gdk.InputSource.PEN: C_(
"prefs: device's type label",
"Pen",
),
Gdk.InputSource.TOUCHPAD: C_(
"prefs: device's type label",
"Touchpad",
),
Gdk.InputSource.TOUCHSCREEN: C_(
"prefs: device's type label",
"Touchscreen",
),
}
## Settings consts and classes
class AllowedUsage:
    """Consts describing how a device may interact with the canvas.

    Each usage value maps to a human-readable label (DISPLAY_STRING)
    and to a gui.mode.Behavior bitmask (BEHAVIOR_MASK) consumed by the
    mode stack to decide which modes the device may enter.
    """

    ANY = "any"  #: Device can be used for any tasks.
    NOPAINT = "nopaint"  #: No direct painting, but can manipulate objects.
    NAVONLY = "navonly"  #: Device can only be used for navigation.
    IGNORED = "ignored"  #: Device cannot interact with the canvas at all.

    # Order determines the default (VALUES[0]) and dropdown ordering.
    VALUES = (ANY, IGNORED, NOPAINT, NAVONLY)
    DISPLAY_STRING = {
        IGNORED: C_(
            "device settings: allowed usage",
            u"Ignore",
        ),
        ANY: C_(
            "device settings: allowed usage",
            u"Any Task",
        ),
        NOPAINT: C_(
            "device settings: allowed usage",
            u"Non-painting tasks",
        ),
        NAVONLY: C_(
            "device settings: allowed usage",
            u"Navigation only",
        ),
    }
    BEHAVIOR_MASK = {
        ANY: gui.mode.Behavior.ALL,
        IGNORED: gui.mode.Behavior.NONE,
        NOPAINT: gui.mode.Behavior.NON_PAINTING,
        NAVONLY: gui.mode.Behavior.CHANGE_VIEW,
    }
class ScrollAction:
    """Consts describing how a device's scroll events should be used.

    The user can assign one of these values to a device to configure
    whether they'd prefer panning or scrolling for unmodified scroll
    events. This setting can be queried via the device monitor.
    """

    ZOOM = "zoom"  #: Alter the canvas scaling
    PAN = "pan"  #: Pan across the canvas

    # Order determines the default (VALUES[0]) and dropdown ordering.
    VALUES = (ZOOM, PAN)
    DISPLAY_STRING = {
        ZOOM: C_("device settings: unmodified scroll action", u"Zoom"),
        PAN: C_("device settings: unmodified scroll action", u"Pan"),
    }
class Settings (object):
    """A device's settings.

    Settings are loaded from, and persisted to, the ``prefs`` dict
    passed to the constructor.  Assigning to ``usage`` or ``scroll``
    validates the value and writes it back immediately.
    """

    DEFAULT_USAGE = AllowedUsage.VALUES[0]
    DEFAULT_SCROLL = ScrollAction.VALUES[0]

    def __init__(self, prefs, usage=DEFAULT_USAGE, scroll=DEFAULT_SCROLL):
        """Initialize from a per-device prefs sub-dict.

        :param dict prefs: persistent store for this device's settings
        :param str usage: fallback usage when prefs has no valid entry
        :param str scroll: fallback scroll action when prefs has no
            valid entry

        Fix: the ``usage`` and ``scroll`` arguments were previously
        accepted but silently ignored; they now act as fallbacks for
        values missing from ``prefs``.  Behavior with the default
        arguments is unchanged.
        """
        super(Settings, self).__init__()
        if usage not in AllowedUsage.VALUES:
            usage = self.DEFAULT_USAGE
        if scroll not in ScrollAction.VALUES:
            scroll = self.DEFAULT_SCROLL
        self._usage = usage
        self._scroll = scroll
        self._update_usage_mask()
        self._prefs = prefs
        self._load_from_prefs()

    @property
    def usage(self):
        """The allowed usage (an AllowedUsage value)."""
        return self._usage

    @usage.setter
    def usage(self, value):
        if value not in AllowedUsage.VALUES:
            raise ValueError("Unrecognized usage value")
        self._usage = value
        self._update_usage_mask()
        self._save_to_prefs()

    @property
    def usage_mask(self):
        """Cached gui.mode.Behavior bitmask for the current usage."""
        return self._usage_mask

    @property
    def scroll(self):
        """The scroll behavior (a ScrollAction value)."""
        return self._scroll

    @scroll.setter
    def scroll(self, value):
        if value not in ScrollAction.VALUES:
            raise ValueError("Unrecognized scroll value")
        self._scroll = value
        self._save_to_prefs()

    def _load_from_prefs(self):
        """Read settings from prefs, keeping current values as fallback."""
        usage = self._prefs.get("usage", self._usage)
        if usage not in AllowedUsage.VALUES:
            usage = self.DEFAULT_USAGE
        self._usage = usage
        scroll = self._prefs.get("scroll", self._scroll)
        if scroll not in ScrollAction.VALUES:
            scroll = self.DEFAULT_SCROLL
        self._scroll = scroll
        self._update_usage_mask()

    def _save_to_prefs(self):
        """Write the current settings back to the prefs dict."""
        self._prefs.update({
            "usage": self._usage,
            "scroll": self._scroll,
        })

    def _update_usage_mask(self):
        # Cache the mask so lookups on the hot event path are attribute
        # reads rather than dict lookups.
        self._usage_mask = AllowedUsage.BEHAVIOR_MASK[self._usage]
## Main class defs
class Monitor (object):
    """Monitors device use & plugging, and manages their configuration

    An instance resides in the main application. It is responsible for
    monitoring known devices, determining their characteristics, and
    storing their settings. Per-device settings are stored in the main
    application preferences.
    """

    def __init__(self, app):
        """Initializes, assigning initial input device uses

        :param app: the owning Application instance.
        :type app: gui.application.Application

        Passing ``app=None`` (used by the module's test harness) makes
        the monitor use a throwaway in-memory prefs dict.
        """
        super(Monitor, self).__init__()
        self._app = app
        if app is not None:
            self._prefs = app.preferences
        else:
            self._prefs = {}
        if _PREFS_ROOT not in self._prefs:
            self._prefs[_PREFS_ROOT] = {}

        # Transient device information
        self._device_settings = collections.OrderedDict()  # {dev: settings}
        self._last_event_device = None
        self._last_pen_device = None

        # Watch hotplug so settings exist for every physical device.
        disp = Gdk.Display.get_default()
        mgr = disp.get_device_manager()
        mgr.connect("device-added", self._device_added_cb)
        mgr.connect("device-removed", self._device_removed_cb)
        self._device_manager = mgr

        for physical_device in mgr.list_devices(Gdk.DeviceType.SLAVE):
            self._init_device_settings(physical_device)

    ## Devices list

    def get_device_settings(self, device):
        """Gets the settings for a device

        :param Gdk.Device device: a physical ("slave") device
        :returns: A settings object which can be manipulated, or None
        :rtype: Settings

        Changes to the returned object made via its API are saved to the
        user preferences immediately.

        If the device is a keyboard, or is otherwise unsuitable as a
        pointing device, None is returned instead. The caller needs to
        check this case.
        """
        return (self._device_settings.get(device)
                or self._init_device_settings(device))

    def _init_device_settings(self, device):
        """Ensures that the device settings are loaded for a device

        Returns None for keyboards and devices with fewer than two axes;
        otherwise returns the (possibly newly created) Settings.
        """
        source = device.get_source()
        if source == Gdk.InputSource.KEYBOARD:
            return
        num_axes = device.get_n_axes()
        if num_axes < 2:
            return
        settings = self._device_settings.get(device)
        if not settings:
            try:
                vendor_id = device.get_vendor_id()
                product_id = device.get_product_id()
            except AttributeError:
                # New in GDK 3.16
                vendor_id = "?"
                product_id = "?"
            logger.info(
                "New device %r"
                " (%s, axes:%d, class=%s, vendor=%r, product=%r)",
                device.get_name(),
                source.value_name,
                num_axes,
                device.__class__.__name__,
                vendor_id,
                product_id,
            )
            # Settings persist under a per-device subkey of the prefs root.
            dev_prefs_key = _device_prefs_key(device)
            dev_prefs = self._prefs[_PREFS_ROOT].setdefault(dev_prefs_key, {})
            settings = Settings(dev_prefs)
            self._device_settings[device] = settings
            self.devices_updated()
        assert settings is not None
        return settings

    def _device_added_cb(self, mgr, device):
        """Informs that a device has been plugged in"""
        logger.debug("device-added %r", device.get_name())
        self._init_device_settings(device)

    def _device_removed_cb(self, mgr, device):
        """Informs that a device has been unplugged"""
        logger.debug("device-removed %r", device.get_name())
        self._device_settings.pop(device, None)
        self.devices_updated()

    @event
    def devices_updated(self):
        """Event: the devices list was changed"""

    def get_devices(self):
        """Yields devices and their settings, for UI stuff

        :rtype: iterator
        :returns: ultimately a sequence of (Gdk.Device, Settings) pairs
        """
        for device, settings in self._device_settings.items():
            yield (device, settings)

    ## Current device

    @event
    def current_device_changed(self, old_device, new_device):
        """Event: the current device has changed

        :param Gdk.Device old_device: Previous device used
        :param Gdk.Device new_device: New device used
        """

    def device_used(self, device):
        """Informs about a device being used, for use by controllers

        :param Gdk.Device device: the device being used
        :returns: whether the device changed

        If the device has changed, this method then notifies interested
        parties via the device_changed observable @event.

        This method returns True if the device was the same as the previous
        device, and False if it has changed.
        """
        if not self.get_device_settings(device):
            return False
        if device == self._last_event_device:
            return True
        self.current_device_changed(self._last_event_device, device)
        old_device = self._last_event_device
        new_device = device
        self._last_event_device = device

        # small problem with this code: it doesn't work well with brushes that
        # have (eraser not in [1.0, 0.0])

        new_device.name = new_device.props.name
        new_device.source = new_device.props.input_source

        logger.debug(
            "Device change: name=%r source=%s",
            new_device.name, new_device.source.value_name,
        )

        # When editing brush settings, it is often more convenient to use the
        # mouse. Because of this, we don't restore brushsettings when switching
        # to/from the mouse. We act as if the mouse was identical to the last
        # active pen device.

        if (new_device.source == Gdk.InputSource.MOUSE and
                self._last_pen_device):
            new_device = self._last_pen_device
        if new_device.source == Gdk.InputSource.PEN:
            self._last_pen_device = new_device
        if (old_device and old_device.source == Gdk.InputSource.MOUSE and
                self._last_pen_device):
            old_device = self._last_pen_device

        # Save the outgoing device's brush, and restore the brush last
        # associated with the incoming device.
        bm = self._app.brushmanager
        if old_device:
            # Clone for saving
            old_brush = bm.clone_selected_brush(name=None)
            bm.store_brush_for_device(old_device.name, old_brush)

        if new_device.source == Gdk.InputSource.MOUSE:
            # Avoid fouling up unrelated devbrushes at stroke end
            self._prefs.pop('devbrush.last_used', None)
        else:
            # Select the brush and update the UI.
            # Use a sane default if there's nothing associated
            # with the device yet.
            brush = bm.fetch_brush_for_device(new_device.name)
            if brush is None:
                if device_is_eraser(new_device):
                    brush = bm.get_default_eraser()
                else:
                    brush = bm.get_default_brush()
            self._prefs['devbrush.last_used'] = new_device.name
            bm.select_brush(brush)
        # NOTE(review): this path falls off the end and returns None
        # (falsy) after a device *change*, which only loosely matches the
        # docstring's promise of True/False — confirm callers treat the
        # result purely as truthy/falsy.
class SettingsEditor (Gtk.Grid):
    """Per-device settings editor

    A TreeView-based table of all known pointing devices, with editable
    dropdown cells for the "usage" and "scroll" settings.  Rows track
    the Monitor's device list via its devices_updated event.
    """

    ## Class consts

    # Column indices within the two dropdown ListStores below:
    # column 0 holds the config value, column 1 the display string.
    _USAGE_CONFIG_COL = 0
    _USAGE_STRING_COL = 1
    _SCROLL_CONFIG_COL = 0
    _SCROLL_STRING_COL = 1

    __gtype_name__ = "MyPaintDeviceSettingsEditor"

    ## Initialization

    def __init__(self, monitor=None):
        """Initialize

        :param Monitor monitor: monitor instance (for testing)

        By default, the central app's `device_monitor` is used to permit
        parameterless construction.
        """
        super(SettingsEditor, self).__init__()
        if monitor is None:
            app = gui.application.get_app()
            monitor = app.device_monitor
        self._monitor = monitor

        # Model holds the Gdk.Device objects themselves; all display
        # columns derive their text via cell data funcs.
        self._devices_store = Gtk.ListStore(object)
        self._devices_view = Gtk.TreeView(model=self._devices_store)

        # Device name column.
        col = Gtk.TreeViewColumn(C_(
            "prefs: devices table: column header",
            # TRANSLATORS: Column's data is the device's name
            "Device",
        ))
        col.set_min_width(200)
        col.set_expand(True)
        col.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        self._devices_view.append_column(col)
        cell = Gtk.CellRendererText()
        cell.set_property("ellipsize", Pango.EllipsizeMode.MIDDLE)
        col.pack_start(cell, True)
        col.set_cell_data_func(cell, self._device_name_datafunc)

        # Axis count column.
        col = Gtk.TreeViewColumn(C_(
            "prefs: devices table: column header",
            # TRANSLATORS: Column's data is the number of axes (an integer)
            "Axes",
        ))
        col.set_min_width(30)
        col.set_resizable(True)
        col.set_expand(False)
        col.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        self._devices_view.append_column(col)
        cell = Gtk.CellRendererText()
        col.pack_start(cell, True)
        col.set_cell_data_func(cell, self._device_axes_datafunc)

        # Device type column.
        col = Gtk.TreeViewColumn(C_(
            "prefs: devices table: column header",
            # TRANSLATORS: Column shows type labels ("Touchscreen", "Pen" etc.)
            "Type",
        ))
        col.set_min_width(120)
        col.set_resizable(True)
        col.set_expand(False)
        col.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        self._devices_view.append_column(col)
        cell = Gtk.CellRendererText()
        cell.set_property("ellipsize", Pango.EllipsizeMode.END)
        col.pack_start(cell, True)
        col.set_cell_data_func(cell, self._device_type_datafunc)

        # Usage config value => string store (dropdowns)
        store = Gtk.ListStore(str, str)
        for conf_val in AllowedUsage.VALUES:
            string = AllowedUsage.DISPLAY_STRING[conf_val]
            store.append([conf_val, string])
        self._usage_store = store

        # Editable usage dropdown column.
        col = Gtk.TreeViewColumn(C_(
            "prefs: devices table: column header",
            # TRANSLATORS: Column's data is a dropdown allowing the allowed
            # TRANSLATORS: tasks for the row's device to be configured.
            u"Use for…",
        ))
        col.set_min_width(100)
        col.set_resizable(True)
        col.set_expand(False)
        self._devices_view.append_column(col)

        cell = Gtk.CellRendererCombo()
        cell.set_property("model", self._usage_store)
        cell.set_property("text-column", self._USAGE_STRING_COL)
        cell.set_property("mode", Gtk.CellRendererMode.EDITABLE)
        cell.set_property("editable", True)
        cell.set_property("has-entry", False)
        cell.set_property("ellipsize", Pango.EllipsizeMode.END)
        cell.connect("changed", self._usage_cell_changed_cb)
        col.pack_start(cell, True)
        col.set_cell_data_func(cell, self._device_usage_datafunc)

        # Scroll action config value => string store (dropdowns)
        store = Gtk.ListStore(str, str)
        for conf_val in ScrollAction.VALUES:
            string = ScrollAction.DISPLAY_STRING[conf_val]
            store.append([conf_val, string])
        self._scroll_store = store

        # Editable scroll-action dropdown column.
        col = Gtk.TreeViewColumn(C_(
            "prefs: devices table: column header",
            # TRANSLATORS: Column's data is a dropdown for how the device's
            # TRANSLATORS: scroll wheel or scroll-gesture events are to be
            # TRANSLATORS: interpreted normally.
            u"Scroll…",
        ))
        col.set_min_width(100)
        col.set_resizable(True)
        col.set_expand(False)
        self._devices_view.append_column(col)

        cell = Gtk.CellRendererCombo()
        cell.set_property("model", self._scroll_store)
        cell.set_property("text-column", self._USAGE_STRING_COL)
        cell.set_property("mode", Gtk.CellRendererMode.EDITABLE)
        cell.set_property("editable", True)
        cell.set_property("has-entry", False)
        cell.set_property("ellipsize", Pango.EllipsizeMode.END)
        cell.connect("changed", self._scroll_cell_changed_cb)
        col.pack_start(cell, True)
        col.set_cell_data_func(cell, self._device_scroll_datafunc)

        # Pretty borders
        view_scroll = Gtk.ScrolledWindow()
        view_scroll.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
        pol = Gtk.PolicyType.AUTOMATIC
        view_scroll.set_policy(pol, pol)
        view_scroll.add(self._devices_view)
        view_scroll.set_hexpand(True)
        view_scroll.set_vexpand(True)
        self.attach(view_scroll, 0, 0, 1, 1)

        self._update_devices_store()
        self._monitor.devices_updated += self._update_devices_store

    ## Display and sort funcs

    def _device_name_datafunc(self, column, cell, model, iter_, *data):
        # Render the device's name.
        device = model.get_value(iter_, 0)
        cell.set_property("text", device.get_name())

    def _device_axes_datafunc(self, column, cell, model, iter_, *data):
        # Render the device's axis count as text.
        device = model.get_value(iter_, 0)
        n_axes = device.get_n_axes()
        cell.set_property("text", "%d" % (n_axes,))

    def _device_type_datafunc(self, column, cell, model, iter_, *data):
        # Render a translated type label; falls back to GDK's nickname.
        device = model.get_value(iter_, 0)
        source = device.get_source()
        text = _DEVICE_TYPE_STRING.get(source, source.value_nick)
        cell.set_property("text", text)

    def _device_usage_datafunc(self, column, cell, model, iter_, *data):
        # Render the current usage setting's display string.
        device = model.get_value(iter_, 0)
        settings = self._monitor.get_device_settings(device)
        if not settings:
            return
        text = AllowedUsage.DISPLAY_STRING[settings.usage]
        cell.set_property("text", text)

    def _device_scroll_datafunc(self, column, cell, model, iter_, *data):
        # Render the current scroll setting's display string.
        device = model.get_value(iter_, 0)
        settings = self._monitor.get_device_settings(device)
        if not settings:
            return
        text = ScrollAction.DISPLAY_STRING[settings.scroll]
        cell.set_property("text", text)

    ## Updates

    def _usage_cell_changed_cb(self, combo, device_path_str,
                               usage_iter, *etc):
        # Persist the newly chosen usage value for the row's device.
        config = self._usage_store.get_value(
            usage_iter,
            self._USAGE_CONFIG_COL,
        )
        device_iter = self._devices_store.get_iter(device_path_str)
        device = self._devices_store.get_value(device_iter, 0)
        settings = self._monitor.get_device_settings(device)
        if not settings:
            return
        settings.usage = config
        self._devices_view.columns_autosize()

    def _scroll_cell_changed_cb(self, conf_combo, device_path_str,
                                conf_iter, *etc):
        # Persist the newly chosen scroll action for the row's device.
        conf_store = self._scroll_store
        conf_col = self._SCROLL_CONFIG_COL
        conf_value = conf_store.get_value(conf_iter, conf_col)
        device_store = self._devices_store
        device_iter = device_store.get_iter(device_path_str)
        device = device_store.get_value(device_iter, 0)
        settings = self._monitor.get_device_settings(device)
        if not settings:
            return
        settings.scroll = conf_value
        self._devices_view.columns_autosize()

    def _update_devices_store(self, *_ignored):
        """Repopulates the displayed list"""
        # Diff the store against the monitor's list: append new devices,
        # then remove unplugged rows back-to-front so paths stay valid.
        updated_list = list(self._monitor.get_devices())
        updated_list_map = dict(updated_list)
        paths_for_removal = []
        devices_retained = set()
        for row in self._devices_store:
            device, = row
            if device not in updated_list_map:
                paths_for_removal.append(row.path)
                continue
            devices_retained.add(device)
        for device, config in updated_list:
            if device in devices_retained:
                continue
            self._devices_store.append([device])
        for unwanted_row_path in reversed(paths_for_removal):
            unwanted_row_iter = self._devices_store.get_iter(unwanted_row_path)
            self._devices_store.remove(unwanted_row_iter)
        self._devices_view.queue_draw()
## Helper funcs
def _device_prefs_key(device):
    """Returns the subkey to use in the app prefs for a device.

    Uses the module-level ``_PREFS_DEVICE_SUBKEY_FMT`` template so the
    key layout ("name:source:num_axes") is defined in exactly one place,
    instead of duplicating it with a hand-rolled %-format here.
    """
    return _PREFS_DEVICE_SUBKEY_FMT.format(
        name=device.get_name(),
        source=device.get_source().value_nick,
        num_axes=device.get_n_axes(),
    )
def device_is_eraser(device):
    """Tests whether a device appears to be an eraser.

    True if GDK reports the ERASER source, or if the device's name
    contains "eraser" as a whole word (case-insensitive).
    """
    if device is None:
        return False
    if device.get_source() == Gdk.InputSource.ERASER:
        return True
    # Bug fix: the previous pattern r'\<eraser\>' used vim/grep-style
    # word delimiters, which Python's re treats as the literal
    # characters "<eraser>" — so it never matched real device names.
    # \b is the correct word-boundary assertion here.
    if re.search(r'\beraser\b', device.get_name(), re.I):
        return True
    return False
## Testing
def _test():
    """Interactive UI testing for SettingsEditor and Monitor"""
    logging.basicConfig(level=logging.DEBUG)
    win = Gtk.Window()
    win.set_title("gui.device.SettingsEditor")
    win.set_default_size(500, 400)
    win.connect("destroy", Gtk.main_quit)
    # app=None makes the Monitor use an in-memory prefs dict, so this
    # harness never touches the real application preferences.
    monitor = Monitor(app=None)
    editor = SettingsEditor(monitor)
    win.add(editor)
    win.show_all()
    Gtk.main()
    # Dump whatever settings were changed during the interactive session.
    print(monitor._prefs)
if __name__ == '__main__':
_test()
| mypaint/mypaint | gui/device.py | Python | gpl-2.0 | 22,624 |
__author__ = 'bromix'
from .base_item import BaseItem
class AudioItem(BaseItem):
    """A playable audio list item carrying common music metadata.

    All metadata defaults to None until the corresponding setter is
    called; the title defaults to the item's name.
    """

    def __init__(self, name, uri, image=u'', fanart=u''):
        BaseItem.__init__(self, name, uri, image, fanart)
        self._duration = None       # length in whole seconds
        self._track_number = None
        self._year = None
        self._genre = None
        self._album = None
        self._artist = None
        self._title = name          # defaults to the item name
        self._rating = None

    def set_rating(self, rating):
        self._rating = float(rating)

    def get_rating(self):
        return self._rating

    def set_title(self, title):
        self._title = unicode(title)

    def get_title(self):
        return self._title

    def set_artist_name(self, artist_name):
        self._artist = unicode(artist_name)

    def get_artist_name(self):
        return self._artist

    def set_album_name(self, album_name):
        self._album = unicode(album_name)

    def get_album_name(self):
        return self._album

    def set_genre(self, genre):
        self._genre = unicode(genre)

    def get_genre(self):
        return self._genre

    def set_year(self, year):
        self._year = int(year)

    def set_year_from_datetime(self, date_time):
        """Convenience: take the year from a datetime/date object."""
        self.set_year(date_time.year)

    def get_year(self):
        return self._year

    def set_track_number(self, track_number):
        self._track_number = int(track_number)

    def get_track_number(self):
        return self._track_number

    def set_duration_from_milli_seconds(self, milli_seconds):
        # Floor division keeps the intermediate value an int on both
        # Python 2 and Python 3 (true division would yield a float on 3).
        self.set_duration_from_seconds(int(milli_seconds) // 1000)

    def set_duration_from_seconds(self, seconds):
        self._duration = int(seconds)

    def set_duration_from_minutes(self, minutes):
        self.set_duration_from_seconds(int(minutes) * 60)

    def get_duration(self):
        """Return the duration in whole seconds, or None if unset."""
        return self._duration
| repotvsupertuga/tvsupertuga.repository | plugin.video.youtube/resources/lib/youtube_plugin/kodion/items/audio_item.py | Python | gpl-2.0 | 1,840 |
"""
WSGI config for octo_nemesis project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
# Point Django at the project settings *before* importing anything that
# reads them (the import below does).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "octo_nemesis.settings")

from django.core.wsgi import get_wsgi_application

# WSGI servers (gunicorn, mod_wsgi, ...) look up this module-level callable.
application = get_wsgi_application()
| monkeywidget/massive-octo-nemesis | octo_nemesis/octo_nemesis/wsgi.py | Python | gpl-2.0 | 399 |
from django import template
from ..models import Grade
register = template.Library()
@register.filter
def to_grade_number(year):
    """Return the grade *number* for a graduation year.

    (The previous docstring wrongly claimed a `Grade` object was
    returned; this filter returns ``Grade(year).number``.)
    """
    return Grade(year).number
@register.filter
def to_grade_name(year):
    """Return the grade *name* for a graduation year.

    (The previous docstring wrongly claimed a `Grade` object was
    returned; this filter returns ``Grade(year).name``.)
    """
    return Grade(year).name
| tjcsl/ion | intranet/apps/users/templatetags/grades.py | Python | gpl-2.0 | 329 |
import array
def read_symbol_array(path):
    """Read whitespace-separated integers from *path* into an
    unsigned 16-bit ('H') array."""
    with open(path, 'r') as handle:
        tokens = handle.read().split()
    return array.array('H', [int(tok) for tok in tokens])
def format_sequence(seq):
    """Render *seq* as "{a b c}", truncating after ten symbols with " ..."."""
    truncated = len(seq) > 10
    shown = seq[:10] if truncated else seq
    body = ' '.join(str(sym) for sym in shown)
    if truncated:
        body += ' ...'
    return '{' + body + '}'
def write_symbol_array(path, symbols):
    """Write *symbols* to *path* as space-separated decimal integers."""
    text = ' '.join(str(sym) for sym in symbols)
    with open(path, "w") as handle:
        handle.write(text)
| jade-cheng/Jocx | src/ziphmm/_internal.py | Python | gpl-2.0 | 433 |
import os
import types
from sllib.LLSD import LLSD
# One-shot Python 2 script: split the captured HTTP stream in
# httpcap.txt into individual <llsd>...</llsd> documents, naming each
# output file after the last "message" key found inside it.
try:
    os.makedirs('./httpcap')
except:
    pass

data = open('httpcap.txt','r').read()
c = 0                       # running index appended to output filenames
btag = '<llsd>'
etag = '</llsd>'
##mbtag = '<key>message</key><string>'
##metag = '</string>'
b = data.find(btag)
mnames = {}                 # set of message names seen (dict used as a set)
while b >= 0:
    # Slice out one complete <llsd>...</llsd> document.
    e = data.find(etag, b) + len(etag)
    xml = data[b:e]
##    bm = xml.rfind(mbtag)
##    em = xml.find(metag, bm)
##    if bm >= 0 and em >= 0 and em >= bm:
##        bm = bm + len(mbtag)
##        m = xml[bm:em]
##        mnames[m] = None
##    else:
##        m = 'Unknown'
    ll = LLSD.fromstring(xml)
    m = 'DATA'
    if type(ll) == types.DictType and ll.has_key('events'):
##        print ll
        for msg in ll['events']:
            m = msg['message']
##            print m
            mnames[m] = None
    # File is named after the last message in the document (or 'DATA').
    name = './httpcap/%s_%d.xml' % (m,c)
    try:
        open(name, 'w+').write(xml)
    except:
        print xml
        raise
    c += 1
    b = data.find(btag, e)
print mnames.keys()
| MarioVilas/secondlife-experiments | SimProxy/extract_xml.py | Python | gpl-2.0 | 1,050 |
#!/usr/bin/env python
# test libvirt cpu stats
import libvirt
from libvirt import libvirtError
from src import sharedmod
from utils import utils
required_params = ('cpuNum',)
optional_params = {'conn': '', }
STATFILE = "/proc/stat"
GETCPUSTAT = "cat /proc/stat | grep cpu%s"
USR_POS = 1
NI_POS = 2
SYS_POS = 3
IDLE_POS = 4
IOWAIT_POS = 5
IRQ_POS = 6
SOFTIRQ_POS = 7
def compare_result(dest, src, delta, logger):
    """Return True when *dest* lies within *delta* of *src* (inclusive).

    The *logger* argument is unused but kept for interface
    compatibility with callers.
    """
    lower = src - delta
    upper = src + delta
    return lower <= dest <= upper
def check_stat(cpu, stat, stat_type, logger):
    """ check cpu stat for cpu[cpunum]

    Compares *stat* (already converted to /proc/stat's 10ms tick units
    by the caller) against the matching columns of /proc/stat, allowing
    a per-type delta of slack.  Returns True on match, False otherwise.
    """
    delta = 0
    if cpu == "-1":
        # cpuNum -1 means the aggregate "cpu " line; grab the first line.
        cmd = GETCPUSTAT % " | head -1"
        cpu = ""
    else:
        cmd = GETCPUSTAT % cpu
    status, out = utils.exec_cmd(cmd, shell=True)
    if status != 0:
        logger.error("Exec %s fails" % cmd)
        return False
    logger.debug("get cpu%s stats: %s" % (cpu, out))
    stats = out[0].split()
    logger.debug("cpu stats: %s" % stats)
    if stat_type == "kernel":
        # kernel time = system + irq + softirq columns
        target_stat = int(stats[SYS_POS]) + int(stats[IRQ_POS]) + \
            int(stats[SOFTIRQ_POS])
        delta = 1
    elif stat_type == "idle":
        target_stat = int(stats[IDLE_POS])
        delta = 10
    elif stat_type == "user":
        # user time = user + nice columns
        target_stat = int(stats[USR_POS]) + int(stats[NI_POS])
        delta = 2
    elif stat_type == "iowait":
        target_stat = int(stats[IOWAIT_POS])
        delta = 10
    else:
        logger.error("Unidentified type %s" % stat_type)
        return False
    if compare_result(stat, target_stat, delta, logger):
        logger.info("%s stat check success" % stat_type)
    else:
        logger.error("%s stat check failed" % stat_type)
        logger.error("%s stat is %d, should be %d" %
                     (stat_type, stat, target_stat))
        return False
    return True
def cpu_stats(params):
    """ test libvirt cpu stats

    Fetch host CPU statistics via conn.getCPUStats() and verify each stat
    type against the values read from /proc/stat.

    :param dict params: requires 'cpuNum' and 'logger'; 'conn' is an
        optional libvirt connection URI
    :return: 0 on success, 1 on failure
    """
    logger = params['logger']
    cpunum = int(params['cpuNum'])
    stat_types = ['kernel', 'idle', 'user', 'iowait']
    try:
        # get connection firstly.
        # If conn is not specified, use conn from sharedmod
        if 'conn' in params:
            conn = libvirt.open(params['conn'])
        else:
            conn = sharedmod.libvirtobj['conn']
        res = conn.getCPUStats(cpunum, 0)
        for s in stat_types:
            if s not in res:
                logger.error("%s is not the key" % s)
                return 1
            # Scale down by 1e7 before comparing with /proc/stat
            # (presumably nanoseconds -> 10ms ticks; confirm against the
            # libvirt getCPUStats() documentation).
            if not check_stat(str(cpunum), res[s] / 10000000, s, logger):
                return 1
    except libvirtError as e:
        # BUG fix: the format string has two placeholders but only
        # e.message was supplied, which raised TypeError while reporting
        # the original error.  Also use the py3-compatible "as e" syntax.
        logger.error("API error message: %s, error code is %s" %
                     (e.message, e.get_error_code()))
        return 1
    return 0
| ryanmiao/libvirt-test-API | repos/virconn/cpu_stats.py | Python | gpl-2.0 | 2,792 |
"""
python bindings to flux-core, the main core of the flux resource manager
"""
# Import core symbols directly, allows flux.FLUX_MSGTYPE_ANY for example
# pylint: disable=wildcard-import
from flux.constants import *
from flux.core import Flux
__all__ = ['core',
'kvs',
'jsc',
'rpc',
'sec',
'constants',
'Flux', ]
| lipari/flux-core | src/bindings/python/flux/__init__.py | Python | gpl-2.0 | 381 |
"""
The symbols and rules for the CFG of C. I generated these myself by hand, so
they're probably not perfectly correct.
"""
from rules_obj import *
from lexer import *
import tokens
### Symbols ###
# Most symbols are either self-explanatory, or best understood by examining the
# rules below to see how they're used.
S = Symbol("S")
main_setup = Symbol("main_setup") #TODO: is this neccesary?
# `statments` is a buch of `statement`s
statements = Symbol("statements")
# `statement` is a single C statement, semicolon included
statement = Symbol("statement")
# a generic expression
E = Symbol("E")
declare_separator = Symbol("declare_separator")
declare_type = Symbol("declare_type")
declare_expression = Symbol("declare_expression");
arr_start = Symbol("arr_start")
arr_end = Symbol("arr_end")
arr_list = Symbol("arr_list")
if_start = Symbol("if_start");
if_statement = Symbol("if_statement");
else_statement = Symbol("else_statement");
while_start = Symbol("while_start")
while_statement = Symbol("while_statement")
for_start = Symbol("for_start")
for1 = Symbol("for1")
for2 = Symbol("for2")
for3 = Symbol("for3")
for_expr = Symbol("for_expr")
arg_start = Symbol("arg_start")
func_dec = Symbol("func_dec")
func_def = Symbol("func_def")
func_call_start = Symbol("func_call_start")
### Rules ###
# After adding a rule, make sure to add it to the rules list at the bottom!
# something that stands alone as a program, plus a function definition or
# declaration, can also stand alone as a program.
main_func_dec_cont = Rule(S, [S, func_dec])
main_func_def_cont = Rule(S, [S, func_def])
main_func_dec = Rule(S, [func_dec])
main_func_def = Rule(S, [func_def])
# make a `statements` symbol by extending another `statements` symbol
statements_cont = Rule(statements, [statements,
statement])
# make a single `statement` symbol into a `statements` symbol
statements_end = Rule(statements, [statement])
# return statement
return_form = Rule(statement, [tokens.return_command,
E,
tokens.semicolon])
# a print statement
# The print statement is not valid C. I added it for ease of use, however, as
# I do not forsee this compiler being able to inclue stdio.h anytime soon.
print_form = Rule(statement, [tokens.print_command,
E,
tokens.semicolon])
# a declaration of the form int;
useless_declaration = Rule(statement, [Token("type"), tokens.semicolon])
# a declaration of the form `int a;` or `int a, b = 0;`
real_declaration = Rule(statement, [declare_expression, tokens.semicolon])
# the type part of a declaration, along with any pointers on the first variable
declare_type_base = Rule(declare_type, [Token("type")])
declare_type_cont = Rule(declare_type, [declare_type, tokens.aster])
# used to separate declarations. all these are declare_separators:
# ,
# ,*
# , **
#
declare_separator_base = Rule(declare_separator, [tokens.comma])
declare_separator_cont = Rule(declare_separator, [declare_separator, tokens.aster])
# the base of a declaration, like `int hello` or `int* hello`.
base_declare = Rule(declare_expression, [declare_type, Token("name")])
# a non-array declaration with an assignment, like `int hello = 4` or `int* hello = &p`.
assign_declare = Rule(declare_expression, [declare_expression, tokens.equal, E], 49)
# an array declaration with assignment, like `int hi[4] = {1, 2, 3, 4}`.
# Note--I imagine a better parser would catch things like `int hi = {1, 3}`.
# Mine, however, catches these errors at the code generation stage.
arr_assign_declare = Rule(declare_expression, [declare_expression, tokens.equal, arr_list], 49)
# Converts things like `int a, b` into a fresh declare_expression to chain declarations
cont_declare = Rule(declare_expression, [declare_expression, declare_separator, Token("name")])
# Defines `int a[5]` as a valid declare expression
array_num_declare = Rule(declare_expression, [declare_expression,
tokens.open_sq_bracket,
E,
tokens.close_sq_bracket])
# Defines `int a[]` as a valid declare expression
array_nonum_declare = Rule(declare_expression, [declare_expression,
tokens.open_sq_bracket,
tokens.close_sq_bracket])
E_num = Rule(E, [Token("integer")])
E_parens = Rule(E, [tokens.open_paren,
E,
tokens.close_paren])
# Badly named--E_add can be binary addition or subtraction
E_add = Rule(E, [E,
Token("addop"),
E], 85)
E_mult = Rule(E, [E,
tokens.aster,
E], 90)
E_div = Rule(E, [E,
tokens.slash,
E], 90)
E_mod = Rule(E, [E,
tokens.percent,
E], 90)
E_boolean_and = Rule(E, [E,
tokens.logic_and,
E], 65)
E_boolean_or = Rule(E, [E,
tokens.logic_or,
E], 60)
E_eq_compare = Rule(E, [E,
Token("eq_compare"),
E], 70)
E_compare = Rule(E, [E,
Token("compare"),
E], 75)
# Again, badly named. E_neg can be either unary addition or subtraction
E_neg = Rule(E, [Token("addop"),
E], 95)
# Note this covers all of `a = 5`, `a *= 5`, `a /= 5`, etc.
# We give this rule a priority of 49, which is less than 50 (the priority) of
# the assignment symbols. This makes it right associative.
E_equal = Rule(E, [E,
Token("assignment"),
E], 49)
E_boolean_not = Rule(E, [tokens.logic_not, E], 95)
# Covers both a++ and a--
E_inc_after = Rule(E, [E,
Token("crement")], 100)
# Covers both ++a and --a
E_inc_before = Rule(E, [Token("crement"),
E], 95)
E_point = Rule(E, [tokens.aster, E], 95)
E_deref = Rule(E, [tokens.amper, E], 95)
# Calling a function like `f()`
E_func_noarg = Rule(E, [E, tokens.open_paren, tokens.close_paren])
# The start of a function call and first argument, like `f(1`
E_func_call_start = Rule(func_call_start, [E, tokens.open_paren, E], 0)
# Chaining more arguments onto the function call
E_func_call_cont = Rule(func_call_start, [func_call_start, tokens.comma, E], 0)
# Completing the function call
E_func_call_end = Rule(E, [func_call_start, tokens.close_paren])
# Array referencing, like `a[4]`
E_array = Rule(E, [E, tokens.open_sq_bracket, E, tokens.close_sq_bracket], 100)
E_var = Rule(E, [Token("name")])
E_form = Rule(statement, [E, tokens.semicolon])
# We have to separate out the start so (E) doesn't reduce to E in `if(E)`
if_start_form = Rule(if_start, [tokens.if_keyword,
tokens.open_paren])
# an if statement like `if(E) {}`
if_form_brackets = Rule(if_statement, [if_start,
E,
tokens.close_paren,
tokens.open_bracket,
tokens.close_bracket])
# a one line if statement like `if(E) a = 5;`
# it's OK to use "statements" here because statement -> statements immediately,
# so then this rule will apply right away
if_form_oneline = Rule(if_statement, [if_start,
E,
tokens.close_paren,
statements])
# the most common if form, like `if(E) {a = 5;}`
if_form_main = Rule(if_statement, [if_start,
E,
tokens.close_paren,
tokens.open_bracket,
statements,
tokens.close_bracket])
# Same things, but for else
else_form_brackets = Rule(else_statement, [tokens.else_keyword,
tokens.open_bracket,
tokens.close_bracket])
else_form_oneline = Rule(else_statement, [tokens.else_keyword,
statements])
else_form_main = Rule(else_statement, [tokens.else_keyword,
tokens.open_bracket,
statements,
tokens.close_bracket])
# We use a priority here so if an "else" follows an "if_statement", the parser
# won't apply the if_form_general rule (instead of the correct ifelse_form_general)
if_form_general = Rule(statement, [if_statement], 200)
ifelse_form_general = Rule(statement, [if_statement, else_statement])
break_form = Rule(statement, [tokens.break_keyword, tokens.semicolon])
cont_form = Rule(statement, [tokens.cont_keyword, tokens.semicolon])
# We have to separate out the start so (E) doesn't reduce to E
while_start_form = Rule(while_start, [tokens.while_keyword, tokens.open_paren])
# Same as if statement rules
while_form_brackets = Rule(statement, [while_start,
E,
tokens.close_paren,
tokens.open_bracket,
tokens.close_bracket])
while_form_oneline = Rule(statement, [while_start,
E,
tokens.close_paren,
statements])
while_form_main = Rule(statement, [while_start,
E,
tokens.close_paren,
tokens.open_bracket,
statements,
tokens.close_bracket])
# for statements
for_start_form = Rule(for_start, [tokens.for_keyword, tokens.open_paren])
for1_form = Rule(for1, [for_start, statements])
# The `statements` here better have a tree of the form:
# statements -> statement -> E, semicolon
# A better parser would probably check this while parsing, but I check during
# code gen.
for2_form = Rule(for2, [for1, statements])
for_expr_form = Rule(for_expr, [for2, E, tokens.close_paren])
for_expr_form_empty = Rule(for_expr, [for2, tokens.close_paren])
# Same as if statement rules
for_form_empty = Rule(statement, [for_expr,
tokens.semicolon])
for_form_brackets = Rule(statement, [for_expr,
tokens.open_bracket,
tokens.close_bracket])
for_form_oneline = Rule(statement, [for_expr,
statements])
for_form_main = Rule(statement, [for_expr,
tokens.open_bracket,
statements,
tokens.close_bracket])
# Array initializer with one element, like `{1}`
arr_list_one = Rule(arr_list, [tokens.open_bracket, E, tokens.close_bracket])
# Array initializer with no elements, like `{}`
arr_list_none = Rule(arr_list, [tokens.open_bracket, tokens.close_bracket])
# Start of array initializer and first element, like `{1,`
arr_list_start = Rule(arr_start, [tokens.open_bracket, E, tokens.comma])
# Contining array initalizer, like `{1, 2,`
arr_list_cont = Rule(arr_start, [arr_start, E, tokens.comma])
# Total array initializer, like `{1, 2, 3}`
arr_list_total = Rule(arr_list, [arr_start, arr_end])
# Array initializer end, like `3}`
arr_list_end = Rule(arr_end, [E, tokens.close_bracket])
# Argument list for defining/declaring functions
base_arg_form = Rule(arg_start, [declare_expression, # should have children [declare_type, name]
tokens.open_paren,
declare_expression])
cont_arg_form = Rule(arg_start, [arg_start,
tokens.comma,
declare_expression]) # should have kids [declare_type, name]
func_dec_form = Rule(func_dec, [arg_start, tokens.close_paren, tokens.semicolon])
func_def_form = Rule(func_def, [arg_start,
tokens.close_paren,
tokens.open_bracket,
statements,
tokens.close_bracket])
noarg_func_dec_form = Rule(func_dec, [declare_expression,
tokens.open_paren,
tokens.close_paren,
tokens.semicolon])
noarg_func_def_form = Rule(func_def, [declare_expression,
tokens.open_paren,
tokens.close_paren,
tokens.open_bracket,
statements,
tokens.close_bracket])
semicolon_form = Rule(statement, [tokens.semicolon])
# List of all the rules to apply. Applied in the listed order.
# In general, try to list rules above in the same order as they're listed here.
rules = [main_func_def_cont,
main_func_dec_cont,
main_func_def,
main_func_dec,
statements_cont,
statements_end,
return_form,
print_form,
useless_declaration,
real_declaration,
declare_type_base,
declare_type_cont,
declare_separator_base,
declare_separator_cont,
base_declare,
assign_declare,
arr_assign_declare,
cont_declare,
array_num_declare,
array_nonum_declare,
E_num,
E_parens,
E_add,
E_mult,
E_div,
E_mod,
E_boolean_and,
E_boolean_or,
E_eq_compare,
E_compare,
E_neg,
E_equal,
E_boolean_not,
E_inc_after,
E_inc_before,
E_point,
E_deref,
E_func_noarg,
E_func_call_start,
E_func_call_cont,
E_func_call_end,
E_array,
E_var,
E_form,
if_start_form,
if_form_brackets,
if_form_oneline,
if_form_main,
if_form_general,
else_form_brackets,
else_form_oneline,
else_form_main,
ifelse_form_general,
break_form,
cont_form,
while_start_form,
while_form_brackets,
while_form_oneline,
while_form_main,
for_start_form,
for1_form,
for2_form,
for_expr_form,
for_expr_form_empty,
for_form_brackets,
for_form_oneline,
for_form_main,
arr_list_one,
arr_list_none,
arr_list_start,
arr_list_cont,
arr_list_total,
arr_list_end,
base_arg_form,
cont_arg_form,
func_dec_form,
func_def_form,
noarg_func_dec_form,
noarg_func_def_form,
semicolon_form]
| ShivamSarodia/ShivC | rules.py | Python | gpl-2.0 | 15,247 |
'''
Created on 2014. 9. 26.
@author: jongyeob
'''
from __future__ import absolute_import
import sys
import logging
import re
from . import utils
from .utils import datetime as dt
from .utils import download as dl
# Root directory for downloaded data; may be overridden by initialize().
DATA_DIR = 'data/'
LOG = logging.getLogger(__name__); LOG.setLevel(0)
# Extension packages to import on initialize(); space-separated names.
PACKAGES = ''
# Supported instruments; INST_KEYS is parallel to this list.
INST_NAME = ['mag','swepam','sis','loc']
MAG_KEYS = ['datetime','status','bx','by','bz','bt','latitude','longitude']
SWEPAM_KEYS = ['datetime','status','density','speed','temperature']
# sis/loc have no key schema: they are recognized names but unsupported here.
SIS_KEYS = []
LOC_KEYS = []
INST_KEYS = [MAG_KEYS,SWEPAM_KEYS,SIS_KEYS,LOC_KEYS]
def initialize(config=None):
    """Load module settings (DATA_DIR, PACKAGES) from *config* and import
    any extension packages listed in PACKAGES.

    :param config: configuration object providing ``set_section`` and
        ``load_ns``.  BUG fix: the old default ``config=Config()`` raised
        NameError at module import time because ``Config`` is never
        defined or imported in this module; an explicit object is now
        required.
    """
    global DATA_DIR, PACKAGES, LOG
    if config is None:
        raise TypeError("initialize() requires a configuration object")
    config.set_section(__name__)
    config.load_ns('DATA_DIR', globals())
    config.load_ns('PACKAGES', globals())
    # Rebind the module-level logger (the old code assigned a local name
    # that was immediately discarded).
    LOG = utils.get_logger()
    for pkg in PACKAGES.split():
        utils.import_all(pkg, globals())
def empty_data(instrument):
    """Return a fresh, empty result dictionary for *instrument*.

    'mag' and 'swepam' get a dict of empty per-key lists; 'sis' and
    'loc' have no schema and yield None.  Raises ValueError for names
    not in INST_NAME.
    """
    templates = [
        {'datetime': [], 'status': [], 'bx': [], 'by': [], 'bz': [],
         'bt': [], 'latitude': [], 'longitude': []},  # mag
        {'datetime': [], 'status': [], 'density': [], 'speed': [],
         'temperature': []},                          # swepam
        None,                                         # sis
        None,                                         # loc
    ]
    return templates[INST_NAME.index(instrument)]
def check_instrument(data):
    '''
    Check data dictionary

    :param dict data: Input data
    :return: (list) names from INST_NAME whose full key schema is
        present in *data*; instruments with an empty schema (sis, loc)
        are always excluded
    '''
    inst = INST_NAME[:]
    for name, keys in zip(INST_NAME, INST_KEYS):
        # Instruments with no key schema can never match.
        if not keys:
            inst.remove(name)
            continue
        for key in keys:
            # BUG fix: replaced the Python-2-only ``data.has_key(key) == False``
            # with the idiomatic (and py3-compatible) membership test.
            if key not in data:
                inst.remove(name)
                break
    return inst
def load(start_date, instrument, end_date=None):
    '''
    Load files of instrument (mag,swepam) of ACE realtime, from start datetime to end datetime

    :param string|datetime start_date: start date for searching
    :param string|datetime end_date: end date for searching; empty/None
        means "same as start_date"
    :param string instrument: Instrument name [swepam,mag]
    :return: dictionary for instrument
    :rtype: dict
    '''
    start_dt = dt.parse(start_date)
    end_dt = start_dt
    # BUG fix: the old signature defaulted end_date to '' while the guard
    # tested ``is not None``, so the default path called dt.parse('').
    # Any falsy end_date now means "start date only", matching download().
    if end_date:
        end_dt = dt.parse(end_date)
    data_total = empty_data(instrument)
    for t in dt.datetime_range(start_dt, end_dt, months=1):
        localfile = local_path(dt.tuples(t, 'date'), instrument)
        try:
            data = load_file(localfile, instrument)
        except Exception:
            # Missing or unreadable monthly file: log and keep going.
            LOG.error("Data can not read - %s" % (localfile))
            continue
        # Keep only the samples that fall inside the requested window.
        for i, dt_string in enumerate(data['datetime']):
            if start_dt <= dt.parsing(dt_string) <= end_dt:
                for key in data_total.keys():
                    data_total[key].append(data[key][i])
    return data_total
def load_file(filepath, instrument):
    """Dispatch *filepath* to the loader for *instrument*.

    Returns the parsed data dictionary, or None when *instrument* has no
    loader (anything other than 'mag' or 'swepam').
    """
    loaders = {'mag': load_mag, 'swepam': load_swepam}
    loader = loaders.get(instrument)
    if loader is None:
        return None
    return loader(filepath)
def load_mag(filepath):
    '''
    Load a file is magnetic parameters of 1hr averaged ACE realtime data

    Parses each data line of the SWPC text file: the first four numeric
    fields are year, month, day and HHMM; two fields are skipped; the
    remaining seven captured groups map onto MAG_KEYS[1:]
    (status, bx, by, bz, bt, latitude, longitude), kept as strings.

    :param string filepath: local filepath
    :return: (dict) mag data
    '''
    lines = []
    with open(filepath) as f:
        lines = f.readlines()
    mag = empty_data('mag')
    for line in lines:
        ###
        # if mag['date'] is None:
        #     date = re.match(':Product: (\d+)_ace_(\S+)_1h.txt',line)
        #     if(date is not None):
        #         mag['date'] = date.group(1)[:4]+'-'+date.group(1)[-2:]
        #         continue
        ###
        # Header/comment lines do not match the numeric pattern and are skipped.
        data = re.match('\A(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+\d+\s+\d+\s+(\d+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+',line)
        if(data is not None):
            # NOTE(review): the zero flag in "%02s" is ignored for %s; this
            # only yields "YYYY-MM-DD HH:MM:00" because the file fields are
            # presumably already zero-padded -- confirm against sample data.
            datetime_string = "%4s-%02s-%02s"%data.groups()[0:3] + " %2s:%2s:00"%(data.group(4)[0:2],data.group(4)[2:4])
            mag['datetime'].append(datetime_string)
            i = 5
            for key in MAG_KEYS[1:]:
                mag[key].append(data.group(i))
                i = i + 1
    return mag
def load_swepam(filepath):
    '''
    Load a file is solar wind parameters of 1hr averaged ACE realtime data

    Parses each data line of the SWPC text file: the first four numeric
    fields are year, month, day and HHMM; two fields are skipped; the
    remaining four captured groups map onto SWEPAM_KEYS[1:]
    (status, density, speed, temperature), kept as strings.

    :param string filepath: local filepath
    :return: (dict) swepam data
    '''
    lines = []
    with open(filepath) as f:
        lines = f.readlines()
    swepam = empty_data('swepam')
    for line in lines:
        ###
        # if item['date'] is None:
        #     date = re.match(':Product: (\d+)_ace_(\S+)_1h.txt',line)
        #     if(date is not None):
        #         item['date'] = date.group(1)[:4]+'-'+date.group(1)[-2:]
        #         continue
        ###
        # Header/comment lines do not match the numeric pattern and are skipped.
        data = re.match('\A(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+\d+\s+\d+\s+(\d+)\s+(\S+)\s+(\S+)\s+(\S+)',line)
        if(data is not None):
            # NOTE(review): "%2s" pads with spaces, not zeros; this only
            # yields "YYYY-MM-DD" style strings because the file fields are
            # presumably already zero-padded -- confirm (cf. load_mag above).
            datetime_string = "%4s-%2s-%2s"%data.groups()[0:3] + " %2s:%2s:00"%(data.group(4)[0:2],data.group(4)[2:4])
            swepam['datetime'].append(datetime_string)
            i = 5
            for key in SWEPAM_KEYS[1:]:
                swepam[key].append(data.group(i))
                i = i + 1
    return(swepam)
def local_path(date, inst):
    '''
    Return file path pattern string.

    :param tuple date: (year, month, day) tuple; only year and month are used
    :param string inst: instrument name
    :return: file path under DATA_DIR for the monthly 1h-average file
    '''
    yyyy = date[0]
    mm = date[1]
    localfile = '%sace_rt1h/%04d/%4d%02d_ace_%s_1h.txt' % (
        DATA_DIR, yyyy, yyyy, mm, inst)
    LOG.debug("local file : %s" % (localfile))
    return localfile
def remote_path(date, inst):
    """Return the SWPC download URL for the monthly 1h-average file.

    :param tuple date: (year, month, day) tuple; only year and month are used
    :param str inst: instrument name ('mag' or 'swepam')
    """
    yyyy = date[0]
    mm = date[1]
    base = 'http://www.swpc.noaa.gov' + '/ftpdir/lists/ace2'
    return '%s/%4d%02d_ace_%s_1h.txt' % (base, yyyy, mm, inst)
def download_file(date, inst, filepath='', overwrite=False):
    '''
    Download ACE Realtime 1h average data.

    :param datetime date: Datetime
    :param string inst: Instrument name
    :param string filepath: explicit destination; default derives it from *date*
    :param bool overwrite: overwrite an existing local file
    :return: Downloaded path
    '''
    date_tuple = dt.tuples(date, 'date')
    f = remote_path(date_tuple, inst)
    if filepath == '':
        # BUG fix: local_path() unpacks a (year, month, day) tuple, but the
        # raw datetime object was being passed here (every other call site,
        # e.g. download(), converts with dt.tuples first).
        filepath = local_path(date_tuple, inst)
    LOG.debug("Download start : %s" % (f))
    rv = dl.download_http_file(f, filepath, overwrite=overwrite)
    return rv
def download(start_date, instrument, end_date=None, overwrite=False):
    '''
    Download files of instrument of ACE realtime, from start_datetime to end_datetime

    :param string instrument: Instrument name [swepam,mag]
    :param string|datetime start_date: start date for searching
    :param string|datetime end_date: end date for searching
    :param bool overwrite: overwrite existing local files
    '''
    start_dt = dt.parse(start_date)
    end_dt = start_dt
    if end_date is not None:
        end_dt = dt.parse(end_date)
    for t in dt.datetime_range(start_dt, end_dt, months=1):
        localfile = local_path(dt.tuples(t, 'date'), instrument)
        # BUG fix: ``rv`` was only assigned inside the try block, so an
        # exception from download_file() caused UnboundLocalError on the
        # first iteration (and reused a stale value on later ones).
        rv = False
        try:
            rv = download_file(t, instrument, localfile, overwrite=overwrite)
        except Exception as err:
            LOG.error(str(err))
        if rv == False:
            LOG.error("Download failed : %s" % (localfile))
if __name__ == '__main__':
    # Ad-hoc smoke test.  NOTE(review): Python 2 only -- the `print`
    # statements and `01`-style integer literals below are syntax errors
    # under Python 3.
    logging.basicConfig(level=0)
    from swpy.backup import _data as da  # NOTE(review): imported but never used -- confirm
    start = (2014,01,01)
    end = (2014,01,02)
    # NOTE(review): local_path is printed twice; the second call was
    # possibly meant to be remote_path -- confirm.
    print local_path(start, 'mag')
    print local_path(start, 'mag')
    download(start, 'mag',end_date = end)
    download(start, 'swepam',end_date = end)
    print load(start,'mag',end_date=end)
    print load(start,'swepam',end_date=end)
| jongyeob/swpy | swpy/backup/ace.py | Python | gpl-2.0 | 7,732 |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from __future__ import absolute_import
from testutil.dott import feature, sh, testtmp # noqa: F401
# Load extensions
(
sh % "cat"
<< r"""
[extensions]
arcconfig=$TESTDIR/../edenscm/hgext/extlib/phabricator/arcconfig.py
arcdiff=
"""
>> "$HGRCPATH"
)
# Diff with no revision
sh % "hg init repo"
sh % "cd repo"
sh % "touch foo"
sh % "hg add foo"
sh % "hg ci -qm 'No rev'"
sh % "hg diff --since-last-submit" == r"""
abort: local changeset is not associated with a differential revision
[255]"""
sh % "hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: local changeset is not associated with a differential revision
[255]"""
# Fake a diff
sh % "echo bleet" > "foo"
sh % "hg ci -qm 'Differential Revision: https://phabricator.fb.com/D1'"
sh % "hg diff --since-last-submit" == r"""
abort: no .arcconfig found
[255]"""
sh % "hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: no .arcconfig found
[255]"""
# Prep configuration
sh % "echo '{}'" > ".arcrc"
sh % 'echo \'{"config" : {"default" : "https://a.com/api"}, "hosts" : {"https://a.com/api/" : { "user" : "testuser", "oauth" : "garbage_cert"}}}\'' > ".arcconfig"
# Now progressively test the response handling for variations of missing data
sh % "cat" << r"""
[{}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit" == r"""
Error calling graphql: Unexpected graphql response format
abort: unable to determine previous changeset hash
[255]"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
Error calling graphql: Unexpected graphql response format
abort: unable to determine previous changeset hash
[255]"""
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"differential_diffs": {"count": 3},
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit" == r"""
abort: unable to determine previous changeset hash
[255]"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: unable to determine previous changeset hash
[255]"""
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit" == r"""
abort: unable to determine previous changeset hash
[255]"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == r"""
abort: unable to determine previous changeset hash
[255]"""
# This is the case when the diff is up to date with the current commit;
# there is no diff since what was landed.
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"latest_active_diff": {
"local_commit_info": {
"nodes": [
{"property_value": "{\"lolwut\": {\"time\": 0, \"commit\": \"2e6531b7dada2a3e5638e136de05f51e94a427f4\"}}"}
]
}
},
"differential_diffs": {"count": 1},
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == "2e6531b7dada2a3e5638e136de05f51e94a427f4 Differential Revision: https://phabricator.fb.com/D1"
# This is the case when the diff points at our parent commit, we expect to
# see the bleet text show up. There's a fake hash that I've injected into
# the commit list returned from our mocked phabricator; it is present to
# assert that we order the commits consistently based on the time field.
sh % "cat" << r"""
[{"data": {"query": [{"results": {"nodes": [{
"number": 1,
"diff_status_name": "Needs Review",
"latest_active_diff": {
"local_commit_info": {
"nodes": [
{"property_value": "{\"lolwut\": {\"time\": 0, \"commit\": \"88dd5a13bf28b99853a24bddfc93d4c44e07c6bd\"}}"}
]
}
},
"differential_diffs": {"count": 1},
"is_landing": false,
"land_job_status": "NO_LAND_RUNNING",
"needs_final_review_status": "NOT_NEEDED",
"created_time": 123,
"updated_time": 222
}]}}]}}]
""" > "$TESTTMP/mockduit"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit --nodates" == r"""
diff -r 88dd5a13bf28 -r 2e6531b7dada foo
--- a/foo
+++ b/foo
@@ -0,0 +1,1 @@
+bleet"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(.)' -T '{node} {desc}\\n'" == "88dd5a13bf28b99853a24bddfc93d4c44e07c6bd No rev"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit-2o" == r"""
Phabricator rev: 88dd5a13bf28b99853a24bddfc93d4c44e07c6bd
Local rev: 2e6531b7dada2a3e5638e136de05f51e94a427f4 (.)
Changed: foo
| ...
| +bleet"""
# Make a new commit on top, and then use -r to look at the previous commit
sh % "echo other" > "foo"
sh % "hg commit -m 'Other commmit'"
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg diff --since-last-submit --nodates -r 2e6531b" == r"""
diff -r 88dd5a13bf28 -r 2e6531b7dada foo
--- a/foo
+++ b/foo
@@ -0,0 +1,1 @@
+bleet"""
sh % "'HG_ARC_CONDUIT_MOCK=$TESTTMP/mockduit' hg log -r 'lastsubmitted(2e6531b)' -T '{node} {desc}\\n'" == "88dd5a13bf28b99853a24bddfc93d4c44e07c6bd No rev"
| facebookexperimental/eden | eden/scm/tests/test-fb-hgext-diff-since-last-submit-t.py | Python | gpl-2.0 | 6,148 |
"""Helper functions for transforming results."""
import hashlib
import logging
import os
import re
import urllib.parse
from typing import Optional
from docutils.core import publish_parts
from dump2polarion.exporters.verdicts import Verdicts
# pylint: disable=invalid-name
logger = logging.getLogger(__name__)
# Matches a pytest parametrization suffix like "[param1-param2]" in a title.
TEST_PARAM_RE = re.compile(r"\[.*\]")
def only_passed_and_wait(result):
    """Return *result* when its verdict is PASS or WAIT, otherwise None."""
    normalized = result.get("verdict", "").strip().lower()
    accepted = Verdicts.PASS + Verdicts.WAIT
    return result if normalized in accepted else None
def insert_source_info(result):
    """Add info about source of test result if available."""
    # Never clobber a comment that is already present.
    if result.get("comment"):
        return

    pieces = [result.get("source"), result.get("job_name"), result.get("run")]
    # All three fields are required to build a meaningful source note.
    if not all(pieces):
        return

    result["comment"] = "Source: {}".format("/".join(pieces))
def setup_parametrization(result, parametrize):
    """Modify result's data according to the parametrization settings."""
    if not parametrize:
        # Parametrization disabled: make sure no parameters are exported.
        result.pop("params", None)
        return

    title = result.get("title")
    if not title:
        return
    # Strip the "[...]" parameter suffix from the title.
    stripped = re.sub(r"\[.*\]", "", title)
    result["title"] = stripped
    # Keep the id in sync when it mirrored the original title.
    if result.get("id") == title:
        result["id"] = stripped
def include_class_in_title(result):
    """Make sure that test class is included in "title".

    Applies only to titles derived from test function names, e.g.
    "test_power_parent_service" -> "TestServiceRESTAPI.test_power_parent_service"

    >>> result = {"title": "test_foo", "id": "test_foo", "classname": "foo.bar.baz.TestFoo",
    ...     "file": "foo/bar/baz.py"}
    >>> include_class_in_title(result)
    >>> str(result.get("title"))
    'TestFoo.test_foo'
    >>> str(result.get("id"))
    'TestFoo.test_foo'
    >>> result.get("classname")
    """
    classname = result.get("classname", "")
    if not classname:
        return
    # classname is consumed here and must not be passed along.
    del result["classname"]

    title = result.get("title")
    filepath = result.get("file", "")
    applicable = (
        title
        and title.startswith("test_")
        and "/" in filepath
        and "." in classname
    )
    if not applicable:
        return

    fname = filepath.split("/")[-1].replace(".py", "")
    last_classname = classname.split(".")[-1]
    # Skip when the last part of classname is just the file name, or the
    # class is already mentioned in the title.
    if fname == last_classname or last_classname in title:
        return

    result["title"] = "{}.{}".format(last_classname, title)
    # Update the id as well when it was identical to the original title.
    if result.get("id") == title:
        result["id"] = result["title"]
def gen_unique_id(string):
    """Generate unique id out of a string.

    >>> gen_unique_id("vmaas_TestClass.test_name")
    '5acc5dc795a620c6b4491b681e5da39c'
    """
    digest = hashlib.sha1(string.encode("utf-8")).hexdigest()
    # Truncate the 40-char SHA-1 hex digest to 32 characters.
    return digest[:32]
def get_testcase_id(testcase, append_str):
    """Return new test case ID.

    >>> get_testcase_id({"title": "TestClass.test_name"}, "vmaas_")
    '5acc5dc795a620c6b4491b681e5da39c'
    >>> get_testcase_id({"title": "TestClass.test_name", "id": "TestClass.test_name"}, "vmaas_")
    '5acc5dc795a620c6b4491b681e5da39c'
    >>> get_testcase_id({"title": "TestClass.test_name", "id": "test_name"}, "vmaas_")
    '5acc5dc795a620c6b4491b681e5da39c'
    >>> get_testcase_id({"title": "some title", "id": "TestClass.test_name"}, "vmaas_")
    '2ea7695b73763331f8a0c4aec75362b8'
    >>> str(get_testcase_id({"title": "some title", "id": "some_id"}, "vmaas_"))
    'some_id'
    """
    title = testcase.get("title")
    testcase_id = testcase.get("id")
    # Generate an id when none is set, or when the current one is just a
    # test name ("test..." / "Test...").
    if not testcase_id or testcase_id.lower().startswith("test"):
        return gen_unique_id("{}{}".format(append_str, title))
    return testcase_id
def parse_rst_description(testcase):
    """Create an HTML version of the RST formatted description.

    Replaces testcase["description"] in place with the rendered HTML body.
    Docutils warnings are silenced (report_level=2 with a /dev/null stream);
    any rendering failure is logged and leaves the description unchanged.
    """
    description = testcase.get("description")
    if not description:
        return
    try:
        with open(os.devnull, "w") as devnull:
            testcase["description"] = publish_parts(
                description,
                writer_name="html",
                settings_overrides={"report_level": 2, "halt_level": 2, "warning_stream": devnull},
            )["html_body"]
    # Broad on purpose: a bad description must never abort the export run.
    # pylint: disable=broad-except
    except Exception as exp:
        testcase_id = testcase.get("nodeid") or testcase.get("id") or testcase.get("title")
        logger.error("%s: description: %s", str(exp), testcase_id)
def preformat_plain_description(testcase):
    """Create a preformatted HTML version of the description."""
    description = testcase.get("description")
    if not description:
        return

    # Naive dedent for pytest docstrings: a method on a Test class is
    # indented 8 spaces, a module-level test function 4 spaces.
    nodeid = testcase.get("nodeid") or ""
    if "::Test" in nodeid:
        indent = 8 * " "
    elif "::test_" in nodeid:
        indent = 4 * " "
    else:
        indent = None

    if indent:
        dedented = [
            line.replace(indent, "", 1) if line.startswith(indent) else line
            for line in description.split("\n")
        ]
        description = "\n".join(dedented)

    testcase["description"] = "<pre>\n{}\n</pre>".format(description)
def add_unique_runid(testcase, run_id=None):
    """Add run id to the test description.

    The `run_id` makes the descriptions unique between imports and forces
    Polarion to update every testcase every time.
    """
    visible = testcase.get("description") or "empty-description-placeholder"
    # Fall back to a per-process constant when no run id was supplied.
    invisible = run_id or id(add_unique_runid)
    testcase["description"] = '{}<br id="{}"/>'.format(visible, invisible)
def get_full_repo_address(repo_address: Optional[str]):
    """Make sure the repo address is complete path in repository.

    >>> get_full_repo_address("https://gitlab.com/somerepo")
    'https://gitlab.com/somerepo/blob/master/'
    >>> get_full_repo_address("https://github.com/otherrepo/blob/branch/")
    'https://github.com/otherrepo/blob/branch/'
    >>> get_full_repo_address(None)
    """
    if not repo_address:
        return None
    if "/blob/" not in repo_address:
        # the master here should probably link the latest "commit" eventually
        repo_address = "{}/blob/master".format(repo_address)
    # Normalize to exactly one trailing slash.
    return "{}/".format(repo_address.rstrip("/ "))
def fill_automation_repo(repo_address: Optional[str], testcase: dict) -> dict:
    """Fill repo address to "automation_script" if missing."""
    script = testcase.get("automation_script")
    if not script:
        return testcase
    if not repo_address:
        # Without a repository a relative script path cannot be resolved.
        del testcase["automation_script"]
        return testcase
    if not script.startswith("http"):
        testcase["automation_script"] = urllib.parse.urljoin(repo_address, script)
    return testcase
def add_automation_link(testcase):
    """Append link to automation script to the test description."""
    script = testcase.get("automation_script")
    if not script:
        return testcase
    link = '<a href="{}">Test Source</a>'.format(script)
    existing = testcase.get("description") or ""
    testcase["description"] = "{}<br/>{}".format(existing, link)
    return testcase
| mkoura/dump2polarion | dump2polarion/exporters/transform.py | Python | gpl-2.0 | 8,094 |
from django import template
register = template.Library()
class RepeatNode(template.Node):
    """Template node that renders its enclosed content ``count`` times."""
    def __init__(self, nodelist, count):
        self.nodelist = nodelist
        # resolved lazily at render time so template variables work as counts
        self.count = template.Variable(count)
    def render(self, context):
        output = self.nodelist.render(context)
        # BUG FIX: the previous code multiplied by count + 1, emitting one
        # extra copy; the tag documents {% repeat 3 %}foo{% endrepeat %}
        # as yielding exactly "foofoofoo".
        return output * int(self.count.resolve(context))
def repeat(parser, token):
    """
    Repeats the containing text a certain number of times.
    Requires a single argument, an integer, to indicate the number of times to
    repeat the enclosing content.
    Example::
    {% repeat 3 %}foo{% endrepeat %}
    Yields::
    foofoofoo
    """
    bits = token.split_contents()
    if len(bits) != 2:
        # exactly one argument (the repetition count) is required
        raise template.TemplateSyntaxError('%r tag requires 1 argument.' % bits[0])
    nodelist = parser.parse(('endrepeat',))
    # consume the {% endrepeat %} token itself
    parser.delete_first_token()
    return RepeatNode(nodelist, bits[1])
repeat = register.tag(repeat)
| julcollas/django-smokeping | smokeping/templatetags/repeat.py | Python | gpl-2.0 | 1,004 |
import numpy as np
import pandas as pd
from scipy.stats.mstats import plotting_positions
from ..mapping.evaluation import after_stat
from ..doctools import document
from ..exceptions import PlotnineError
from .distributions import get_continuous_distribution
from .stat import stat
# Note: distribution should be a name from scipy.stat.distribution
@document
class stat_qq(stat):
    """
    Calculation for quantile-quantile plot
    {usage}
    Parameters
    ----------
    {common_parameters}
    distribution : str (default: norm)
        Distribution or distribution function name. The default is
        *norm* for a normal probability plot. Objects that look enough
        like a stats.distributions instance (i.e. they have a ppf
        method) are also accepted. See :mod:`scipy stats <scipy.stats>`
        for available distributions.
    dparams : dict
        Distribution-specific shape parameters (shape parameters plus
        location and scale).
    quantiles : array_like, optional
        Probability points at which to calculate the theoretical
        quantile values. If provided, must be the same number as
        as the sample data points. The default is to use calculated
        theoretical points, use to ``alpha_beta`` control how
        these points are generated.
    alpha_beta : tuple
        Parameter values to use when calculating the quantiles.
        Default is :py:`(3/8, 3/8)`.
    See Also
    --------
    scipy.stats.mstats.plotting_positions : Uses ``alpha_beta``
        to calculate the quantiles.
    """
    _aesthetics_doc = """
    {aesthetics_table}
    .. rubric:: Options for computed aesthetics
    ::
        'theoretical' # theoretical quantiles
        'sample'      # sample quantiles
    """
    # 'sample' must be mapped by the user; x/y default to the computed columns.
    REQUIRED_AES = {'sample'}
    DEFAULT_AES = {'x': after_stat('theoretical'), 'y': after_stat('sample')}
    DEFAULT_PARAMS = {'geom': 'qq', 'position': 'identity',
                      'na_rm': False,
                      'distribution': 'norm', 'dparams': (),
                      'quantiles': None, 'alpha_beta': (3/8, 3/8)}
    @classmethod
    def compute_group(cls, data, scales, **params):
        # Sample quantiles: the observed values in ascending order.
        sample = data['sample'].sort_values().values
        alpha, beta = params['alpha_beta']
        quantiles = params['quantiles']
        if quantiles is None:
            # No user-supplied probability points: derive plotting positions
            # from the sample size and the alpha/beta parameters.
            quantiles = plotting_positions(sample, alpha, beta)
        elif len(quantiles) != len(sample):
            # User-supplied quantiles must pair 1:1 with the sample points.
            raise PlotnineError(
                "The number of quantile values is not the same as "
                "the number of sample values.")
        quantiles = np.asarray(quantiles)
        # Theoretical quantiles via the distribution's inverse CDF (ppf).
        cdist = get_continuous_distribution(params['distribution'])
        theoretical = cdist.ppf(quantiles, *params['dparams'])
        return pd.DataFrame({'sample': sample,
                             'theoretical': theoretical})
| has2k1/plotnine | plotnine/stats/stat_qq.py | Python | gpl-2.0 | 2,879 |
# -*- coding: UTF-8 -*-
"""
this is the default settings, don't insert into your customized settings!
"""
# Development defaults: debug/testing enabled — never run production with these.
DEBUG = True
TESTING = True
# Secret values used for session signing and token salting (dev placeholders).
SECRET_KEY = "5L)0K%,i.;*i/s("
SECURITY_SALT = "sleiuyyao"
# DB config
SQLALCHEMY_DATABASE_URI = "sqlite:///dev.db"
SQLALCHEMY_ECHO = True
# Upload destination directory (Flask-Uploads style key) and log file path.
UPLOADS_DEFAULT_DEST = 'uploads'
LOG_FILE = 'log.txt'
# Recipients for error-level log emails; empty list disables mailing.
ERROR_LOG_RECIPIENTS = []
# Flask-Mail related configuration, refer to
# `http://pythonhosted.org/flask-mail/#configuring-flask-mail`
MAIL_SERVER = 'smtp.foo.com'
MAIL_USERNAME = 'username'
MAIL_PASSWORD = 'password'
MAIL_DEFAULT_SENDER = 'user@foo.com'
# Frozen-Flask option; see Frozen-Flask docs for FREEZER_RELATIVE_URLS.
FREEZER_RELATIVE_URLS = False
| PuZheng/cloud-dashing | cloud_dashing/default_settings.py | Python | gpl-2.0 | 618 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, absolute_import
import unittest
from ..morf import analyze, disambiguate
# EINO SANTANEN. Muodon vanhimmat
# http://luulet6lgendus.blogspot.com/
# Fixture: lines of a poem used as analyzer input; each line is fed to
# analyze() as a separate sentence.
sentences = '''KÕIGE VANEM MUDEL
Pimedas luusivad robotid,
originaalsed tšehhi robotid kahekümnendatest.
Robota! kisendavad nad, uhked originaalsed robotid,
hüüdes iseenda nime.
Robota! möirgavad nad, naftasegused elukad,
hiiglase vaimusünnitised, robotid:
kurvameelsetena kauguses,
ebamäärastena kauguses,
mattudes vastuoludesse,
muutudes peaaegu julmaks oma õiglusejanus.
Robota! Kui päike pageb monoliitide kohalt,
tähistavad nad vägisi
öö salajast geomeetriat.
Õudne on inimesel vaadata
neid metsikuid mudeleid.
Kuuntele, romantiikkaa, 2002'''.split('\n')
class TestDisambiguator(unittest.TestCase):
    """Check that running disambiguate() on raw analyses gives the same
    result as analyzing with disambiguate=True in the first place."""
    def test_disambiguator(self):
        for line in sentences:
            analyzed_with_disamb = analyze(line)
            analyzed_raw = analyze(line, disambiguate=False)
            self.assertListEqual(analyzed_with_disamb,
                                 disambiguate(analyzed_raw))
| estnltk/estnltk | estnltk/vabamorf/tests/test_disambiguate.py | Python | gpl-2.0 | 1,255 |
# Copyright (C) 2008-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
HTML utils
"""
| linkcheck/linkchecker | linkcheck/htmlutil/__init__.py | Python | gpl-2.0 | 770 |
#!/usr/bin/env python
"""spoke -- Git plugin for GitHub integration
Copyright (C) 2012 Alex Headley <aheadley@waysaboutstuff.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import argparse
import os
from pprint import pprint
import functools
import inspect
import textwrap
import tempfile
import time
import subprocess
import git
import pygithub3
def guess_type(obj):
    """Guess a sensible argparse value type for *obj*.

    Returns int/str/bool for scalars of those types, the element type for a
    homogeneous list/tuple of them, and str for everything else.
    """
    simple_types = (int, str, bool)
    obj_type = type(obj)
    if obj_type in simple_types:
        return obj_type
    if obj_type in (list, tuple) and len(obj):
        element_type = type(obj[0])
        if element_type in simple_types and \
                all(type(element) == element_type for element in obj[1:]):
            return element_type
    # fallback: treat anything unrecognized as a plain string
    return str
def guess_action(obj):
    """Guess the argparse action for *obj*: booleans toggle, others store."""
    if guess_type(obj) is bool:
        # a truthy default means the flag should turn the value off
        return 'store_false' if obj else 'store_true'
    return 'store'
def guess_nargs(obj):
    """Guess argparse nargs for *obj*: 0 for bools, '+' for sized values,
    1 otherwise."""
    if guess_type(obj) == bool:
        return 0
    try:
        len(obj)
    except TypeError:
        # scalar without a length -> exactly one argument
        return 1
    return '+'
def get_console_size():
    """Return the terminal size as ``(rows, columns)`` integers.

    Shells out to ``stty size``; only meaningful when attached to a tty.
    """
    pipe = os.popen('stty size', 'r')
    try:
        dimensions = pipe.read()
    finally:
        pipe.close()
    return map(int, dimensions.strip().split())
class ArgFunc(object):
    """Glue between plain functions/methods and argparse.

    Functions carry their argparse argument definitions in a
    ``_argfunc_attrs`` dict (either written explicitly via ``define_args``
    or derived from the signature via ``auto_define_args``); ``add_func``
    turns those definitions into ``parser.add_argument`` calls.
    """
    @staticmethod
    def define_args(**kwargs):
        """Decorator: attach explicit argparse attrs per parameter name."""
        def wrapper(func):
            for (arg, attrs) in kwargs.items():
                # arguments with defaults become "--long-options"
                if 'default' in attrs and 'name' not in attrs:
                    attrs['name'] = '--' + arg.replace('_', '-')
                if 'dest' not in attrs and 'name' in attrs:
                    attrs['dest'] = arg
            func._argfunc_attrs = kwargs
            return func
        return wrapper
    @staticmethod
    def auto_define_args(func):
        """Decorator: derive argparse attrs from *func*'s signature.

        Positional parameters become positional arguments; parameters with
        defaults become options whose action/default are guessed from the
        default value; ``*args`` becomes a ``nargs='*'`` argument.
        """
        # inspect.getargspec() was removed in Python 3.11; prefer
        # getfullargspec() when available, stay Python 2 compatible.
        try:
            spec = inspect.getfullargspec(func)
        except AttributeError:
            spec = inspect.getargspec(func)
        args = spec.args
        pargs = spec.varargs
        defaults = spec.defaults if spec.defaults is not None else []
        if args and args[0] in ('self', 'cls'):
            args = args[1:]
        # BUG FIX: args[:-len(defaults)] is wrong when there are no defaults
        # (args[:-0] == []), dropping every positional argument; compute the
        # split point explicitly instead.
        split_at = len(args) - len(defaults)
        arg_no_defaults = args[:split_at]
        arg_defaults = zip(args[split_at:], defaults)
        attrs = {}
        for arg in arg_no_defaults:
            arg_attrs = {
                'metavar': arg.upper(),
            }
            attrs[arg] = arg_attrs
        for (arg, default) in arg_defaults:
            arg_attrs = {
                'name': '--' + arg.replace('_', '-'),
                'action': guess_action(default),
                'default': default,
                'dest': arg,
            }
            attrs[arg] = arg_attrs
        if pargs is not None:
            # variadic positional: collect everything that is left
            attrs[pargs] = {
                'name': pargs,
                'nargs': '*',
            }
        func._argfunc_attrs = attrs
        return func
    def add_func(self, parser, func):
        """Register *func*'s ``_argfunc_attrs`` on *parser* (no-op if absent)."""
        if hasattr(func, '_argfunc_attrs'):
            for (arg, attrs) in func._argfunc_attrs.items():
                fixed_attrs = attrs.copy()
                if 'name' in attrs:
                    command_name = fixed_attrs.pop('name')
                    fixed_attrs['dest'] = arg
                else:
                    command_name = arg
                parser.add_argument(command_name, **fixed_attrs)
    def add_obj(self, parser, obj):
        """Register every decorated callable found on *obj*.

        BUG FIX: the previous code called ``callable(obj, a)`` (wrong arity,
        always raised TypeError) and then passed the attribute *name* string
        instead of the attribute itself to add_func().
        """
        for attr_name in dir(obj):
            attr = getattr(obj, attr_name)
            if callable(attr) and hasattr(attr, '_argfunc_attrs'):
                self.add_func(parser, attr)
class GithubActor(object):
    """Implements the git-hub subcommands.

    Glues a local git repository (via GitPython) to the GitHub API
    (via pygithub3). Credentials are read from the repository's git
    config — or ~/.gitconfig when run outside a repo — under the
    section named by CONFIG_NS.
    """
    CONFIG_NS = 'hub'
    GIT_REMOTE_NAME = 'github'
    FALLBACK_EDITOR = 'nano'
    _current_repo = None
    _current_user = None
    _github = None
    def __init__(self, output=None):
        self._current_repo = self._init_repo()
        creds = self._get_github_credentials(self._current_repo)
        self._current_user = creds[0]
        self._github = self._init_github(creds[0], creds[1], self._current_repo)
        if output is not None:
            # allow callers to inject a different output sink
            self._output = output
    def _output(self, obj, *pargs, **kwargs):
        """Print *obj*: str.format() strings, pprint everything else."""
        if issubclass(obj.__class__, basestring):
            print(unicode(obj).format(*pargs, **kwargs))
        else:
            try:
                pprint(obj, indent=2)
            except Exception:
                print(repr(obj))
    def _init_repo(self):
        """Return a git.Repo for the cwd, or None when not inside a repo."""
        try:
            repo = git.Repo(os.getcwd())
        except git.exc.InvalidGitRepositoryError:
            repo = None
        return repo
    def _init_github(self, username, password, repo=None):
        """Build the pygithub3 client, defaulting user/repo for API calls."""
        repo_name = self._get_repo_name(repo)
        return pygithub3.Github(login=username, password=password,
            user=username, repo=repo_name)
    @property
    def _current_repo_name(self):
        return self._get_repo_name(self._current_repo)
    def _get_repo_name(self, repo):
        """Repo name = basename of the working tree, or None without a repo."""
        if repo is not None:
            return os.path.basename(repo.working_tree_dir)
        else:
            return None
    def _get_github_credentials(self, repo=None):
        """Return (username, password) from the repo config or ~/.gitconfig."""
        if repo is None:
            user_cfg_file = os.path.expanduser('~/.gitconfig')
            if os.path.exists(user_cfg_file):
                cfg = git.config.GitConfigParser(user_cfg_file)
            else:
                raise ValueError("""Can\'t find a gitconfig file for github login info.
Set the login info with:
git config --global --add {0}.username <username>
git config --global --add {0}.password <password>
""".format(self.CONFIG_NS))
        else:
            cfg = repo.config_reader()
        return (cfg.get_value(self.CONFIG_NS, 'username'),
            cfg.get_value(self.CONFIG_NS, 'password'))
    def _get_padding(self, f, iterable):
        # NOTE(review): raises ValueError on an empty iterable — confirm
        # callers always have at least one element.
        return max(len(f(i)) for i in iterable)
    def _require_in_repo(func):
        """Decorator: refuse to run the command outside of a git repo."""
        @functools.wraps(func)
        def wrapper(self, *pargs, **kwargs):
            if self._current_repo is None:
                self._output('You need to be in a repo for this command')
            else:
                return func(self, *pargs, **kwargs)
        try:
            wrapper._argfunc_attrs = func._argfunc_attrs
        except AttributeError:
            pass
        return wrapper
    @ArgFunc.auto_define_args
    def develop(self, org=None, **kwargs):
        """Clone a repo so you can start working on it, forking to your account
        if needed
        """
        target_user = kwargs.get('user', self._current_user)
        target_repo = kwargs.get('repo', self._current_repo_name)
        if os.path.exists(os.path.join(os.getcwd(), target_repo)):
            raise ValueError('Looks like the repo already exists at {0}'.format(
                os.path.join(os.getcwd(), target_repo)))
        if target_user != self._current_user:
            #need to fork first
            self._output('Looks like someone else\'s repo, forking...')
            try:
                self._github.repos.forks.create(
                    user=target_user,
                    repo=target_repo,
                    org=org,
                )
            except AssertionError:
                # pygithub3 asserts on some fork responses; ignore like the
                # other fork call sites in this class
                pass
            self._output('Waiting for GitHub to stop forking around...')
            time.sleep(5)
        self._output('Getting repo info...')
        gh_repo = self._github.repos.get(
            user=self._current_user,
            repo=target_repo,
        )
        repo_path = os.path.join(os.getcwd(), gh_repo.name)
        self._output('Cloning repo {0} ...', gh_repo.full_name)
        git.repo.base.Repo.clone_from(gh_repo.ssh_url, repo_path)
        self._output('Repo cloned to {0}, enjoy!', repo_path)
    @ArgFunc.auto_define_args
    def repos_show(self, **kwargs):
        """Show a repo's info from GitHub
        """
        display_tpl = '\n'.join([
            '{repo.full_name: <48} {repo.language: <16} {repo.forks_count: >3} ' \
                'Fork(s) {repo.watchers_count: >4} Watcher(s)',
            '{repo.description}',
            '{repo.html_url: <64} {repo.homepage}',
        ])
        gh_repo = self._github.repos.get(
            user=kwargs.get('user', self._current_user),
            repo=kwargs.get('repo', self._current_repo_name))
        self._output(display_tpl, repo=gh_repo)
    @ArgFunc.define_args(
        repo_type={'choices': ('all', 'owner', 'public', 'private', 'member'), 'default': 'all'},
    )
    def repos_list(self, repo_type='all', **kwargs):
        """List your or another user's repos
        """
        repos = self._github.repos.list(
            user=kwargs.get('user', self._current_user),
            type=repo_type).all()
        padding = self._get_padding(lambda r: r.name, repos)
        for repo in repos:
            # mark forks visually in the listing
            fork_icon = 'V' if repo.fork else '|'
            self._output(' {fork_icon} {name: <{padding}} -- {description}',
                fork_icon=fork_icon, padding=padding, **vars(repo))
    @ArgFunc.auto_define_args
    def repos_create(self, description='', homepage='', private=False,
            has_issues=False, has_wiki=False, has_downloads=False, in_org=None,
            **kwargs):
        """Create a new repo on GitHub
        """
        # locals() snapshot is exactly the API payload, minus bookkeeping keys
        data = locals().copy()
        del data['self'], data['kwargs'], data['in_org']
        data['name'] = kwargs.get('repo', self._current_repo_name)
        self._github.repos.create(data, in_org)
    @ArgFunc.auto_define_args
    def repos_fork(self, org=None, **kwargs):
        """Fork a repo on GitHub to your account (or organization)
        """
        try:
            self._github.repos.forks.create(
                user=kwargs.get('user', self._current_user),
                repo=kwargs.get('repo', self._current_repo_name),
                org=org)
        except AssertionError:
            # pygithub3 asserts on some fork responses; the fork still happens
            pass
    @ArgFunc.auto_define_args
    def repos_clone(self, **kwargs):
        """Clone a repo from GitHub
        """
        repo_name = kwargs.get('repo', None)
        if repo_name is None:
            raise ValueError('Use --repo to tell me the repo name')
        try:
            github_repo = self._github.repos.get(
                user=kwargs.get('user', self._current_user),
                repo=repo_name)
        except Exception as e:
            #TODO make this not dumb
            raise e
        repo_path = os.path.join(os.getcwd(), repo_name)
        if github_repo.permissions['push']:
            # read-write access: clone over ssh
            git.repo.base.Repo.clone_from(github_repo.ssh_url, repo_path)
        else:
            # read only: clone over the public git protocol
            git.repo.base.Repo.clone_from(github_repo.git_url, repo_path)
        self._output('Cloned {user}/{repo} to {path}',
            user=kwargs.get('user', self._current_user),
            repo=repo_name,
            path=repo_path)
    @_require_in_repo
    @ArgFunc.auto_define_args
    def repos_addremote(self, remote_name=GIT_REMOTE_NAME, **kwargs):
        """Add a remote for the corresponding repo on GitHub
        """
        actual_repo = self._current_repo
        if remote_name in (rm.name for rm in actual_repo.remotes):
            self._output('Looks like the "{0}" remote already exists',
                remote_name)
        else:
            github_repo = self._github.repos.get(
                user=kwargs.get('user', self._current_user),
                repo=kwargs.get('repo', self._current_repo_name))
            if github_repo.permissions['push']:
                #read-write, use ssh url
                actual_repo.create_remote(remote_name, github_repo.ssh_url)
            else:
                #read only, use git url
                actual_repo.create_remote(remote_name, github_repo.git_url)
            self._output('"{0}" remote added', remote_name)
    @ArgFunc.auto_define_args
    def pr_show(self, pr_number, DUMMYOPT=None, **kwargs):
        """Display a pull request
        """
        pr = self._github.pull_requests.get(pr_number,
            user=kwargs.get('user', self._current_user),
            repo=kwargs.get('repo', self._current_repo_name))
        self._output(vars(pr))
    @ArgFunc.define_args(
        state={'choices': ('open', 'closed'), 'default': 'open'},
    )
    def pr_list(self, state='open', **kwargs):
        """List the open pull requests for a repo
        Note that the --state option is currently non-functional
        """
        pull_requests = self._github.pull_requests.list(
            user=kwargs.get('user', self._current_user),
            repo=kwargs.get('repo', self._current_repo_name)).all()
        padding = self._get_padding(lambda pr: pr.user['login'], pull_requests)
        for pr in pull_requests:
            commit_count = len(self._github.pull_requests.list_commits(pr.number,
                user=kwargs.get('user', self._current_user),
                repo=kwargs.get('repo', self._current_repo_name)).all())
            self._output('#{number:0>4} {commit_count:0>2}c @{user[login]: <{padding}} {title} -- <{html_url}>',
                padding=padding, commit_count=commit_count, **vars(pr))
    @ArgFunc.auto_define_args
    def pr_merge(self, pr_number, commit_message='', **kwargs):
        """Do a simple merge of a pull request (Merge Button)
        """
        # BUG FIX: previously passed the undefined name `number` here,
        # which raised NameError before the merge call could happen.
        self._github.pull_requests.merge(pr_number, commit_message,
            user=kwargs.get('user', self._current_user),
            repo=kwargs.get('repo', self._current_repo_name))
        self._output('Pull request #{0:0>4} merged!', pr_number)
    @_require_in_repo
    @ArgFunc.auto_define_args
    def pr_addremote(self, pr_number, remote_name=None, **kwargs):
        """Add a remote for the source repo in a PR
        """
        if remote_name is None:
            remote_name = 'pr-{n:0>4}'.format(n=pr_number)
        repo = self._current_repo
        pr = self._github.pull_requests.get(pr_number,
            user=kwargs.get('user', self._current_user),
            repo=kwargs.get('repo', self._current_repo_name))
        if remote_name in (rm.name for rm in repo.remotes):
            self._output('Looks like the "{0}" remote already exists',
                remote_name)
        else:
            repo.create_remote(remote_name, pr.head['repo']['git_url'])
            self._output('"{0}" remote added', remote_name)
    @ArgFunc.auto_define_args
    def issues_show(self, issue_number, DUMMYOPT=None, **kwargs):
        """Display a specific issue
        """
        issue = self._github.issues.get(issue_number,
            user=kwargs.get('user', self._current_user),
            repo=kwargs.get('repo', self._current_repo_name))
        msg = [
            '#{i.number:0>4} ({i.state}) -- {i.title}',
            '@{i.user.login}:',
        ]
        if issue.body:
            msg.append(self._wrap_text_body(issue.body))
        self._output('\n'.join(msg), i=issue)
        comments = self._github.issues.comments.list(issue_number,
            user=kwargs.get('user', self._current_user),
            repo=kwargs.get('repo', self._current_repo_name)).all()
        for comment in comments:
            self._output('@{c.user.login}:\n{wrapped_body}',
                c=comment, wrapped_body=self._wrap_text_body(comment.body))
    def _wrap_text_body(self, text, padding=8):
        """Wrap :text: so that there are :padding: spaces on either side, based on
        terminal width
        """
        console_width = max(get_console_size()[1], padding * 3)
        return '\n'.join(' ' * padding + line \
            for line in textwrap.wrap(text.strip(), console_width - (padding * 2)))
    @ArgFunc.auto_define_args
    def issues_list(self, milestone='none', state='open', assignee='none', labels='',
            sort='created', **kwargs):
        """List a repo's issues
        """
        issues = self._github.issues.list_by_repo(
            user=kwargs.get('user', self._current_user),
            repo=kwargs.get('repo', self._current_repo_name),
            state=state,
            assignee=assignee,
            milestone=milestone,
            labels=labels,
            sort=sort,
        )
        # list_by_repo returns paginated results
        for page in issues:
            for issue in page:
                self._output('#{issue.number:0>4} ({issue.state}) @{issue.user.login: <16} -- {issue.title}',
                    issue=issue)
    @ArgFunc.auto_define_args
    def issues_create(self, title=None, body=None, assignee=None, milestone=None,
            labels=None, **kwargs):
        """Open a new issue
        """
        # locals() snapshot is the API payload, minus bookkeeping keys
        data = locals().copy()
        del data['self'], data['kwargs']
        if data['labels'] is not None:
            data['labels'] = [l.strip() for l in data['labels'].split(',')]
        if data['body'] is None:
            # no body given: open the user's editor on a temp file
            (_, path) = tempfile.mkstemp()
            with open(path, 'w') as handle:
                handle.write('# Put the body of your issue here\n' \
                    '# Lines starting with \'#\' are ignored\n' \
                    '# If you didn\'t provide a title, the first line here will be used\n')
            subprocess.call([self._get_editor(), path])
            with open(path, 'r') as handle:
                body = [line.rstrip() for line in handle.readlines() \
                    if not line.startswith('#') and line.strip()]
            if not data['title']:
                data['title'] = body[0].strip()
                data['body'] = '\n'.join(body[1:])
            else:
                data['body'] = '\n'.join(body)
            os.unlink(path)
        issue = self._github.issues.create(data,
            user=kwargs.get('user', self._current_user),
            repo=kwargs.get('repo', self._current_repo_name))
        self._output('Issue #{issue.number:0>4} created: {issue.html_url}',
            issue=issue)
    def _get_editor(self):
        """Get the editor from env variables
        Looks at $EDITOR, then $VISUAL, then falls back to :FALLBACK_EDITOR:
        """
        return os.environ.get('EDITOR',
            os.environ.get('VISUAL',
                self.FALLBACK_EDITOR))
    @ArgFunc.auto_define_args
    def issues_comment(self, issue_number, message=None, close=False, **kwargs):
        """Add a comment to an issue
        """
        if message is None:
            # no message given: open the user's editor on a temp file
            (_, path) = tempfile.mkstemp()
            with open(path, 'w') as handle:
                handle.write('# Write your comment here\n' \
                    '# Lines starting with \'#\' are ignored\n')
            subprocess.call([self._get_editor(), path])
            with open(path, 'r') as handle:
                message = '\n'.join(line.rstrip() for line in handle.readlines() \
                    if not line.startswith('#') and line.strip())
            os.unlink(path)
        comment = self._github.issues.comments.create(issue_number, message,
            user=kwargs.get('user', self._current_user),
            repo=kwargs.get('repo', self._current_repo_name))
        self._output('Comment {comment.id} added!', comment=comment)
        if close:
            self._github.issues.update(issue_number, {'state': 'closed'},
                user=kwargs.get('user', self._current_user),
                repo=kwargs.get('repo', self._current_repo_name))
            self._output('Issue closed')
def build_parser(actor):
    """Construct the argparse CLI from the decorated methods on *actor*.

    Every public ``<command>_<verb>`` method becomes a ``command-verb``
    subcommand; the standalone ``develop`` command is wired up explicitly.
    """
    arg_helper = ArgFunc()
    parser = argparse.ArgumentParser(description='git-hub - Do stuff with GitHub',
        prog='git-hub')
    parser.add_argument('--verbose', help='Display more output', action='store_true')
    command_parsers = parser.add_subparsers(title='GitHub commands',
        dest='command')
    # common options shared by every subcommand
    parent_parser = argparse.ArgumentParser(add_help=False)
    parent_parser.add_argument('-u', '--user', help='Override target username')
    parent_parser.add_argument('-r', '--repo', help='Override target repo name')
    # Group the actor's public callables by command prefix,
    # e.g. 'repos' -> ['clone', 'list', ...].
    public_names = [name for name in dir(actor)
        if not name.startswith('_') and callable(getattr(actor, name))]
    command_verbs = {}
    for name in public_names:
        command_verbs.setdefault(name.split('_')[0], [])
    for name in public_names:
        if '_' in name:
            command_verbs[name.split('_')[0]].append(name.split('_', 1)[1])
    for command in command_verbs:
        for verb in command_verbs[command]:
            full_name = command + '_' + verb
            handler = getattr(actor, full_name)
            subparser_attrs = {'parents': [parent_parser]}
            try:
                # first docstring line doubles as the --help summary
                subparser_attrs['help'] = handler.__doc__.split('\n')[0].strip()
            except AttributeError:
                pass
            verb_parser = command_parsers.add_parser(
                full_name.replace('_', '-'), **subparser_attrs)
            arg_helper.add_func(verb_parser, handler)
    develop_parser = command_parsers.add_parser('develop',
        help=actor.develop.__doc__.split('\n')[0].strip(),
        parents=[parent_parser])
    arg_helper.add_func(develop_parser, actor.develop)
    return parser
def main():
    """Entry point: dispatch the parsed CLI command to the matching method."""
    actor = GithubActor()
    args = build_parser(actor).parse_args()
    # 'repos-list' on the CLI maps to the repos_list method
    method_name = args.command.replace('-', '_')
    del args.command
    handler = getattr(actor, method_name)
    return handler(**vars(args))
# Script entry point: build the CLI and run the requested GitHub action.
if __name__ == '__main__':
    main()
| aheadley/spoke | spoke.py | Python | gpl-2.0 | 21,775 |
# -*- coding: utf-8 -*-
from gi.repository import GLib, GObject, Gio
from dfeet import dbus_utils
def args_signature_markup(arg_signature):
    """Wrap a D-Bus signature string in the small/green Pango markup."""
    markup_template = '<small><span foreground="#2E8B57">%s</span></small>'
    return markup_template % (arg_signature,)
def args_name_markup(arg_name):
    """Wrap an argument name in the small Pango markup."""
    markup_template = '<small>%s</small>'
    return markup_template % (arg_name,)
class DBusNode(GObject.GObject):
    """Represent a single D-Bus node (an object path)."""
    def __init__(self, name, object_path, node_info):
        GObject.GObject.__init__(self)
        self._name = name
        self._object_path = object_path
        self._node_info = node_info  # Gio.GDBusNodeInfo instance

    def __repr__(self):
        return "Name: %s ; ObjPath: %s ; NodeInfo: %s" % (
            self.name, self.object_path, self.node_info)

    @property
    def name(self):
        """Name given at construction time."""
        return self._name

    @property
    def object_path(self):
        """D-Bus object path of this node."""
        return self._object_path

    @property
    def node_info(self):
        """Introspection data (Gio.GDBusNodeInfo) for this node."""
        return self._node_info
class DBusInterface(DBusNode):
    """Represent a D-Bus interface on a node."""
    def __init__(self, dbus_node_obj, iface_info):
        DBusNode.__init__(self, dbus_node_obj.name,
                          dbus_node_obj.object_path, dbus_node_obj.node_info)
        self._iface_info = iface_info  # Gio.GDBusInterfaceInfo instance

    def __repr__(self):
        return "iface '%s' on node '%s'" % (self.iface_info.name, self.node_info.path)

    @property
    def iface_info(self):
        """Introspection data (Gio.GDBusInterfaceInfo) for this interface."""
        return self._iface_info
class DBusProperty(DBusInterface):
    """Represent a D-Bus property on an interface."""
    def __init__(self, dbus_iface_obj, property_info):
        DBusInterface.__init__(self, dbus_iface_obj, dbus_iface_obj.iface_info)
        self._property_info = property_info  # Gio.GDBusPropertyInfo instance
        self._value = None  # last known value; None until explicitly set

    def __repr__(self):
        signature = dbus_utils.sig_to_string(self.property_info.signature)
        return "%s %s (%s)" % (signature, self.property_info.name,
                               self.property_info.flags)

    @property
    def property_info(self):
        return self._property_info

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, new_val):
        self._value = new_val

    @property
    def markup_str(self):
        """Pango markup line describing the property (and its value, if set)."""
        signature = dbus_utils.sig_to_string(self.property_info.signature)
        access_modes = []
        if self.readable:
            access_modes.append("read")
        if self.writable:
            access_modes.append("write")
        markup = "%s %s <small>(%s)</small>" % (
            args_signature_markup(signature),
            args_name_markup(self.property_info.name),
            " / ".join(access_modes))
        if self.value is not None:
            markup += " = %s" % (GLib.markup_escape_text(str(self.value), -1),)
        return markup

    @property
    def readable(self):
        flags = int(self.property_info.flags)
        readable_flag = int(Gio.DBusPropertyInfoFlags.READABLE)
        read_write = int(Gio.DBusPropertyInfoFlags.WRITABLE |
                         Gio.DBusPropertyInfoFlags.READABLE)
        return flags == readable_flag or flags == read_write

    @property
    def writable(self):
        flags = int(self.property_info.flags)
        writable_flag = int(Gio.DBusPropertyInfoFlags.WRITABLE)
        read_write = int(Gio.DBusPropertyInfoFlags.WRITABLE |
                         Gio.DBusPropertyInfoFlags.READABLE)
        return flags == writable_flag or flags == read_write
class DBusSignal(DBusInterface):
    """Represent a D-Bus signal on an interface."""
    def __init__(self, dbus_iface_obj, signal_info):
        DBusInterface.__init__(self, dbus_iface_obj, dbus_iface_obj.iface_info)
        self._signal_info = signal_info  # Gio.GDBusSignalInfo instance

    def __repr__(self):
        return "%s" % (self.signal_info.name)

    @property
    def signal_info(self):
        return self._signal_info

    @property
    def args(self):
        """List of {'signature', 'name'} dicts, one per signal argument."""
        return [{'signature': dbus_utils.sig_to_string(arg.signature),
                 'name': arg.name}
                for arg in self.signal_info.args]

    @property
    def args_markup_str(self):
        inner = ', '.join('%s' % (args_signature_markup(arg['signature']))
                          for arg in self.args)
        return ('<span foreground="#FF00FF">(</span>' + inner
                + '<span foreground="#FF00FF">)</span>')

    @property
    def markup_str(self):
        return "%s %s" % (self.signal_info.name, self.args_markup_str)
class DBusMethod(DBusInterface):
    """Represent a D-Bus method on an interface."""
    def __init__(self, dbus_iface_obj, method_info):
        DBusInterface.__init__(self, dbus_iface_obj, dbus_iface_obj.iface_info)
        self._method_info = method_info  # Gio.GDBusMethodInfo instance

    def __repr__(self):
        return "%s(%s) ↦ %s (%s)" % (
            self.method_info.name, self.in_args_str,
            self.out_args_str, DBusInterface.__repr__(self))

    @property
    def in_args_code(self):
        """Concatenated D-Bus signatures of all input arguments."""
        return "".join(arg.signature for arg in self._method_info.in_args)

    @property
    def method_info(self):
        return self._method_info

    @property
    def markup_str(self):
        return "%s %s <b>↦</b> %s" % (
            self.method_info.name, self.in_args_markup_str,
            self.out_args_markup_str)

    @property
    def in_args(self):
        """Input arguments as {'signature', 'name'} dicts."""
        return [{'signature': dbus_utils.sig_to_string(arg.signature),
                 'name': arg.name}
                for arg in self.method_info.in_args]

    @property
    def out_args(self):
        """Output arguments as {'signature', 'name'} dicts."""
        return [{'signature': dbus_utils.sig_to_string(arg.signature),
                 'name': arg.name}
                for arg in self.method_info.out_args]

    @property
    def in_args_str(self):
        return ", ".join("%s %s" % (arg['signature'], arg['name'])
                         for arg in self.in_args)

    @property
    def out_args_str(self):
        return ", ".join("%s %s" % (arg['signature'], arg['name'])
                         for arg in self.out_args)

    def __args_markup_str(self, args):
        """Markup a given list of args as '(sig name, ...)' with colored parens."""
        inner = ', '.join(
            '%s %s' % (args_signature_markup(arg['signature']),
                       args_name_markup(arg['name']))
            for arg in args)
        return ('<span foreground="#FF00FF">(</span>' + inner
                + '<span foreground="#FF00FF">)</span>')

    @property
    def in_args_markup_str(self):
        return self.__args_markup_str(self.in_args)

    @property
    def out_args_markup_str(self):
        return self.__args_markup_str(self.out_args)
class DBusAnnotation(DBusInterface):
    """Represent a D-Bus annotation (key/value metadata) on an interface."""
    def __init__(self, dbus_iface_obj, annotation_info):
        DBusInterface.__init__(self, dbus_iface_obj, dbus_iface_obj.iface_info)
        self._annotation_info = annotation_info  # Gio.GDBusAnnotationInfo

    def __repr__(self):
        return "%s: %s" % (self.annotation_info.key, self.annotation_info.value)

    @property
    def annotation_info(self):
        return self._annotation_info

    @property
    def markup_str(self):
        return "%s: %s" % (self.annotation_info.key, self.annotation_info.value)
| GNOME/d-feet | src/dfeet/introspection_helper.py | Python | gpl-2.0 | 7,626 |
import logging
from autotest_lib.client.common_lib import error
import kvm_test_utils
def run_timedrift_with_migration(test, params, env):
    """
    Time drift test with migration:
    1) Log into a guest.
    2) Take a time reading from the guest and host.
    3) Migrate the guest.
    4) Take a second time reading.
    5) If the drift (in seconds) is higher than a user specified value, fail.
    @param test: KVM test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)
    # Collect test parameters:
    # Command to run to get the current time
    time_command = params.get("time_command")
    # Filter which should match a string to be passed to time.strptime()
    time_filter_re = params.get("time_filter_re")
    # Time format for time.strptime()
    time_format = params.get("time_format")
    # Thresholds in seconds: total allowed drift and per-iteration drift
    drift_threshold = float(params.get("drift_threshold", "10"))
    drift_threshold_single = float(params.get("drift_threshold_single", "3"))
    migration_iterations = int(params.get("migration_iterations", 1))
    try:
        # Get initial time
        # (ht stands for host time, gt stands for guest time)
        (ht0, gt0) = kvm_test_utils.get_time(session, time_command,
                                             time_filter_re, time_format)
        # Migrate
        for i in range(migration_iterations):
            # Get time before current iteration
            (ht0_, gt0_) = kvm_test_utils.get_time(session, time_command,
                                                   time_filter_re, time_format)
            # Close the session: migration invalidates the connection
            session.close()
            # Run current iteration
            logging.info("Migrating: iteration %d of %d...",
                         (i + 1), migration_iterations)
            vm.migrate()
            # Log in
            logging.info("Logging in after migration...")
            session = vm.wait_for_login(timeout=30)
            logging.info("Logged in after migration")
            # Get time after current iteration
            (ht1_, gt1_) = kvm_test_utils.get_time(session, time_command,
                                                   time_filter_re, time_format)
            # Report iteration results; drift is how much the guest clock
            # diverged from the host clock over this iteration
            host_delta = ht1_ - ht0_
            guest_delta = gt1_ - gt0_
            drift = abs(host_delta - guest_delta)
            logging.info("Host duration (iteration %d): %.2f",
                         (i + 1), host_delta)
            logging.info("Guest duration (iteration %d): %.2f",
                         (i + 1), guest_delta)
            logging.info("Drift at iteration %d: %.2f seconds",
                         (i + 1), drift)
            # Fail if necessary
            if drift > drift_threshold_single:
                raise error.TestFail("Time drift too large at iteration %d: "
                                     "%.2f seconds" % (i + 1, drift))
        # Get final time
        (ht1, gt1) = kvm_test_utils.get_time(session, time_command,
                                             time_filter_re, time_format)
    finally:
        # Always release the guest session, even when an iteration failed
        if session:
            session.close()
    # Report results (cumulative over all migrations)
    host_delta = ht1 - ht0
    guest_delta = gt1 - gt0
    drift = abs(host_delta - guest_delta)
    logging.info("Host duration (%d migrations): %.2f",
                 migration_iterations, host_delta)
    logging.info("Guest duration (%d migrations): %.2f",
                 migration_iterations, guest_delta)
    logging.info("Drift after %d migrations: %.2f seconds",
                 migration_iterations, drift)
    # Fail if necessary
    if drift > drift_threshold:
        raise error.TestFail("Time drift too large after %d migrations: "
                             "%.2f seconds" % (migration_iterations, drift))
| ceph/autotest | client/tests/kvm/tests/timedrift_with_migration.py | Python | gpl-2.0 | 3,949 |
from django.contrib import admin
from bds.models import Packet
from bds.models import CT,admins
# Register your models here.
# Expose the breach-detection-system models in the Django admin site.
admin.site.register(Packet)
admin.site.register(CT)
admin.site.register(admins)
| tremblerz/breach-detection-system | dashboard/bds/admin.py | Python | gpl-3.0 | 206 |
from glob import glob
import subprocess
import vagrant
from fabric.api import execute, env, quiet
from fabric.state import connections
from logger import init_logger, debug, info
VM_NAME = "default"
def clear_fabric_cache():
    """
    Fabric caches it's connections, so it won't have to re-connect every time you use it.
    But, when working with VMs whose connections are getting reset, we can't use a cache.
    Use this function to reset fabric's cache
    """
    # Materialize the key list before mutating the mapping: deleting entries
    # while iterating over a live .keys() view raises RuntimeError on
    # Python 3 (and is fragile in general).
    for host_string in list(connections.keys()):
        connections[host_string].close()
        del connections[host_string]
def get_all_test_functions():
    """
    Collect the tests from the current directory.

    Scans python files whose name starts with "test" and returns every
    function inside them whose name also starts with "test".
    """
    modules = [__import__(file_name[:-3]) for file_name in glob("test*.py")]
    collected = []
    for module in modules:
        for attr_name in dir(module):
            if attr_name.startswith("test"):
                collected.append(module.__dict__[attr_name])
    return collected
def vagrant_run_command(command):
    """
    Run the given command in a shell, after preceding it with "vagrant"
    """
    # BUG FIX: the original used subprocess.call(..., stdout=PIPE) without ever
    # reading the pipe; a chatty command can fill the OS pipe buffer and
    # deadlock. Popen + communicate() drains stdout safely.
    process = subprocess.Popen("vagrant " + command, shell=True, stdout=subprocess.PIPE)
    process.communicate()
def vagrant_take_snapshot():
    """
    Take a snapshot from the running machine, and name it "snapshot"
    """
    # Delegates to the vagrant CLI ("vagrant snapshot take snapshot").
    vagrant_run_command("snapshot take snapshot")
def vagrant_revert_to_snapshot():
    """
    In the running machine, revert to the last snapshot
    """
    # Delegates to the vagrant CLI ("vagrant snapshot back").
    vagrant_run_command("snapshot back")
def init_fabric(vclient):
    """
    init all the required environment for fabric

    :param vclient: a vagrant.Vagrant client used to look up the VM's
        SSH endpoint and private key
    """
    # Point fabric at the VM's SSH endpoint and key, and make it quiet
    # and tolerant of command failures.
    env.host_string = vclient.user_hostname_port(vm_name=VM_NAME)
    env.key_filename = vclient.keyfile(vm_name=VM_NAME)
    env.disable_known_hosts = True
    env.quiet = True
    env.warn_only = True
def main():
    """
    Gobi's main function.
    Finds the test functions, runs the machine, connects to them, and runs the tests
    """
    init_logger()
    info("Welcome to gobi. Sit back and relax :)")
    vclient = vagrant.Vagrant()
    test_funcs = get_all_test_functions()
    # BUG FIX: the original asserted "test_funcs > 0", comparing the list
    # itself with an int (vacuously true on Python 2, TypeError on Python 3),
    # so an empty test set was never detected. Check the count instead.
    assert len(test_funcs) > 0, "No tests found. What do you want me to run?"
    info("Found %d tests to run" % len(test_funcs))
    info("Setting up the environment...")
    vclient.up()
    info("Environment is up and ready")
    debug("Taking snapshot...")
    vagrant_take_snapshot()
    debug("Snapshot taken")
    init_fabric(vclient)
    for counter, task in enumerate(test_funcs, start=1):
        # After the first test, clean - delete cache and revert to snapshot
        if counter != 1:
            clear_fabric_cache()
            debug("Reverting to snapshot...")
            vagrant_revert_to_snapshot()
            debug("Reverted!")
        info("Running test number %d - %s" % (counter, task.__name__))
        execute(task)
    info("All tests finished")
    info("Destroying environment...")
    vclient.destroy()
    info("Environment has been destroyed...")
    info("Gobi, out")
# Script entry point.
if __name__ == "__main__":
    main()
## $Id: tools.py 23525 2011-05-12 04:11:40Z davea $
import configxml
try:
# use new hashlib if available
from hashlib import md5
except:
import md5
import os, shutil, binascii, filecmp
# from http://www.plope.com/software/uuidgen/view
_urandomfd = None
def urandom(n):
    """urandom(n) -> str
    Return a string of n random bytes suitable for cryptographic use.
    """
    global _urandomfd
    # Open /dev/urandom once and cache the descriptor across calls.
    if _urandomfd is None:
        try:
            _urandomfd = os.open("/dev/urandom", os.O_RDONLY)
        except:
            # Sentinel value: remember that opening failed so we don't retry.
            _urandomfd = NotImplementedError
    if _urandomfd is NotImplementedError:
        raise NotImplementedError("/dev/urandom (or equivalent) not found")
    bytes = ""
    # os.read may return fewer than the requested bytes; keep reading
    # until we have exactly n. (Python 2: str concatenation of byte strings.)
    while len(bytes) < n:
        bytes += os.read(_urandomfd, n - len(bytes))
    return bytes
def make_uuid():
    """Return 32 hex characters derived from 16 cryptographically random bytes."""
    random_bytes = urandom(16)
    return binascii.hexlify(random_bytes)
def md5_file(path):
    """
    Return the md5 checksum object for a file's contents
    (callers obtain the hex form via .hexdigest()).
    Read the file in chunks
    """
    chunk = 8096
    try:
        checksum = md5()
    except NameError:
        checksum = md5.new()
    # BUG FIX: open in binary mode so the digest is computed over the raw
    # bytes (text mode alters line endings on some platforms), and close
    # the handle even if reading fails.
    fp = open(path, 'rb')
    try:
        while True:
            buffer = fp.read(chunk)
            if not buffer:
                break
            checksum.update(buffer)
    finally:
        fp.close()
    return checksum
def file_size(path):
    """Return the size of a file, in bytes.

    BUG FIX: the original never closed the file handle, leaking a
    descriptor on every call.
    """
    f = open(path)
    try:
        # Seek to the end; the resulting offset is the file size.
        f.seek(0, 2)
        return f.tell()
    finally:
        f.close()
def query_yesno(str):
    '''Query user; default Yes'''
    # Python 2 print statement; the trailing comma keeps the cursor on
    # the prompt line. Any answer not starting with 'n'/'N' counts as yes.
    print str, "[Y/n] ",
    return not raw_input().strip().lower().startswith('n')
def query_noyes(str):
    '''Query user; default No'''
    # Only an explicit answer starting with 'y'/'Y' counts as yes.
    print str, "[y/N] ",
    return raw_input().strip().lower().startswith('y')
def get_output_file_path(filename):
    """ Return the filename's path in the upload directory
    Use this if you're developing a validator/assimilator in Python
    """
    config = configxml.default_config()
    fanout = long(config.config.uldl_dir_fanout)
    # BUG FIX: the original called md5.new(filename) unconditionally, which
    # fails whenever hashlib's md5 *function* was imported at the top of this
    # module (it has no .new attribute). Mirror md5_file()'s fallback.
    try:
        checksum = md5()
    except (NameError, TypeError):
        checksum = md5.new()
    checksum.update(filename)
    s = checksum.hexdigest()[1:8]
    x = long(s, 16)
    return "%s/%x/%s" % (config.config.upload_dir, x % fanout, filename)
| asimonov-im/boinc | py/Boinc/tools.py | Python | gpl-3.0 | 2,065 |
from django.core.management.base import BaseCommand
from lizard_blockbox import import_helpers
class Command(BaseCommand):
    """Management command that merges the measure shapes into one json."""
    args = ""
    help = "Merge the measure shapes to get one json."
    def handle(self, *args, **kwargs):
        """Entry point: delegate to the import helper, streaming progress to stdout."""
        import_helpers.merge_measures_blockbox(self.stdout)
| lizardsystem/lizard-blockbox | lizard_blockbox/management/commands/merge_measures_blockbox.py | Python | gpl-3.0 | 294 |
"""
API operations on Group objects.
"""
import logging
from galaxy.web.base.controller import BaseAPIController, url_for
from galaxy import web
log = logging.getLogger( __name__ )
class GroupRolesAPIController( BaseAPIController ):
    """API operations on the roles associated with a group."""

    @web.expose_api
    @web.require_admin
    def index( self, trans, group_id, **kwd ):
        """
        GET /api/groups/{encoded_group_id}/roles
        Displays a collection (list) of the group's roles.
        """
        decoded_group_id = trans.security.decode_id( group_id )
        try:
            group = trans.sa_session.query( trans.app.model.Group ).get( decoded_group_id )
        except Exception:
            # Bad/unknown id - handled by the check below.
            group = None
        if not group:
            trans.response.status = 400
            return "Invalid group id ( %s ) specified." % str( group_id )
        rval = []
        try:
            for gra in group.roles:
                role = gra.role
                encoded_id = trans.security.encode_id( role.id )
                rval.append( dict( id = encoded_id,
                                   name = role.name,
                                   url = url_for( 'group_role', group_id=group_id, id=encoded_id, ) ) )
        except Exception as e:
            rval = "Error in group API at listing roles"
            log.error( rval + ": %s" % str(e) )
            trans.response.status = 500
        return rval

    @web.expose_api
    @web.require_admin
    def show( self, trans, id, group_id, **kwd ):
        """
        GET /api/groups/{encoded_group_id}/roles/{encoded_role_id}
        Displays information about a group role.
        """
        role_id = id
        decoded_group_id = trans.security.decode_id( group_id )
        decoded_role_id = trans.security.decode_id( role_id )
        item = None
        try:
            group = trans.sa_session.query( trans.app.model.Group ).get( decoded_group_id )
            role = trans.sa_session.query( trans.app.model.Role ).get( decoded_role_id )
            for gra in group.roles:
                if gra.role == role:
                    item = dict( id = role_id,
                                 name = role.name,
                                 url = url_for( 'group_role', group_id=group_id, id=role_id) ) # TODO Fix This
            if not item:
                item = "role %s not in group %s" % (role.name,group.name)
        except Exception as e:
            item = "Error in group_role API group %s role %s" % (group.name, role.name)
            log.error(item + ": %s" % str(e))
        return item

    @web.expose_api
    @web.require_admin
    def update( self, trans, id, group_id, **kwd ):
        """
        PUT /api/groups/{encoded_group_id}/roles/{encoded_role_id}
        Adds a role to a group
        """
        role_id = id
        decoded_group_id = trans.security.decode_id( group_id )
        decoded_role_id = trans.security.decode_id( role_id )
        item = None
        try:
            group = trans.sa_session.query( trans.app.model.Group ).get( decoded_group_id )
            role = trans.sa_session.query( trans.app.model.Role ).get( decoded_role_id )
            for gra in group.roles:
                if gra.role == role:
                    item = dict( id = role_id,
                                 name = role.name,
                                 url = url_for( 'group_role', group_id=group_id, id=role_id) )
            if not item:
                gra = trans.app.model.GroupRoleAssociation( group, role )
                # Add GroupRoleAssociation
                trans.sa_session.add( gra )
                trans.sa_session.flush()
                item = dict( id = role_id,
                             name = role.name,
                             url = url_for( 'group_role', group_id=group_id, id=role_id) )
        except Exception as e:
            item = "Error in group_role API Adding role %s to group %s" % (role.name,group.name)
            log.error(item + ": %s" % str(e))
        return item

    @web.expose_api
    @web.require_admin
    def delete( self, trans, id, group_id, **kwd ):
        """
        DELETE /api/groups/{encoded_group_id}/roles/{encoded_role_id}
        Removes a role from a group
        """
        role_id = id
        decoded_group_id = trans.security.decode_id( group_id )
        decoded_role_id = trans.security.decode_id( role_id )
        # BUG FIX: item must be initialized before the try block. The original
        # left it unbound, so when the role was not in the group the
        # "if not item" check raised UnboundLocalError and the client got a
        # generic error instead of "role X not in group Y".
        item = None
        try:
            group = trans.sa_session.query( trans.app.model.Group ).get( decoded_group_id )
            role = trans.sa_session.query( trans.app.model.Role ).get( decoded_role_id )
            for gra in group.roles:
                if gra.role == role:
                    trans.sa_session.delete( gra )
                    trans.sa_session.flush()
                    item = dict( id = role_id,
                                 name = role.name,
                                 url = url_for( 'group_role', group_id=group_id, id=role_id) )
            if not item:
                item = "role %s not in group %s" % (role.name,group.name)
        except Exception as e:
            item = "Error in group_role API Removing role %s from group %s" % (role.name,group.name)
            log.error(item + ": %s" % str(e))
        return item
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/webapps/galaxy/api/group_roles.py | Python | gpl-3.0 | 5,199 |
'''
Python program for implementation of Merge Sort
l is left index, m is middle index and r is right index
L[l...m] and R[m+1.....r] are respective left and right sub-arrays
'''
def merge(arr, l, m, r):
    """Merge the two sorted runs arr[l..m] and arr[m+1..r] in place."""
    n1 = m - l + 1
    n2 = r - m
    # temporary copies of the two runs
    L = arr[l:m + 1]
    R = arr[m + 1:r + 1]
    i = 0  # index into L
    j = 0  # index into R
    k = l  # write index into arr
    # Repeatedly take the smaller head element of the two runs
    while i < n1 and j < n2:
        if L[i] <= R[j]:
            arr[k] = L[i]
            i += 1
        else:
            arr[k] = R[j]
            j += 1
        k += 1
    # Copy the remaining elements of L[], if there are any
    while i < n1:
        arr[k] = L[i]
        i += 1
        k += 1
    # Copy the remaining elements of R[], if there are any
    # (BUG FIX: the original read "arr[k] R[j]" -- the '=' was missing,
    # which is a syntax error)
    while j < n2:
        arr[k] = R[j]
        j += 1
        k += 1
# l is for left index and r is for right index of the
# subarray of arr to be sorted
def mergeSort(arr, l, r):
    """Recursively sort arr[l..r] in place using merge sort."""
    if l < r:
        # Same as (l+r)/2, but avoids overflow for large l and h.
        # BUG FIX: use integer division; plain '/' yields a float index
        # under Python 3.
        m = (l + (r - 1)) // 2
        # Sort first and second halves, then merge them
        mergeSort(arr, l, m)
        mergeSort(arr, m + 1, r)
        merge(arr, l, m, r)
| tannmay/Algorithms-1 | Sorting/Codes/mergeSort.py | Python | gpl-3.0 | 1,313 |
"""Module containing helper functions that are used by other parts of worker."""
import datetime
import getpass
import json
import logging
import signal
import re
from contextlib import contextmanager
from os import path as os_path, walk, getcwd, chdir, environ as os_environ, killpg, getpgid
from queue import Queue, Empty
from shlex import split
from subprocess import Popen, PIPE, check_output, CalledProcessError, TimeoutExpired
from threading import Thread
from traceback import format_exc
from urllib.parse import unquote, urlparse, parse_qs
import tenacity
import requests
from requests.adapters import HTTPAdapter
from requests.exceptions import HTTPError
from requests.packages.urllib3.util.retry import Retry
from selinon import StoragePool
from sqlalchemy.exc import SQLAlchemyError
from f8a_worker.enums import EcosystemBackend
from f8a_worker.errors import (TaskError,
NotABugTaskError,
F8AConfigurationException)
from f8a_worker.models import (Analysis,
Ecosystem,
Package,
Version)
from f8a_worker.defaults import configuration
logger = logging.getLogger(__name__)
def get_latest_analysis(ecosystem, package, version, db_session=None):
    """Get latest analysis for the given EPV.

    :param ecosystem: ecosystem name
    :param package: package name
    :param version: version identifier
    :param db_session: optional SQLAlchemy session; when omitted, the session
        of the connected BayesianPostgres storage is used
    :return: the most recently started Analysis row, or None when none exists
    """
    if not db_session:
        storage = StoragePool.get_connected_storage("BayesianPostgres")
        db_session = storage.session
    try:
        return db_session.query(Analysis). \
            filter(Ecosystem.name == ecosystem). \
            filter(Package.name == package). \
            filter(Version.identifier == version). \
            order_by(Analysis.started_at.desc()). \
            first()
    except SQLAlchemyError:
        # Roll back so the shared session stays usable after a failed query.
        db_session.rollback()
        raise
@contextmanager
def cwd(target):
    """Temporarily switch the working directory to *target* (pushd/popd style)."""
    previous = getcwd()
    chdir(target)
    try:
        yield
    finally:
        # Always restore the original directory, even on error.
        chdir(previous)
@contextmanager
def username():
    """Workaround for failing getpass.getuser().
    http://blog.dscpl.com.au/2015/12/unknown-user-when-running-docker.html
    """
    looked_up = ''
    try:
        looked_up = getpass.getuser()
    except KeyError:
        # No passwd entry for the current uid (typical inside containers);
        # provide a LOGNAME so user lookups keep working.
        os_environ['LOGNAME'] = 'f8aworker'
    try:
        yield
    finally:
        # Remove the fallback only if we were the ones who injected it.
        if not looked_up:
            del os_environ['LOGNAME']
def assert_not_none(name, value):
    """Raise ValueError when *value* is None; *name* is used in the message."""
    if value is not None:
        return
    raise ValueError('Parameter %r is None' % name)
class TimedCommand(object):
    """Execute arbitrary shell command in a timeout-able manner."""
    def __init__(self, command):
        """Initialize command.

        :param command: str or list; a str is tokenized with shlex.split
        """
        # parse with shlex if not execve friendly
        if isinstance(command, str):
            command = split(command)
        self.command = command
    def run(self, timeout=None, is_json=False, **kwargs):
        """Run the self.command and wait up to given time period for results.
        :param timeout: how long to wait, in seconds, for the command to finish
        before terminating it
        :param is_json: hint whether output of the command is a JSON
        :return: triplet (return code, stdout, stderr), stdout will be a
        dictionary if `is_json` is True
        """
        logger.debug("running command '%s'; timeout '%s'", self.command, timeout)
        # this gets executed in a separate thread
        def target(**kwargs):
            try:
                self.process = Popen(self.command, universal_newlines=True, **kwargs)
                self.output, self.error = self.process.communicate()
                self.status = self.process.returncode
            except Exception:
                # Record the failure; status -1 means "could not run at all".
                self.output = {} if is_json else []
                self.error = format_exc()
                self.status = -1
        # default stdout and stderr
        if 'stdout' not in kwargs:
            kwargs['stdout'] = PIPE
        if 'stderr' not in kwargs:
            kwargs['stderr'] = PIPE
        if 'update_env' in kwargs:
            # make sure we update environment, not override it
            kwargs['env'] = dict(os_environ, **kwargs['update_env'])
            kwargs.pop('update_env')
        # thread
        thread = Thread(target=target, kwargs=kwargs)
        thread.start()
        thread.join(timeout)
        # timeout reached, terminate the thread
        if thread.is_alive():
            logger.error('Command {cmd} timed out after {t} seconds'.format(cmd=self.command,
                                                                            t=timeout))
            # this is tricky - we need to make sure we kill the process with all its subprocesses;
            # using just kill might create zombie process waiting for subprocesses to finish
            # and leaving us hanging on thread.join()
            # TODO: we should do the same for get_command_output!
            killpg(getpgid(self.process.pid), signal.SIGKILL)
            thread.join()
            if not self.error:
                self.error = 'Killed by timeout after {t} seconds'.format(t=timeout)
        if self.output:
            # Post-process the raw stdout: decode JSON or split into lines.
            if is_json:
                self.output = json.loads(self.output)
            else:
                self.output = [f for f in self.output.split('\n') if f]
        return self.status, self.output, self.error
    @staticmethod
    def get_command_output(args, graceful=True, is_json=False, timeout=300, **kwargs):
        """Wrap the module-level get_command_output with a fixed timeout.

        NOTE(review): the `timeout` parameter (default 300) is ignored -- it
        is unconditionally overwritten with 10800 seconds below; confirm
        whether that is intentional.
        """
        kwargs['timeout'] = 10800
        return get_command_output(args, graceful, is_json, **kwargs)
def get_command_output(args, graceful=True, is_json=False, **kwargs):
    """Improved version of subprocess.check_output.

    :param args: command to run (list, or whatever check_output accepts)
    :param graceful: bool, if False, raise Exception when command fails
    :param is_json: bool, if True, return decoded json
    :param kwargs: forwarded to subprocess.check_output (e.g. timeout)
    :return: list of strings, output which command emitted
        (a dict when `is_json` is True; [] on graceful failure)
    """
    logger.debug("running command %s", args)
    try:
        # Using universal_newlines mostly for the side-effect of decoding
        # the output as UTF-8 text on Python 3.x
        out = check_output(args, universal_newlines=True, **kwargs)
    except (CalledProcessError, TimeoutExpired) as ex:
        # TODO: we may want to use subprocess.Popen to be able to also print stderr here
        # (while not mixing it with stdout that is returned if the subprocess succeeds)
        if isinstance(ex, TimeoutExpired):
            logger.warning("command %s timed out:\n%s", args, ex.output)
        else:
            logger.warning("command %s ended with %s\n%s", args, ex.returncode, ex.output)
        if not graceful:
            logger.error(ex)
            # we don't know whether this is a bug or the command was simply called
            # with invalid/unsupported input. Caller needs to catch the exception
            # and decide.
            raise TaskError("Error during running command %s: %r" % (args, ex.output))
        else:
            logger.debug("Ignoring because graceful flag is set")
        return []
    else:
        if is_json:
            # FIXME: some error handling here would be great
            return json.loads(out)
        else:
            return [f for f in out.split('\n') if f]  # py2 & 3 compat
def get_all_files_from(target, path_filter=None, file_filter=None):
    """Yield the absolute path of every file under *target*.

    :param target: directory to walk recursively
    :param path_filter: optional predicate on the absolute path; paths for
        which it returns falsy are skipped
    :param file_filter: optional predicate on the bare file name
    """
    for dirpath, _subdirs, filenames in walk(target):
        for filename in filenames:
            full_path = os_path.abspath(os_path.join(dirpath, filename))
            # apply the filters as early as possible
            if path_filter is not None and not path_filter(full_path):
                continue
            if file_filter is not None and not file_filter(filename):
                continue
            yield full_path
def hidden_path_filter(item):
    """Return False for hidden files or files inside hidden directories."""
    for segment in item.split(os_path.sep):
        if segment.startswith('.'):
            return False
    return True
def json_serial(obj):
    """Serialize datetime objects to their ISO-8601 string form (json default=)."""
    if not isinstance(obj, datetime.datetime):
        raise TypeError('Type {t} not serializable'.format(t=type(obj)))
    return obj.isoformat()
def in_path(directory, path):
    """Check whether directory is in path.

    :param directory: str
    :param path: str
    :return: True if directory appears as a path segment of *path*
    """
    segments = path.split(os_path.sep)
    return directory in segments
def skip_git_files(path):
    """Return True when no path segment is the '.git' directory."""
    return '.git' not in path.split(os_path.sep)
class ThreadPool(object):
    """Implementation of thread pool."""
    def __init__(self, target, num_workers=10, timeout=3):
        """Initialize `ThreadPool`.

        :param target: Function that accepts exactly one argument
        :param num_workers: int, number of worker threads to spawn
        :param timeout: int, maximum number of seconds workers wait for new task
        """
        self.target = target
        self.num_workers = num_workers
        self.timeout = timeout
        self.queue = Queue()
        self._threads = [Thread(target=self._work) for i in range(0, num_workers)]
    def add_task(self, arg):
        """Enqueue a new task.

        :param arg: argument for the `target` that was passed to constructor
        """
        self.queue.put(arg)
    def start(self):
        """Start processing by all threads."""
        [t.start() for t in self._threads]
    def join(self):
        """Join all threads."""
        [t.join() for t in self._threads]
        self.queue.join()
    def _work(self):
        # Worker loop: pull tasks until the queue stays empty for
        # `self.timeout` seconds, then exit.
        while True:
            try:
                arg = self.queue.get(block=True, timeout=self.timeout)
            except Empty:
                break
            try:
                self.target(arg)
            finally:
                # Mark the task done even when `target` raises,
                # so queue.join() cannot hang.
                self.queue.task_done()
    def __enter__(self):
        """Enter context manager."""
        self.start()
        return self
    def __exit__(self, *_args, **_kwargs):
        """Exit context manager."""
        self.join()
def compute_digest(target, function='sha256', raise_on_error=False):
    """Compute digest of a provided file.

    :param target: str, file path
    :param function: str, prefix name of the hashing function
    :param raise_on_error: bool, raise an error when computation wasn't successful if set to True
    :returns str or None, computed digest

    `function` requires an executable with matching name on the system (sha256sum, sha1sum etc.)
    """
    function += 'sum'
    # returns e.g.:
    # 65ecde5d025fcf57ceaa32230e2ff884ab204065b86e0e34e609313c7bdc7b47 /etc/passwd
    data = TimedCommand.get_command_output([function, target], graceful=not raise_on_error)
    try:
        # First whitespace-separated token of the first output line is the digest.
        return data[0].split(' ')[0].strip()
    except IndexError as exc:
        # Empty output: the checksum tool failed (e.g. missing file or a directory).
        logger.error("unable to compute digest of %r, likely it doesn't exist or is a directory",
                     target)
        if raise_on_error:
            raise RuntimeError("can't compute digest of %s" % target) from exc
class MavenCoordinates(object):
    """Represents Maven coordinates.

    https://maven.apache.org/pom.html#Maven_Coordinates
    """

    _default_packaging = 'jar'

    def __init__(self, groupId, artifactId, version='',
                 classifier='', packaging=None):
        """Initialize attributes.

        :param groupId: Maven group id
        :param artifactId: Maven artifact id
        :param version: artifact version (may be empty)
        :param classifier: optional classifier, e.g. 'sources'
        :param packaging: packaging type; defaults to 'jar'
        """
        self.groupId = groupId
        self.artifactId = artifactId
        self.classifier = classifier
        self.packaging = packaging or MavenCoordinates._default_packaging
        self.version = version

    def is_valid(self):
        """Check if the current coordinates are valid (all mandatory parts set)."""
        return self.groupId and self.artifactId and self.version and self.packaging

    def to_str(self, omit_version=False):
        """Return string representation of the coordinates."""
        mvnstr = "{g}:{a}".format(g=self.groupId, a=self.artifactId)
        pack = self.packaging
        if pack == MavenCoordinates._default_packaging:
            # the default packaging is implicit and left out of the string
            pack = ''
        if pack:
            mvnstr += ":{p}".format(p=pack)
        if self.classifier:
            if not pack:
                mvnstr += ':'
            mvnstr += ":{c}".format(c=self.classifier)
        if not self.version or omit_version:
            if self.classifier or pack:
                mvnstr += ':'
        else:
            mvnstr += ":{v}".format(v=self.version)
        return mvnstr

    def to_repo_url(self, ga_only=False):
        """Return relative path to the artifact in a Maven repository.

        :param ga_only: when True, return only the group/artifact directory
        """
        if ga_only:
            return "{g}/{a}".format(g=self.groupId.replace('.', '/'),
                                    a=self.artifactId)
        dir_path = "{g}/{a}/{v}/".format(g=self.groupId.replace('.', '/'),
                                         a=self.artifactId,
                                         v=self.version)
        classifier = "-{c}".format(c=self.classifier) if self.classifier else ''
        filename = "{a}-{v}{c}.{e}".format(a=self.artifactId,
                                           v=self.version,
                                           c=classifier,
                                           e=self.packaging)
        return dir_path + filename

    @staticmethod
    def _parse_string(coordinates_str):
        """Parse string representation into a dictionary."""
        a = {'groupId': '',
             'artifactId': '',
             'packaging': MavenCoordinates._default_packaging,
             'classifier': '',
             'version': ''}
        ncolons = coordinates_str.count(':')
        if ncolons == 1:
            a['groupId'], a['artifactId'] = coordinates_str.split(':')
        elif ncolons == 2:
            a['groupId'], a['artifactId'], a['version'] = coordinates_str.split(':')
        elif ncolons == 3:
            a['groupId'], a['artifactId'], a['packaging'], a['version'] = coordinates_str.split(':')
        elif ncolons == 4:
            a['groupId'], a['artifactId'], a['packaging'], a['classifier'], a['version'] = \
                coordinates_str.split(':')
        else:
            # BUG FIX: the original passed coordinates_str as a second
            # positional argument to ValueError (logging style), so the
            # message was never interpolated; format it explicitly.
            raise ValueError('Invalid Maven coordinates %s' % coordinates_str)
        return a

    def __repr__(self):
        """Represent as string."""
        return self.to_str()

    def __eq__(self, other):
        """Implement == operator."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Implement != operator."""
        return not self.__eq__(other)

    @classmethod
    def normalize_str(cls, coordinates_str):
        """Normalize string representation (canonical ordering, implicit jar)."""
        return cls.from_str(coordinates_str).to_str()

    @classmethod
    def from_str(cls, coordinates_str):
        """Create instance from string."""
        coordinates = MavenCoordinates._parse_string(coordinates_str)
        return cls(**coordinates)
def parse_gh_repo(potential_url):
    """Extract '<username>/<reponame>' from the many GitHub URL spellings.

    Handles http(s)/git/git+https URLs with or without 'www.' and '.git',
    as well as the ssh-style 'git@github.com:owner/repo[.git]' form.
    Returns None when the string does not look like a GitHub repo.
    Only the first two path segments are kept; a result without exactly
    one '/' is rejected.
    """
    # TODO: reduce cyclomatic complexity
    if not potential_url:
        return None
    candidate = potential_url
    # Rewrite the ssh-ish "…@github.com:owner/repo" shape into URL form so a
    # single urlparse pass can handle every variant.
    if '@' in candidate:
        pieces = candidate.split('@')
        if len(pieces) == 2 and pieces[1].startswith('github.com:'):
            candidate = 'http://' + pieces[1].replace('github.com:', 'github.com/')
    # urlparse only fills in netloc when a scheme is present.
    if not candidate.startswith(('http://', 'https://', 'git://', 'git+https://')):
        candidate = 'http://' + candidate
    parsed = urlparse(candidate)
    repo_name = None
    if parsed.scheme in ['http', 'https', 'git', 'git+https'] and \
            parsed.netloc in ['github.com', 'www.github.com']:
        repo_name = parsed.path
        if repo_name.endswith('.git'):
            repo_name = repo_name[:-len('.git')]
    if repo_name:
        repo_name = repo_name.strip('/')
        path_parts = repo_name.split('/')
        if len(path_parts) > 2:
            # keep only "<username>/<reponame>"
            repo_name = path_parts[0] + '/' + path_parts[1]
        if repo_name.count('/') != 1:
            return None
    return repo_name
def url2git_repo(url):
    """Convert URL to git repo URL and force use HTTPS."""
    if url.startswith('git+'):
        # strip the 'git+' prefix and keep the embedded URL as-is
        return url[len('git+'):]
    if url.startswith('git@'):
        host_and_path = url[len('git@'):].split(':')
        if len(host_and_path) != 2:
            raise ValueError("Unable to parse git repo URL '%s'" % str(host_and_path))
        return 'https://{}/{}'.format(host_and_path[0], host_and_path[1])
    if url.startswith(('http://', 'https://', 'git://')):
        return url
    # bare host/path - default to plain http
    return 'http://' + url
def case_sensitivity_transform(ecosystem, name):
    """Transform package name to lowercase for ecosystems that are not case sensitive.

    :param ecosystem: name of the ecosystem the package sits in
    :param name: package name
    :return: transformed package name based on ecosystem package case sensitivity
    """
    # Only PyPI-backed ecosystems are treated as case-insensitive here.
    if Ecosystem.by_name(StoragePool.get_connected_storage('BayesianPostgres').session,
                         ecosystem).is_backed_by(EcosystemBackend.pypi):
        return name.lower()
    return name
def get_session_retry(retries=3, backoff_factor=0.2, status_forcelist=(404, 500, 502, 504),
                      session=None):
    """Set HTTP Adapter with retries to session.

    :param retries: total/read/connect retry count
    :param backoff_factor: back-off multiplier between attempts
    :param status_forcelist: HTTP status codes that trigger a retry
    :param session: optional existing session to configure; a new one otherwise
    :return: the configured requests.Session
    """
    session = session or requests.Session()
    retry = Retry(total=retries, read=retries, connect=retries,
                  backoff_factor=backoff_factor, status_forcelist=status_forcelist)
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    # BUG FIX: the adapter was only mounted for plain http://, so requests to
    # https:// URLs silently bypassed the retry policy.
    session.mount('https://', adapter)
    return session
def normalize_package_name(ecosystem_backend, name):
    """Normalize package name.

    :param ecosystem_backend: str, ecosystem backend
    :param name: str, package name
    :return: str, normalized package name for supported ecosystem backend,
        the same package name otherwise
    """
    if ecosystem_backend == 'pypi':
        # https://www.python.org/dev/peps/pep-0503/#normalized-names
        return re.sub(r'[-_.]+', '-', name).lower()
    if ecosystem_backend == 'maven':
        # https://maven.apache.org/pom.html#Maven_Coordinates
        return MavenCoordinates.normalize_str(name)
    if ecosystem_backend == 'go':
        # go package name is the host+path part of a URL, thus it can be URL encoded
        return unquote(name)
    # npm (and any unknown backend) keeps the name untouched
    return name
def get_user_email(user_profile):
    """Return the profile's email address, falling back to a default one."""
    default_email = 'bayesian@redhat.com'
    if user_profile is None:
        return default_email
    return user_profile.get('email', default_email)
@tenacity.retry(stop=tenacity.stop_after_attempt(3),
                wait=tenacity.wait_exponential(multiplier=2, min=10, max=60))
def get_response(url):
    """Wrap requests which tries to get response.

    Retried up to 3 times with exponential back-off (see the decorator).

    :param url: URL where to do the request
    :return: content of response's json; {} for HTTP 404/204 responses
    :raises NotABugTaskError: when the request fails with an HTTP error
    """
    try:
        response = requests.get(url, headers=get_header())
        # If status code is 404 or 204 then don't retry
        if response.status_code in [404, 204]:
            return {}
        response.raise_for_status()
        response = response.json()
        return response
    except HTTPError as err:
        message = "Failed to get results from {url} with {err}".format(url=url, err=err)
        logger.error(message)
        raise NotABugTaskError(message) from err
def add_maven_coords_to_set(coordinates_str, gav_set):
    """Parse Maven coordinates and add their 'maven:g:a:v' form to *gav_set*."""
    coords = MavenCoordinates.from_str(coordinates_str)
    gav_set.add(':'.join(('maven', coords.groupId, coords.artifactId, coords.version)))
def peek(iterable):
    """Return the first item of *iterable*, or None when it is exhausted."""
    return next(iterable, None)
@tenacity.retry(stop=tenacity.stop_after_attempt(3),
                wait=tenacity.wait_exponential(multiplier=2, min=10, max=60))
def get_gh_contributors(url):
    """Get number of contributors from Git URL.

    :param url: URL where to do the request
    :return: length of contributor's list (-1 for HTTP 404, 0 for HTTP 204)
    :raises NotABugTaskError: when the request fails with an HTTP error
    """
    try:
        response = requests.get("{}?per_page=1".format(url),
                                headers=get_header())
        # If status code is 404 or 204 then don't retry
        if response.status_code == 404:
            return -1
        if response.status_code == 204:
            return 0
        response.raise_for_status()
        # With per_page=1 the page number of the "last" Link header equals the
        # contributor count; no Link header means there is a single page.
        contributors_count = int(parse_qs(response.links['last']['url'])['page'][0]) \
            if response.links else 1
        return contributors_count
    except HTTPError as err:
        raise NotABugTaskError(err) from err
def store_data_to_s3(arguments, s3, result):
    """Persist *result* for the given *arguments* into S3, logging failures."""
    try:
        s3.store_data(arguments, result)
    except Exception as exc:
        # best-effort: a failed upload is logged, never propagated
        logger.error(exc)
@tenacity.retry(stop=tenacity.stop_after_attempt(4),
                wait=tenacity.wait_exponential(multiplier=3, min=10, max=60))
def get_gh_query_response(repo_name, status, type, start_date, end_date, event):
    """Get details of PRs and Issues from given Github repo.

    :param repo_name: Github repo name
    :param status: status of issue Ex. open/closed (empty string = both)
    :param type: type of issue to set in search query Ex. pr/issue
    :param start_date: date since data has to be collected
    :param end_date: date upto data has to be collected
    :param event: event which needs to be considered Ex. created/closed
    :return: count of issue/pr based on criteria
    """
    try:
        """
        Create search query for given criteria
        page and per_page is set to 1, as search query provides count of entities
        matching with given criteria in all pages we dont need to collect all data.
        """
        url = "{GITHUB_API}search/issues?" \
              "page=1" \
              "&per_page=1" \
              "&q=repo:{repo_name}" \
              "+is:{type}" \
              "+{event}:{start_date}..{end_date}"\
            .format(GITHUB_API=configuration.GITHUB_API,
                    repo_name=repo_name,
                    start_date=start_date,
                    end_date=end_date,
                    type=type,
                    event=event)
        # If status is set to closed by default open & closed both are set
        if status:
            url = '{url}+is:{status}'.format(url=url, status=status)
        response = requests.get(url, headers=get_header())
        response.raise_for_status()
        resp = response.json()
        # total_count covers all pages, so fetching one item is enough.
        return resp.get('total_count', 0)
    except Exception as e:
        logger.error(e)
        raise
@tenacity.retry(stop=tenacity.stop_after_attempt(2),
                wait=tenacity.wait_exponential(multiplier=1, min=4, max=10))
def execute_gh_queries(repo_name, start_date, end_date):
    """Get details of Github PR/Issues based on given date range.

    :param repo_name: Github repo name
    :param start_date: date since data has to be collected
    :param end_date: date upto data has to be collected
    :return: 4-tuple (pr_opened, pr_closed, issues_opened, issues_closed)
    """
    try:
        # Get PR details based on date range provided
        # ("opened" counts use an empty status, i.e. both open and closed items
        # created in the range)
        pr_opened = get_gh_query_response(repo_name, '',
                                          'pr', start_date, end_date, 'created')
        pr_closed = get_gh_query_response(repo_name, 'closed',
                                          'pr', start_date, end_date, 'closed')
        # Get Issue details based on date range provided
        issues_opened = get_gh_query_response(repo_name,
                                              '', 'issue', start_date, end_date, 'created')
        issues_closed = get_gh_query_response(repo_name,
                                              'closed', 'issue', start_date, end_date, 'closed')
        return pr_opened, pr_closed, issues_opened, issues_closed
    except Exception as e:
        logger.error(e)
        raise
def get_gh_pr_issue_counts(repo_name):
    """Get details of Github PR/Issues for given repo.

    :param repo_name: Github repo name
    :return: Dict having Issue/PR details
    """
    today = datetime.date.today()

    def window_counts(days_back):
        # Query the window [today - days_back, today]; on any failure the
        # four counts are reported as -1 so the importer can tell "unknown"
        # from a genuine zero.
        window_start = today - datetime.timedelta(days=days_back)
        try:
            return execute_gh_queries(repo_name, window_start, today)
        except Exception as e:
            logger.error(e)
            return -1, -1, -1, -1

    # Previous month first, then previous year (approximated as 30/365 days).
    pr_opened_last_month, pr_closed_last_month, \
        issues_opened_last_month, issues_closed_last_month = window_counts(30)
    pr_opened_last_year, pr_closed_last_year, \
        issues_opened_last_year, issues_closed_last_year = window_counts(365)

    # Shape required by the data importer.
    return {
        "updated_pull_requests": {
            "year": {"opened": pr_opened_last_year, "closed": pr_closed_last_year},
            "month": {"opened": pr_opened_last_month, "closed": pr_closed_last_month}
        },
        "updated_issues": {
            "year": {"opened": issues_opened_last_year, "closed": issues_closed_last_year},
            "month": {"opened": issues_opened_last_month, "closed": issues_closed_last_month}
        }
    }
def get_header():
    """Build HTTP headers for Github API calls, with a random Github token.

    Falls back to unauthenticated headers when no token can be selected.

    :return: dict of headers suitable for ``requests``
    """
    headers = {
        'Accept': 'application/vnd.github.mercy-preview+json, '  # for topics
                  'application/vnd.github.v3+json'  # recommended by GitHub for License API
    }
    try:
        _, header = configuration.select_random_github_token()
        headers.update(header)
    except F8AConfigurationException as e:
        # No usable token available; log and proceed unauthenticated.
        # (The former `headers.update({})` here was a no-op and is removed.)
        logger.error(e)
    return headers
| fabric8-analytics/fabric8-analytics-worker | f8a_worker/utils.py | Python | gpl-3.0 | 28,130 |
"""
S.Tomin and I.Zagorodnov, 2017, DESY/XFEL
"""
from ocelot.common.globals import *
import logging
logger = logging.getLogger(__name__)
# numexpr is an optional accelerator for the large array expressions below;
# fall back to plain numpy when it is not installed.
try:
    import numexpr as ne
    ne_flag = True
except ImportError:
    # Catch only ImportError: a bare `except:` would also have masked real
    # failures raised from inside numexpr during import.
    logger.debug("coord_transform.py: module NUMEXPR is not installed. Install it to speed up calculation")
    ne_flag = False
def xp_2_xxstg_mad(xp, xxstg, gamref):
    """Convert particle coordinates (x, y, z, px, py, pz) to MAD canonical form.

    :param xp: (6, N) array; rows 0-2 positions, rows 3-5 momenta
        (assumed eV/c, consistent with m_e_eV -- TODO confirm units)
    :param xxstg: pre-allocated (6, N) output array, filled in place
    :param gamref: reference Lorentz factor
    :return: xxstg
    """
    N = xp.shape[1]
    # Reference momentum and velocity derived from the reference gamma.
    pref = m_e_eV * np.sqrt(gamref ** 2 - 1)
    betaref = np.sqrt(1 - gamref ** -2)
    u = np.c_[xp[3], xp[4], xp[5]]
    if ne_flag:
        sum_u2 = ne.evaluate('sum(u * u, 1)')
        gamma = ne.evaluate('sqrt(1 + sum_u2 / m_e_eV ** 2)')
        beta = ne.evaluate('sqrt(1 - gamma ** -2)')
    else:
        gamma = np.sqrt(1 + np.sum(u * u, 1) / m_e_eV ** 2)
        beta = np.sqrt(1 - gamma ** -2)
    # BUGFIX: the former guard `np.__version__ > "1.8"` compared version
    # strings lexicographically, so e.g. "1.10" sorted *before* "1.8" and
    # modern numpy always fell into the slower manual branch. The axis
    # argument of np.linalg.norm exists since numpy 1.8 (2013), so call it
    # unconditionally; the result is identical to the old fallback.
    p0 = np.linalg.norm(u, 2, 1).reshape((N, 1))
    u = u / p0  # unit direction vectors
    u0 = u[:, 0]
    u1 = u[:, 1]
    u2 = u[:, 2]
    if ne_flag:
        xp0 = xp[0]
        xp1 = xp[1]
        xp2 = xp[2]
        cdt = ne.evaluate('-xp2 / (beta * u2)')
        xxstg[0] = ne.evaluate('xp0 + beta * u0 * cdt')
        xxstg[2] = ne.evaluate('xp1 + beta * u1 * cdt')
        xxstg[5] = ne.evaluate('(gamma / gamref - 1) / betaref')
    else:
        # Time-of-flight back-projection onto the z = 0 plane.
        cdt = -xp[2] / (beta * u2)
        xxstg[0] = xp[0] + beta * u0 * cdt
        xxstg[2] = xp[1] + beta * u1 * cdt
        xxstg[5] = (gamma / gamref - 1) / betaref
    xxstg[4] = cdt
    # Transverse momenta normalized by the reference momentum.
    xxstg[1] = xp[3] / pref
    xxstg[3] = xp[4] / pref
    return xxstg
def xxstg_2_xp_mad(xxstg, xp, gamref):
    """Convert particle coordinates from MAD canonical form back to (x, y, z, px, py, pz).

    Inverse of :func:`xp_2_xxstg_mad`.

    :param xxstg: (6, N) array in MAD canonical coordinates
    :param xp: pre-allocated (6, N) output array, filled in place
    :param gamref: reference Lorentz factor
    :return: xp
    """
    N = xxstg.shape[1]
    #pref = m_e_eV * np.sqrt(gamref ** 2 - 1)
    betaref = np.sqrt(1 - gamref ** -2)
    if ne_flag:
        xxstg1 = xxstg[1]
        xxstg3 = xxstg[3]
        xxstg5 = xxstg[5]
        gamma = ne.evaluate('(betaref * xxstg5 + 1) * gamref')
        beta = ne.evaluate('sqrt(1 - gamma ** -2)')
        pz2pref = ne.evaluate('sqrt(((gamma * beta) / (gamref * betaref)) ** 2 - xxstg1 ** 2 - xxstg3 ** 2)')
    else:
        gamma = (betaref * xxstg[5] + 1) * gamref
        beta = np.sqrt(1 - gamma ** -2)
        # Longitudinal momentum fraction recovered from the energy deviation.
        pz2pref = np.sqrt(((gamma * beta) / (gamref * betaref)) ** 2 - xxstg[1] ** 2 - xxstg[3] ** 2)
    u = np.c_[xxstg[1] / pz2pref, xxstg[3] / pz2pref, np.ones(N)]
    # BUGFIX: the former guard `np.__version__ > "1.8"` compared version
    # strings lexicographically ("1.10" < "1.8"), so modern numpy always used
    # the manual fallback. np.linalg.norm with an axis argument is available
    # since numpy 1.8 and computes the identical row norms.
    norm = np.linalg.norm(u, 2, 1).reshape((N, 1))
    u = u / norm  # unit direction vectors
    u0 = u[:, 0]
    u1 = u[:, 1]
    u2 = u[:, 2]
    if ne_flag:
        xxstg0 = xxstg[0]
        xxstg2 = xxstg[2]
        xxstg4 = xxstg[4]
        xp[0] = ne.evaluate('xxstg0 - u0 * beta * xxstg4')
        xp[1] = ne.evaluate('xxstg2 - u1 * beta * xxstg4')
        xp[2] = ne.evaluate('-u2 * beta * xxstg4')
        xp[3] = ne.evaluate('u0 * gamma * beta * m_e_eV')
        xp[4] = ne.evaluate('u1 * gamma * beta * m_e_eV')
        xp[5] = ne.evaluate('u2 * gamma * beta * m_e_eV')
    else:
        # Positions propagated back along the particle direction, then momenta.
        xp[0] = xxstg[0] - u0 * beta * xxstg[4]
        xp[1] = xxstg[2] - u1 * beta * xxstg[4]
        xp[2] = -u2 * beta * xxstg[4]
        xp[3] = u0 * gamma * beta * m_e_eV
        xp[4] = u1 * gamma * beta * m_e_eV
        xp[5] = u2 * gamma * beta * m_e_eV
    return xp
| ocelot-collab/ocelot | ocelot/cpbd/coord_transform.py | Python | gpl-3.0 | 3,359 |
import os
def post_add(conf, name):
    """Run the configured POST_ADD_HOOK after a password entry was added.

    Does nothing when the configuration has no "POST_ADD_HOOK" entry.

    :param conf: configuration mapping with PASSWD_DIR, PASSWD_FILE and
        optionally POST_ADD_HOOK entries
    :param name: name of the new password entry, exported to the hook via
        the NEW_PASSWORD_NAME environment variable
    """
    if "POST_ADD_HOOK" not in conf:
        return
    passwd_dir = os.path.expanduser(os.path.expandvars(conf["PASSWD_DIR"]))
    hook_exec = os.path.expandvars(conf["POST_ADD_HOOK"])
    os.chdir(passwd_dir)
    # BUGFIX: os.putenv() does not update os.environ, which is why the old
    # code had to delete any stale NEW_PASSWORD_NAME from os.environ first.
    # Assigning through os.environ updates both the process view and the
    # environment inherited by the hook subprocess.
    os.environ["NEW_PASSWORD_NAME"] = name
    os.environ["PASSWD_FILE"] = os.path.expandvars(conf["PASSWD_FILE"])
    os.system(hook_exec)
| lkrotowski/passwdk | src/passwdk/hooks.py | Python | gpl-3.0 | 431 |
#------------------------------------------------------------------------------
# Name: test_node.py
# Purpose: Test the Node class
#
# Author: Aleksander Vines
#
# Created: 2016-02-26
# Last modified:2016-02-26T16:00
# Copyright: (c) NERSC
# Licence: This file is part of NANSAT. You can redistribute it or modify
# under the terms of GNU General Public License, v.3
# http://www.gnu.org/licenses/gpl-3.0.html
#------------------------------------------------------------------------------
from __future__ import absolute_import
import unittest
import os
from . import nansat_test_data as ntd
from nansat.node import Node
class NodeTest(unittest.TestCase):
    """Unit tests for nansat.node.Node (a lightweight XML element wrapper)."""
    def test_creation(self):
        # Constructor strips the value and exposes extra kwargs as attributes;
        # item access by the node's own tag reads/writes the value.
        tag = 'Root'
        value = ' Value '
        anAttr = 'elValue'
        new_value = 'New Value'
        node = Node(tag, value=value, anAttr=anAttr)
        self.assertEqual(node.tag, tag)
        self.assertDictEqual(node.attributes, {'anAttr': anAttr})
        self.assertEqual(node.value, value.strip())
        self.assertEqual(node[tag], value.strip())
        node[tag] = new_value
        self.assertEqual(node.value, new_value)
    def test_getAttributeList(self):
        # getAttributeList() returns two parallel lists: names and values.
        tag = 'Root'
        value = ' Value '
        anAttr = 'elValue'
        secondAttr = 'Some value'
        finalAttribute = 'A last value'
        node = Node(tag, value=value, anAttr=anAttr, secondAttr=secondAttr,
                    finalAttribute=finalAttribute)
        nameList, valList = node.getAttributeList()
        self.assertIsInstance(nameList, list)
        self.assertIsInstance(valList, list)
        # Order is not assumed: locate each value, then check the paired name.
        index = valList.index(anAttr)
        self.assertEqual(nameList[index], 'anAttr')
        index = valList.index(secondAttr)
        self.assertEqual(nameList[index], 'secondAttr')
        index = valList.index(finalAttribute)
        self.assertEqual(nameList[index], 'finalAttribute')
    def test_insert(self):
        # insert() parses raw XML and grafts it under the node; the result
        # must serialize identically to parsing the same XML directly.
        contents = ('<Element attr="attrValue"><Subnode>testValue</Subnode>'
                    '</Element>')
        root = Node('root')
        root2 = root.insert(contents)
        element = root2.node('Element')
        rawElement = Node.create(contents)
        self.assertEqual(element.xml(), rawElement.xml())
    def test_create(self):
        # Node.create() also accepts a file path; compare with string parsing.
        test_file_element = os.path.join(ntd.test_data_path,
                                         'some_xml_file.xml')
        fileElement = Node.create(test_file_element)
        with open(test_file_element, 'r') as myfile:
            contents = myfile.read().replace('\n', '')
        root = Node('root')
        root = root.insert(contents)
        rawElement = root.children[0]
        self.assertEqual(fileElement.xml(), rawElement.xml())
    def test_delete_attribute(self):
        tag = 'Root'
        value = ' Value '
        anAttr = 'elValue'
        node = Node(tag, value=value, anAttr=anAttr)
        self.assertIn('anAttr', node.attributes)
        node.delAttribute('anAttr')
        self.assertNotIn('anAttr', node.attributes)
    def test_add_node(self):
        # "+=" appends a child node.
        rootTag = 'Root'
        root = Node(rootTag)
        firstLevelTag = 'FirstLevel'
        firstLevel = Node(firstLevelTag)
        root += firstLevel
        self.assertIn(firstLevel, root.children)
    def test_add_nodes(self):
        # Both "+=" and binary "+" accumulate children.
        rootTag = 'Root'
        root = Node(rootTag)
        firstLevelTag = 'FirstLevel'
        firstLevel = Node(firstLevelTag)
        root += firstLevel
        firstLevel2 = Node(firstLevelTag)
        root += firstLevel2
        firstLevel2ndTag = 'FirstLevel2ndTag'
        firstLevel3 = Node(firstLevel2ndTag)
        root = root + firstLevel3
        self.assertIn(firstLevel, root.children)
        self.assertIn(firstLevel2, root.children)
        self.assertIn(firstLevel3, root.children)
    def test_xml(self):
        # xml() serializes children in insertion order with 2-space indent.
        rootTag = 'Root'
        root = Node(rootTag)
        firstLevelTag = 'FirstLevel'
        firstLevel = Node(firstLevelTag)
        root += firstLevel
        firstLevel2 = Node(firstLevelTag)
        root += firstLevel2
        firstLevel2ndTag = 'FirstLevel2ndTag'
        firstLevel3 = Node(firstLevel2ndTag)
        root += firstLevel3
        self.assertEqual(root.xml(),
                         ('<Root>\n'
                          '  <FirstLevel/>\n'
                          '  <FirstLevel/>\n'
                          '  <FirstLevel2ndTag/>\n'
                          '</Root>\n'),)
    def test_replace_node(self):
        # replaceNode(tag, n, new) swaps the n-th child carrying that tag.
        rootTag = 'Root'
        root = Node(rootTag)
        firstLevelTag = 'FirstLevel'
        firstLevel = Node(firstLevelTag)
        root += firstLevel
        firstLevel2 = Node(firstLevelTag)
        root += firstLevel2
        firstLevel2ndTag = 'FirstLevel2ndTag'
        firstLevel3 = Node(firstLevel2ndTag)
        root.replaceNode(firstLevelTag, 1, firstLevel3)
        self.assertIn(firstLevel, root.children)
        self.assertNotIn(firstLevel2, root.children)
        self.assertIn(firstLevel3, root.children)
        self.assertEqual(len(root.children), 2)
    def test_search_node(self):
        # node(tag, n) returns the n-th child with a matching tag.
        rootTag = 'Root'
        root = Node(rootTag)
        firstLevelTag = 'FirstLevel'
        firstLevel = Node(firstLevelTag)
        root += firstLevel
        firstLevel2 = Node(firstLevelTag)
        root += firstLevel2
        firstLevel2ndTag = 'FirstLevel2ndTag'
        firstLevel3 = Node(firstLevel2ndTag)
        root += firstLevel3
        self.assertEqual(root.node(firstLevelTag,0), firstLevel)
        self.assertEqual(root.node(firstLevelTag,1), firstLevel2)
    def test_str(self):
        tag = 'Root'
        value = 'Value'
        node = Node(tag, value=value)
        self.assertEqual(str(node), '%s\n value: [%s]' % (tag, value))
if __name__ == "__main__":
    # Allow running this test module directly with `python test_node.py`.
    unittest.main()
| nansencenter/nansat | nansat/tests/test_node.py | Python | gpl-3.0 | 5,838 |
import soundcloud
from soundcloud.tests.utils import MockResponse
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from nose.tools import eq_, raises
from fudge import patch
def test_kwargs_parsing_valid():
    """Known constructor kwargs must be stored as attributes on the client."""
    minimal = soundcloud.Client(client_id='foo', client_secret='foo')
    assert isinstance(minimal, soundcloud.Client)
    eq_('foo', minimal.client_id)
    # A fully-specified client keeps every recognized kwarg.
    full = soundcloud.Client(client_id='foo', client_secret='bar',
                             access_token='baz', username='you',
                             password='secret', redirect_uri='foooo')
    eq_('foo', full.client_id)
    eq_('baz', full.access_token)
@raises(AttributeError)
def test_kwargs_parsing_invalid():
    """Unknown constructor kwargs are dropped and never become attributes."""
    client = soundcloud.Client(foo='bar', client_id='bar')
    # Accessing the unknown kwarg must raise AttributeError.
    getattr(client, 'foo')
def test_url_creation():
    """Resource names must resolve to full API urls, slashes stripped."""
    client = soundcloud.Client(client_id='foo')
    # Both a bare name and a slash-wrapped name resolve to the same url.
    for resource in ('tracks', '/tracks/'):
        eq_('https://api.soundcloud.com/tracks',
            client._resolve_resource_name(resource))
def test_url_creation_options():
    """Resolution must honor use_ssl=False and a custom host."""
    client = soundcloud.Client(client_id='foo', use_ssl=False)
    client.host = 'soundcloud.dev'
    resolved = client._resolve_resource_name('apps/132445')
    eq_('http://soundcloud.dev/apps/132445', resolved)
def test_method_dispatching():
    """HTTP verb attributes must dispatch to partials of _request."""
    client = soundcloud.Client(client_id='foo')
    for verb in ('get', 'post', 'put', 'delete', 'head'):
        dispatched = getattr(client, verb)
        # Each partial is pre-bound with the verb name only.
        eq_((verb,), dispatched.args)
        eq_('_request', dispatched.func.__name__)
def test_host_config():
    """A host kwarg must override the default api.soundcloud.com."""
    custom = soundcloud.Client(client_id='foo', host='api.soundcloud.dev')
    eq_('api.soundcloud.dev', custom.host)
    default = soundcloud.Client(client_id='foo')
    eq_('api.soundcloud.com', default.host)
@patch('requests.get')
def test_disabling_ssl_verification(fake_get):
    """We should be able to disable ssl verification when we are in dev mode"""
    client = soundcloud.Client(client_id='foo', host='api.soundcloud.dev',
                               verify_ssl=False)
    # The client_id is expected to be appended to the query string.
    expected_url = '%s?%s' % (
        client._resolve_resource_name('tracks'),
        urlencode({
            'limit': 5,
            'client_id': 'foo'
        }))
    headers = {
        'User-Agent': soundcloud.USER_AGENT,
        'Accept': 'application/json'
    }
    # fudge expectation: requests.get must be called with verify=False.
    (fake_get.expects_call()
     .with_args(expected_url,
                headers=headers,
                verify=False,
                allow_redirects=True)
     .returns(MockResponse("{}")))
    client.get('tracks', limit=5)
@raises(AttributeError)
def test_method_dispatching_invalid_method():
    """Non-verb attribute names must raise AttributeError, not dispatch."""
    client = soundcloud.Client(client_id='foo')
    # 'foo' is not an HTTP verb, so attribute lookup itself must fail.
    client.foo()
@patch('requests.get')
def test_method_dispatching_get_request_readonly(fake_get):
    """Test that calling client.get() results in a proper call
    to the get function in the requests module with the provided
    kwargs as the querystring.
    """
    client = soundcloud.Client(client_id='foo')
    # kwargs plus the client_id end up urlencoded in the query string.
    expected_url = '%s?%s' % (
        client._resolve_resource_name('tracks'),
        urlencode({
            'limit': 5,
            'client_id': 'foo'
        }))
    headers = {
        'User-Agent': soundcloud.USER_AGENT,
        'Accept': 'application/json'
    }
    # fudge expectation: exactly one GET with these args.
    (fake_get.expects_call()
     .with_args(expected_url, headers=headers, allow_redirects=True)
     .returns(MockResponse("{}")))
    client.get('tracks', limit=5)
@patch('requests.post')
def test_method_dispatching_post_request(fake_post):
    """Test that calling client.post() results in a proper call
    to the post function in the requests module.
    TODO: Revise once read/write support has been added.
    """
    client = soundcloud.Client(client_id='foo')
    expected_url = client._resolve_resource_name('tracks')
    # For POST the client_id travels in the form body, not the url.
    data = {
        'client_id': 'foo'
    }
    headers = {
        'User-Agent': soundcloud.USER_AGENT
    }
    (fake_post.expects_call()
     .with_args(expected_url,
                data=data,
                headers=headers,
                allow_redirects=True)
     .returns(MockResponse("{}")))
    client.post('tracks')
@patch('requests.get')
def test_proxy_servers(fake_request):
    """Test that providing a dictionary of proxy servers works."""
    proxies = {
        'http': 'myproxyserver:1234'
    }
    client = soundcloud.Client(client_id='foo', proxies=proxies)
    expected_url = "%s?%s" % (
        client._resolve_resource_name('me'),
        urlencode({
            'client_id': 'foo'
        })
    )
    headers = {
        'User-Agent': soundcloud.USER_AGENT,
        'Accept': 'application/json'
    }
    # fudge expectation: the proxies dict must be forwarded to requests.get.
    (fake_request.expects_call()
     .with_args(expected_url,
                headers=headers,
                proxies=proxies,
                allow_redirects=True)
     .returns(MockResponse("{}")))
    client.get('/me')
| adazey/Muzez | libs/soundcloud/tests/test_client.py | Python | gpl-3.0 | 5,660 |
import logging
import re
from markdown.extensions.codehilite import CodeHilite
from markdown.extensions.codehilite import CodeHiliteExtension
from markdown.preprocessors import Preprocessor
from markdown.treeprocessors import Treeprocessor
from wiki.core.markdown import add_to_registry
logger = logging.getLogger(__name__)
def highlight(code, config, tab_length, lang=None):
    """Run CodeHilite over ``code`` and wrap the result in a styling div.

    :param code: source text to highlight
    :param config: CodeHilite extension config dict
    :param tab_length: tab width passed through from the Markdown instance
    :param lang: optional explicit language name
    :return: HTML string wrapped in <div class="codehilite-wrap">
    """
    rendered = CodeHilite(
        code,
        linenums=config["linenums"],
        guess_lang=config["guess_lang"],
        css_class=config["css_class"],
        style=config["pygments_style"],
        noclasses=config["noclasses"],
        tab_length=tab_length,
        use_pygments=config["use_pygments"],
        lang=lang,
    ).hilite()
    return """<div class="codehilite-wrap">{}</div>""".format(rendered)
class WikiFencedBlockPreprocessor(Preprocessor):
    """
    This is a replacement of markdown.extensions.fenced_code which will
    directly and without configuration options invoke the vanilla CodeHilite
    extension.
    """

    # Matches ``` / ~~~ fences with an optional language and optional
    # hl_lines="..." annotation on the opening fence.
    FENCED_BLOCK_RE = re.compile(
        r"""
(?P<fence>^(?:~{3,}|`{3,}))[ ]*         # Opening ``` or ~~~
(\{?\.?(?P<lang>[a-zA-Z0-9_+-]*))?[ ]*  # Optional {, and lang
# Optional highlight lines, single- or double-quote-delimited
(hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?[ ]*
}?[ ]*\n                                # Optional closing }
(?P<code>.*?)(?<=\n)
(?P=fence)[ ]*$""",
        re.MULTILINE | re.DOTALL | re.VERBOSE,
    )
    CODE_WRAP = "<pre>%s</pre>"

    def __init__(self, md):
        super().__init__(md)
        self.checked_for_codehilite = False
        self.codehilite_conf = {}
        # NOTE: `self.config` is attached externally by
        # WikiCodeHiliteExtension.extendMarkdown() before run() is called.

    def run(self, lines):
        """Match and store Fenced Code Blocks in the HtmlStash."""
        text = "\n".join(lines)
        # Repeatedly replace the first remaining fenced block with an
        # htmlStash placeholder until no fences are left.
        while 1:
            m = self.FENCED_BLOCK_RE.search(text)
            if m:
                lang = ""
                if m.group("lang"):
                    lang = m.group("lang")
                html = highlight(
                    m.group("code"), self.config, self.markdown.tab_length, lang=lang
                )
                placeholder = self.markdown.htmlStash.store(html)
                text = "%s\n%s\n%s" % (text[: m.start()], placeholder, text[m.end() :])
            else:
                break
        return text.split("\n")
class HiliteTreeprocessor(Treeprocessor):
    """Hilight source code in code blocks."""

    def run(self, root):
        """Find code blocks and store in htmlStash."""
        for pre in root.iter("pre"):
            # Only handle the canonical <pre><code>...</code></pre> shape.
            if len(pre) != 1 or pre[0].tag != "code":
                continue
            rendered = highlight(pre[0].text, self.config, self.markdown.tab_length)
            stashed = self.markdown.htmlStash.store(rendered)
            # Clear the code block and turn it into a <p> holding the
            # placeholder; the raw html is substituted back in later.
            pre.clear()
            pre.tag = "p"
            pre.text = stashed
class WikiCodeHiliteExtension(CodeHiliteExtension):
    """
    markdown.extensions.codehilite cannot configure container tags but forces
    code to be in <table></table>, so we had to overwrite some of the code
    because it's hard to extend...
    """
    def extendMarkdown(self, md):
        """Add HilitePostprocessor to Markdown instance."""
        hiliter = HiliteTreeprocessor(md)
        # The processors read their settings from this attached config dict.
        hiliter.config = self.getConfigs()
        # Replace any previously registered codehilite tree processor so the
        # wiki variant takes precedence.
        if "hilite" in md.treeprocessors:
            logger.warning(
                "Replacing existing 'hilite' extension - please remove "
                "'codehilite' from WIKI_MARKDOWN_KWARGS"
            )
            del md.treeprocessors["hilite"]
        add_to_registry(md.treeprocessors, "hilite", hiliter, "<inline")
        # Same replacement dance for the fenced-code preprocessor.
        if "fenced_code_block" in md.preprocessors:
            logger.warning(
                "Replacing existing 'fenced_code_block' extension - please remove "
                "'fenced_code_block' or 'extras' from WIKI_MARKDOWN_KWARGS"
            )
            del md.preprocessors["fenced_code_block"]
        hiliter = WikiFencedBlockPreprocessor(md)
        hiliter.config = self.getConfigs()
        add_to_registry(
            md.preprocessors, "fenced_code_block", hiliter, ">normalize_whitespace"
        )
        md.registerExtension(self)
def makeExtension(*args, **kwargs):
    """Entry point used by python-markdown to instantiate the extension."""
    extension = WikiCodeHiliteExtension(*args, **kwargs)
    return extension
| django-wiki/django-wiki | src/wiki/core/markdown/mdx/codehilite.py | Python | gpl-3.0 | 4,523 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from os import path
from unittest import mock
import fixtures
import snapcraft
from snapcraft.plugins import gulp, nodejs
from snapcraft import tests
class GulpPluginTestCase(tests.TestCase):
    """Tests for the snapcraft gulp plugin: pull, build, schema and clean."""
    def setUp(self):
        super().setUp()
        self.project_options = snapcraft.ProjectOptions()
        # Stub process execution, tarball handling and stdout so no test
        # touches the network or runs real commands.
        patcher = mock.patch('snapcraft.internal.common.run')
        self.run_mock = patcher.start()
        self.addCleanup(patcher.stop)
        patcher = mock.patch('snapcraft.sources.Tar')
        self.tar_mock = patcher.start()
        self.addCleanup(patcher.stop)
        patcher = mock.patch('sys.stdout')
        patcher.start()
        self.addCleanup(patcher.stop)
    def test_pull_local_sources(self):
        class Options:
            source = '.'
            gulp_tasks = []
            node_engine = '4'
        plugin = gulp.GulpPlugin('test-part', Options(), self.project_options)
        os.makedirs(plugin.sourcedir)
        plugin.pull()
        # Pulling a local source must not run commands, only fetch nodejs.
        self.assertFalse(self.run_mock.called, 'run() was called')
        self.tar_mock.assert_has_calls([
            mock.call(
                nodejs.get_nodejs_release(plugin.options.node_engine),
                path.join(os.path.abspath('.'), 'parts', 'test-part', 'npm')),
            mock.call().download()])
    def test_build(self):
        # Isolate PATH so the npm bin dir prepending is deterministic.
        self.useFixture(tests.fixture_setup.CleanEnvironment())
        self.useFixture(fixtures.EnvironmentVariable(
            'PATH', '/bin'))
        class Options:
            source = '.'
            gulp_tasks = []
            node_engine = '4'
        plugin = gulp.GulpPlugin('test-part', Options(), self.project_options)
        os.makedirs(plugin.sourcedir)
        open(os.path.join(plugin.sourcedir, 'package.json'), 'w').close()
        plugin.build()
        # Build must install gulp-cli globally, then the dev dependencies,
        # with the bundled npm bin dir prepended to PATH.
        path = '{}:/bin'.format(os.path.join(plugin._npm_dir, 'bin'))
        self.run_mock.assert_has_calls([
            mock.call(['npm', 'install', '-g', 'gulp-cli'],
                      cwd=plugin.builddir, env={'PATH': path}),
            mock.call(['npm', 'install', '--only-development'],
                      cwd=plugin.builddir, env={'PATH': path}),
        ])
        self.tar_mock.assert_has_calls([
            mock.call(
                nodejs.get_nodejs_release(plugin.options.node_engine),
                os.path.join(plugin._npm_dir)),
            mock.call().provision(
                plugin._npm_dir, clean_target=False, keep_tarball=True)])
    @mock.patch('platform.machine')
    def test_unsupported_arch_raises_exception(self, machine_mock):
        machine_mock.return_value = 'fantasy-arch'
        class Options:
            source = None
            gulp_tasks = []
            node_engine = '4'
        with self.assertRaises(EnvironmentError) as raised:
            gulp.GulpPlugin('test-part', Options(), self.project_options)
        self.assertEqual(raised.exception.__str__(),
                         'architecture not supported (fantasy-arch)')
    def test_schema(self):
        self.maxDiff = None
        # NOTE(review): 'minitems' and the 'type:' key (with a trailing
        # colon) mirror typos in the plugin's own schema - verify upstream
        # before "fixing" them here, or this test will fail.
        plugin_schema = {
            '$schema': 'http://json-schema.org/draft-04/schema#',
            'additionalProperties': False,
            'properties': {
                'gulp-tasks': {'default': [],
                               'items': {'type': 'string'},
                               'minitems': 1,
                               'type': 'array',
                               'uniqueItems': True},
                'node-engine': {'default': '4.4.4', 'type': 'string'},
                'source': {'type': 'string'},
                'source-branch': {'default': '', 'type': 'string'},
                'source-subdir': {'default': None, 'type': 'string'},
                'source-tag': {'default': '', 'type:': 'string'},
                'source-type': {'default': '', 'type': 'string'},
                'disable-parallel': {'default': False, 'type': 'boolean'}},
            'pull-properties': ['source', 'source-type', 'source-branch',
                                'source-tag', 'source-subdir', 'node-engine'],
            'build-properties': ['disable-parallel', 'gulp-tasks'],
            'required': ['source', 'gulp-tasks'],
            'type': 'object'}
        self.assertEqual(gulp.GulpPlugin.schema(), plugin_schema)
    def test_clean_pull_step(self):
        class Options:
            source = '.'
            gulp_tasks = []
            node_engine = '4'
        plugin = gulp.GulpPlugin('test-part', Options(), self.project_options)
        os.makedirs(plugin.sourcedir)
        plugin.pull()
        self.assertTrue(os.path.exists(plugin._npm_dir))
        # clean_pull() must remove the npm directory created by pull().
        plugin.clean_pull()
        self.assertFalse(os.path.exists(plugin._npm_dir))
| attente/snapcraft | snapcraft/tests/test_plugin_gulp.py | Python | gpl-3.0 | 5,389 |
from __future__ import absolute_import
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
# Must happen before django.conf is imported below.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sugarcub.settings')
from django.conf import settings # noqa
app = Celery('sugarcub')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
# Auto-register tasks.py modules from every installed Django app.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
    # bind=True exposes the task instance; print the request context so a
    # worker invocation can be verified end-to-end.
    print('Request: {0!r}'.format(self.request))
| bronycub/sugarcub | sugarcub/celery.py | Python | gpl-3.0 | 576 |
"""Code for finding content."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import abc
import collections
import os
from ... import types as t
from ...util import (
ANSIBLE_SOURCE_ROOT,
)
from .. import (
PathProvider,
)
class Layout:
    """Description of content locations and helper methods to access content."""
    def __init__(self,
                 root,  # type: str
                 paths,  # type: t.List[str]
                 ):  # type: (...) -> None
        self.root = root
        self.__paths = paths  # contains both file paths and symlinked directory paths (ending with os.path.sep)
        self.__files = [path for path in paths if not path.endswith(os.path.sep)]  # contains only file paths
        # Trees are nested (dirs_dict, files_list) tuples built by paths_to_tree.
        self.__paths_tree = paths_to_tree(self.__paths)
        self.__files_tree = paths_to_tree(self.__files)
    def all_files(self, include_symlinked_directories=False):  # type: (bool) -> t.List[str]
        """Return a list of all file paths."""
        if include_symlinked_directories:
            return self.__paths
        return self.__files
    def walk_files(self, directory, include_symlinked_directories=False):  # type: (str, bool) -> t.List[str]
        """Return a list of file paths found recursively under the given directory."""
        if include_symlinked_directories:
            tree = self.__paths_tree
        else:
            tree = self.__files_tree
        parts = directory.rstrip(os.sep).split(os.sep)
        item = get_tree_item(tree, parts)
        if not item:
            return []
        # Iterative walk of the subtree, accumulating every file list.
        directories = collections.deque(item[0].values())
        files = list(item[1])
        while directories:
            item = directories.pop()
            directories.extend(item[0].values())
            files.extend(item[1])
        return files
    def get_dirs(self, directory):  # type: (str) -> t.List[str]
        """Return a list directory paths found directly under the given directory."""
        parts = directory.rstrip(os.sep).split(os.sep)
        item = get_tree_item(self.__files_tree, parts)
        return [os.path.join(directory, key) for key in item[0].keys()] if item else []
    def get_files(self, directory):  # type: (str) -> t.List[str]
        """Return a list of file paths found directly under the given directory."""
        parts = directory.rstrip(os.sep).split(os.sep)
        item = get_tree_item(self.__files_tree, parts)
        return item[1] if item else []
class ContentLayout(Layout):
    """Information about the current Ansible content being tested."""
    def __init__(self,
                 root,  # type: str
                 paths,  # type: t.List[str]
                 plugin_paths,  # type: t.Dict[str, str]
                 collection=None,  # type: t.Optional[CollectionDetail]
                 integration_path=None,  # type: t.Optional[str]
                 unit_path=None,  # type: t.Optional[str]
                 unit_module_path=None,  # type: t.Optional[str]
                 unit_module_utils_path=None,  # type: t.Optional[str]
                 ):  # type: (...) -> None
        super(ContentLayout, self).__init__(root, paths)
        self.plugin_paths = plugin_paths
        self.collection = collection
        self.integration_path = integration_path
        # NOTE(review): os.path.join would raise if integration_path were
        # actually None despite the Optional annotation - callers appear to
        # always provide it; confirm before relying on the default.
        self.integration_targets_path = os.path.join(integration_path, 'targets')
        self.integration_vars_path = os.path.join(integration_path, 'integration_config.yml')
        self.unit_path = unit_path
        self.unit_module_path = unit_module_path
        self.unit_module_utils_path = unit_module_utils_path
        # True when testing the Ansible source tree itself rather than a collection.
        self.is_ansible = root == ANSIBLE_SOURCE_ROOT
    @property
    def prefix(self):  # type: () -> str
        """Return the collection prefix or an empty string if not a collection."""
        if self.collection:
            return self.collection.prefix
        return ''
    @property
    def module_path(self):  # type: () -> t.Optional[str]
        """Return the path where modules are found, if any."""
        return self.plugin_paths.get('modules')
    @property
    def module_utils_path(self):  # type: () -> t.Optional[str]
        """Return the path where module_utils are found, if any."""
        return self.plugin_paths.get('module_utils')
    @property
    def module_utils_powershell_path(self):  # type: () -> t.Optional[str]
        """Return the path where powershell module_utils are found, if any."""
        # Ansible core keeps powershell utils in a subdirectory; collections do not.
        if self.is_ansible:
            return os.path.join(self.plugin_paths['module_utils'], 'powershell')
        return self.plugin_paths.get('module_utils')
    @property
    def module_utils_csharp_path(self):  # type: () -> t.Optional[str]
        """Return the path where csharp module_utils are found, if any."""
        if self.is_ansible:
            return os.path.join(self.plugin_paths['module_utils'], 'csharp')
        return self.plugin_paths.get('module_utils')
class CollectionDetail:
    """Details about the layout of the current collection."""
    def __init__(self,
                 name,  # type: str
                 namespace,  # type: str
                 root,  # type: str
                 ):  # type: (...) -> None
        self.name = name
        self.namespace = namespace
        self.root = root
        # Derived identifiers used throughout the test runner.
        full_name = '%s.%s' % (namespace, name)
        self.full_name = full_name
        self.prefix = full_name + '.'
        self.directory = os.path.join('ansible_collections', namespace, name)
class LayoutProvider(PathProvider):
    """Base class for layout providers."""
    # Plugin type directories recognized inside a content layout.
    PLUGIN_TYPES = (
        'action',
        'become',
        'cache',
        'callback',
        'cliconf',
        'connection',
        'doc_fragments',
        'filter',
        'httpapi',
        'inventory',
        'lookup',
        'module_utils',
        'modules',
        'netconf',
        'shell',
        'strategy',
        'terminal',
        'test',
        'vars',
    )
    @abc.abstractmethod
    def create(self, root, paths):  # type: (str, t.List[str]) -> ContentLayout
        """Create a layout using the given root and paths."""
def paths_to_tree(paths):  # type: (t.List[str]) -> t.Tuple(t.Dict[str, t.Any], t.List[str])
    """Return a filesystem tree from the given list of paths.

    Each tree node is a ``(directories, files)`` tuple where ``directories``
    maps a directory name to a child node and ``files`` lists the full paths
    of files directly inside that node.
    """
    tree = {}, []
    for path in paths:
        node = tree
        # Descend (creating nodes as needed) one level per directory component.
        for directory in path.split(os.sep)[:-1]:
            node = node[0].setdefault(directory, ({}, []))
        node[1].append(path)
    return tree
def get_tree_item(tree, parts):  # type: (t.Tuple(t.Dict[str, t.Any], t.List[str]), t.List[str]) -> t.Optional[t.Tuple(t.Dict[str, t.Any], t.List[str])]
    """Return the portion of the tree found under the path given by parts, or None if it does not exist."""
    node = tree
    for part in parts:
        # Descend one level; a missing component means the path is absent.
        try:
            node = node[0][part]
        except KeyError:
            return None
    return node
| amenonsen/ansible | test/lib/ansible_test/_internal/provider/layout/__init__.py | Python | gpl-3.0 | 6,980 |
#!/usr/bin/env python
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
#
# Based on the original in EJS:
# Copyright (C) Andrew Tridgell <tridge@samba.org> 2005
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Samba 4."""
__docformat__ = "restructuredText"
import os
import sys
import samba.param
def source_tree_topdir():
    '''return the top level directory (the one containing the source4 directory)'''
    here = os.path.dirname(__file__)
    # Probe the two nesting depths at which this module can live in the tree.
    for relative in ("../../..", "../../../.."):
        candidate = os.path.normpath(os.path.join(here, relative))
        if os.path.exists(os.path.join(candidate, 'source4')):
            return candidate
    raise RuntimeError("unable to find top level source directory")
def in_source_tree():
    '''return True if we are running from within the samba source tree'''
    # Delegate to source_tree_topdir(); failure to locate it means we are
    # running from an installed copy.
    try:
        source_tree_topdir()
    except RuntimeError:
        return False
    return True
import ldb
from samba._ldb import Ldb as _Ldb
class Ldb(_Ldb):
    """Simple Samba-specific LDB subclass that takes care
    of setting up the modules dir, credentials pointers, etc.

    Please note that this is intended to be for all Samba LDB files,
    not necessarily the Sam database. For Sam-specific helper
    functions see samdb.py.
    """

    def __init__(self, url=None, lp=None, modules_dir=None, session_info=None,
                 credentials=None, flags=0, options=None):
        """Opens a Samba Ldb file.

        :param url: Optional LDB URL to open
        :param lp: Optional loadparm object
        :param modules_dir: Optional modules directory
        :param session_info: Optional session information
        :param credentials: Optional credentials, defaults to anonymous.
        :param flags: Optional LDB flags
        :param options: Additional options (optional)

        This is different from a regular Ldb file in that the Samba-specific
        modules-dir is used by default and that credentials and session_info
        can be passed through (required by some modules).
        """
        if modules_dir is not None:
            self.set_modules_dir(modules_dir)
        else:
            # Default to the "ldb" subdirectory of the Samba modules dir.
            self.set_modules_dir(os.path.join(samba.param.modules_dir(), "ldb"))
        if session_info is not None:
            self.set_session_info(session_info)
        if credentials is not None:
            self.set_credentials(credentials)
        if lp is not None:
            self.set_loadparm(lp)
        # This must be done before we load the schema, as these handlers for
        # objectSid and objectGUID etc must take precedence over the 'binary
        # attribute' declaration in the schema
        self.register_samba_handlers()
        # TODO set debug
        def msg(l, text):
            print text
        #self.set_debug(msg)
        self.set_utf8_casefold()
        # Allow admins to force non-sync ldb for all databases
        if lp is not None:
            nosync_p = lp.get("nosync", "ldb")
            if nosync_p is not None and nosync_p == True:
                flags |= ldb.FLG_NOSYNC
        # Restrict database file permissions to owner read/write only.
        self.set_create_perms(0600)
        if url is not None:
            self.connect(url, flags, options)

    def searchone(self, attribute, basedn=None, expression=None,
                  scope=ldb.SCOPE_BASE):
        """Search for one attribute as a string.

        :param basedn: BaseDN for the search.
        :param attribute: Name of the attribute
        :param expression: Optional search expression.
        :param scope: Search scope (defaults to base).
        :return: Value of attribute as a string or None if it wasn't found.
        """
        res = self.search(basedn, scope, expression, [attribute])
        # Exactly one record with a present attribute value is required.
        if len(res) != 1 or res[0][attribute] is None:
            return None
        values = set(res[0][attribute])
        assert len(values) == 1
        return self.schema_format_value(attribute, values.pop())

    def erase_users_computers(self, dn):
        """Erases user and computer objects from our AD.

        This is needed since the 'samldb' module denies the deletion of primary
        groups. Therefore all groups shouldn't be primary somewhere anymore.
        """
        try:
            res = self.search(base=dn, scope=ldb.SCOPE_SUBTREE, attrs=[],
                              expression="(|(objectclass=user)(objectclass=computer))")
        except ldb.LdbError, (errno, _):
            if errno == ldb.ERR_NO_SUCH_OBJECT:
                # Ignore no such object errors
                return
            else:
                raise
        try:
            for msg in res:
                # "relax:0" bypasses server-side constraint checks.
                self.delete(msg.dn, ["relax:0"])
        except ldb.LdbError, (errno, _):
            if errno != ldb.ERR_NO_SUCH_OBJECT:
                # Ignore no such object errors
                raise

    def erase_except_schema_controlled(self):
        """Erase this ldb.

        :note: Removes all records, except those that are controlled by
            Samba4's schema.
        """
        basedn = ""
        # Try to delete user/computer accounts to allow deletion of groups
        self.erase_users_computers(basedn)
        # Delete the 'visible' records, and the invisible 'deleted' records (if this DB supports it)
        for msg in self.search(basedn, ldb.SCOPE_SUBTREE,
                               "(&(|(objectclass=*)(distinguishedName=*))(!(distinguishedName=@BASEINFO)))",
                               [], controls=["show_deleted:0", "show_recycled:0"]):
            try:
                self.delete(msg.dn, ["relax:0"])
            except ldb.LdbError, (errno, _):
                if errno != ldb.ERR_NO_SUCH_OBJECT:
                    # Ignore no such object errors
                    raise
        # Verify that nothing (other than schema-controlled records) remains.
        res = self.search(basedn, ldb.SCOPE_SUBTREE,
                          "(&(|(objectclass=*)(distinguishedName=*))(!(distinguishedName=@BASEINFO)))", [], controls=["show_deleted:0", "show_recycled:0"])
        assert len(res) == 0
        # delete the specials
        for attr in ["@SUBCLASSES", "@MODULES",
                     "@OPTIONS", "@PARTITION", "@KLUDGEACL"]:
            try:
                self.delete(attr, ["relax:0"])
            except ldb.LdbError, (errno, _):
                if errno != ldb.ERR_NO_SUCH_OBJECT:
                    # Ignore missing dn errors
                    raise

    def erase(self):
        """Erase this ldb, removing all records."""
        self.erase_except_schema_controlled()
        # delete the specials
        for attr in ["@INDEXLIST", "@ATTRIBUTES"]:
            try:
                self.delete(attr, ["relax:0"])
            except ldb.LdbError, (errno, _):
                if errno != ldb.ERR_NO_SUCH_OBJECT:
                    # Ignore missing dn errors
                    raise

    def load_ldif_file_add(self, ldif_path):
        """Load a LDIF file.

        :param ldif_path: Path to LDIF file.
        """
        self.add_ldif(open(ldif_path, 'r').read())

    def add_ldif(self, ldif, controls=None):
        """Add data based on a LDIF string.

        :param ldif: LDIF text.
        """
        for changetype, msg in self.parse_ldif(ldif):
            # Only plain records (no changetype directive) are accepted here.
            assert changetype == ldb.CHANGETYPE_NONE
            self.add(msg, controls)

    def modify_ldif(self, ldif, controls=None):
        """Modify database based on a LDIF string.

        :param ldif: LDIF text.
        """
        for changetype, msg in self.parse_ldif(ldif):
            if changetype == ldb.CHANGETYPE_ADD:
                self.add(msg, controls)
            else:
                self.modify(msg, controls)
def substitute_var(text, values):
    """Substitute strings of the form ${NAME} in text, replacing
    with substitutions from values.

    :param text: Text in which to subsitute.
    :param values: Dictionary with keys and values.
    """
    for name in values:
        value = values[name]
        assert isinstance(name, str), "%r is not a string" % name
        assert isinstance(value, str), "Value %r for %s is not a string" % (value, name)
        text = text.replace("${%s}" % name, value)
    return text
def check_all_substituted(text):
    """Check that all substitution variables in a string have been replaced.

    If not, raise an exception.

    :param text: The text to search for substitution variables
    """
    if "${" not in text:
        return
    var_start = text.find("${")
    var_end = text.find("}", var_start)
    if var_end == -1:
        # Unterminated "${..." reference: include the rest of the text in the
        # error instead of producing an empty excerpt (find() returned -1).
        var_end = len(text) - 1
    raise Exception("Not all variables substituted: %s" %
                    text[var_start:var_end+1])
def read_and_sub_file(file_name, subst_vars):
    """Read a file and sub in variables found in it

    :param file_name: File to be read (typically from setup directory)
    :param subst_vars: Optional variables to subsitute in the file.
    :return: File contents with substitutions applied.
    """
    # Close the file deterministically instead of leaking the handle until
    # garbage collection (the original open(...).read() never closed it).
    f = open(file_name, 'r')
    try:
        data = f.read()
    finally:
        f.close()
    if subst_vars is not None:
        data = substitute_var(data, subst_vars)
        check_all_substituted(data)
    return data
def setup_file(template, fname, subst_vars=None):
    """Setup a file in the private dir.

    :param template: Path of the template file.
    :param fname: Path of the file to create.
    :param subst_vars: Substitution variables.
    """
    # Remove any stale copy first, then regenerate from the template.
    if os.path.exists(fname):
        os.unlink(fname)
    contents = read_and_sub_file(template, subst_vars)
    out = open(fname, 'w')
    try:
        out.write(contents)
    finally:
        out.close()
def valid_netbios_name(name):
    """Check whether a name is valid as a NetBIOS name. """
    # See crh's book (1.4.1.1): at most 15 characters, each either
    # alphanumeric or one of a small set of punctuation characters.
    if len(name) > 15:
        return False
    return all(ch.isalnum() or ch in " !#$%&'()-.@^_{}~" for ch in name)
def import_bundled_package(modulename, location):
    """Import the bundled version of a package.

    :note: This should only be called if the system version of the package
        is not adequate.

    :param modulename: Module name to import
    :param location: Location to add to sys.path (can be relative to
        ${srcdir}/lib)
    """
    if in_source_tree():
        # Prepend the in-tree lib dir so the bundled copy wins over any
        # system-installed version, then register it under its plain name.
        sys.path.insert(0, os.path.join(source_tree_topdir(), "lib", location))
        sys.modules[modulename] = __import__(modulename)
    else:
        # Installed case: the bundled copy lives under samba.external, but is
        # registered in sys.modules under its plain name for callers.
        sys.modules[modulename] = __import__(
            "samba.external.%s" % modulename, fromlist=["samba.external"])
def ensure_external_module(modulename, location):
    """Add a location to sys.path if an external dependency can't be found.

    :param modulename: Module name to import
    :param location: Location to add to sys.path (can be relative to
        ${srcdir}/lib)
    """
    try:
        __import__(modulename)
        return
    except ImportError:
        pass
    # System copy unavailable; fall back to the bundled version.
    import_bundled_package(modulename, location)
from samba import _glue

# Re-export the C-implemented helpers from samba._glue at module level.
version = _glue.version
interface_ips = _glue.interface_ips
set_debug_level = _glue.set_debug_level
get_debug_level = _glue.get_debug_level
unix2nttime = _glue.unix2nttime
nttime2string = _glue.nttime2string
nttime2unix = _glue.nttime2unix
# (a second, redundant "unix2nttime = _glue.unix2nttime" assignment was
# removed; the alias above already binds it)
generate_random_password = _glue.generate_random_password
strcasecmp_m = _glue.strcasecmp_m
strstr_m = _glue.strstr_m
| gwr/samba | source4/scripting/python/samba/__init__.py | Python | gpl-3.0 | 11,696 |
CONGRESS_API_KEY = "" | wilfriedE/PoliticalMashup | settings.py | Python | gpl-3.0 | 21 |
#-------------------------------------------------------------------------------
#
# This file is part of pygimplib.
#
# Copyright (C) 2014, 2015 khalim19 <khalim19@gmail.com>
#
# pygimplib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pygimplib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pygimplib. If not, see <http://www.gnu.org/licenses/>.
#
#-------------------------------------------------------------------------------
"""
This module defines the following classes:
* `ItemData` - an associative container that stores all GIMP items and item
groups of a certain type
* subclasses of `ItemData`:
* `LayerData` for layers
* `ChannelData` for channels
* `PathData` for paths
* `_ItemDataElement` - wrapper for `gimp.Item` objects containing custom
attributes derived from the original `gimp.Item` attributes
"""
#===============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
str = unicode
#===============================================================================
import os
import abc
from collections import OrderedDict
from collections import namedtuple
import gimp
from . import pgpath
from . import objectfilter
#===============================================================================
pdb = gimp.pdb
#===============================================================================
class ItemData(object):
  """
  This class is an interface to store all items (and item groups) of a certain
  type (e.g. layers, channels or paths) of a GIMP image in an ordered
  dictionary, allowing to access the items via their names and get various
  custom attributes derived from the existing item attributes.

  Use one of the subclasses for items of a certain type:
  * `LayerData` for layers,
  * `ChannelData` for channels,
  * `PathData` for paths (vectors).

  For custom item attributes, see the documentation for the `_ItemDataElement`
  class. `_ItemDataElement` is common for all `ItemData` subclasses.

  Attributes:

  * `image` - GIMP image to get item data from.

  * `is_filtered` - If True, ignore items that do not match the filter
    (`ObjectFilter`) in this object when iterating.

  * `filter` (read-only) - `ObjectFilter` instance where you can add or remove
    filter rules or subfilters to filter items.
  """

  __metaclass__ = abc.ABCMeta

  def __init__(self, image, is_filtered=False, filter_match_type=objectfilter.ObjectFilter.MATCH_ALL):
    self.image = image
    self.is_filtered = is_filtered
    # Filters applied to all items in self._itemdata
    self._filter = objectfilter.ObjectFilter(filter_match_type)
    # Contains all items (including item groups) in the item tree.
    # key: `_ItemDataElement.orig_name` (derived from `gimp.Item.name`, which is unique)
    # value: `_ItemDataElement` object
    self._itemdata = OrderedDict()
    # key `_ItemDataElement` object (parent) or None (root of the item tree)
    # value: set of `_ItemDataElement` objects
    self._uniquified_itemdata = {}
    self._fill_item_data()

  @property
  def filter(self):
    return self._filter

  def __getitem__(self, name):
    """
    Access an `_ItemDataElement` object by its `orig_name` attribute.
    """
    return self._itemdata[name]

  def __contains__(self, name):
    """
    Return True if an `_ItemDataElement` object, specified by its `orig_name`
    attribute, is in the item data. Otherwise return False.
    """
    return name in self._itemdata

  def __len__(self):
    """
    Return the number of all item data elements - that is, all immediate
    children of the image and all nested children.
    """
    # Iterating self (rather than len(self._itemdata)) respects `is_filtered`.
    return len([item_elem for item_elem in self])

  def __iter__(self):
    """
    If `is_filtered` is False, iterate over all items. If `is_filtered` is True,
    iterate only over items that match the filter in this object.

    Yields:

    * `item_elem` - The current `_ItemDataElement` object.
    """
    if not self.is_filtered:
      for item_elem in self._itemdata.values():
        yield item_elem
    else:
      for item_elem in self._itemdata.values():
        if self._filter.is_match(item_elem):
          yield item_elem

  def _items(self):
    """
    Yield current (`gimp.Item.name`, `_ItemDataElement` object) tuple.
    """
    if not self.is_filtered:
      for name, item_elem in self._itemdata.items():
        yield name, item_elem
    else:
      for name, item_elem in self._itemdata.items():
        if self._filter.is_match(item_elem):
          yield name, item_elem

  def uniquify_name(self, item_elem, include_item_path=True,
                    uniquifier_position=None, uniquifier_position_parents=None):
    """
    Make the `name` attribute in the specified `_ItemDataElement` object
    unique among all other, already uniquified `_ItemDataElement` objects.

    To achieve uniquification, a string ("uniquifier") in the form of
    " (<number>)" is inserted at the end of the item names.

    Parameters:

    * `item_elem` - `_ItemDataElement` object whose `name` attribute
      will be uniquified.

    * `include_item_path` - If True, take the item path into account when
      uniquifying.

    * `uniquifier_position` - Position (index) where the uniquifier is inserted
      into the current item. If the position is None, insert the uniquifier at
      the end of the item name (i.e. append it).

    * `uniquifier_position_parents` - Position (index) where the uniquifier is
      inserted into the parents of the current item. If the position is None,
      insert the uniquifier at the end of the name of each parent. This
      parameter has no effect if `include_item_path` is False.
    """
    if include_item_path:
      # Walk from the topmost parent down to the item itself so parents are
      # uniquified before their children.
      for elem in item_elem.parents + [item_elem]:
        parent = elem.parent
        if parent not in self._uniquified_itemdata:
          self._uniquified_itemdata[parent] = set()
        if elem not in self._uniquified_itemdata[parent]:
          item_names = set([elem_.name for elem_ in self._uniquified_itemdata[parent]])
          if elem.name not in item_names:
            self._uniquified_itemdata[parent].add(elem)
          else:
            if elem == item_elem:
              position = uniquifier_position
            else:
              position = uniquifier_position_parents
            elem.name = pgpath.uniquify_string(elem.name, item_names, position)
            self._uniquified_itemdata[parent].add(elem)
    else:
      # Use None as the root of the item tree.
      parent = None
      if parent not in self._uniquified_itemdata:
        self._uniquified_itemdata[parent] = set()
      # NOTE(review): in this branch the set stores names (strings), whereas
      # the branch above stores `_ItemDataElement` objects.
      item_elem.name = pgpath.uniquify_string(
        item_elem.name, self._uniquified_itemdata[parent], uniquifier_position)
      self._uniquified_itemdata[parent].add(item_elem.name)

  def _fill_item_data(self):
    """
    Fill the _itemdata dictionary, containing
    <gimp.Item.name, _ItemDataElement> pairs.
    """
    _ItemTreeNode = namedtuple('_ItemTreeNode', ['children', 'parents'])

    # Breadth-first-style walk; child groups are inserted at the front of the
    # queue (at `index`) so sibling groups keep their original order.
    item_tree = [_ItemTreeNode(self._get_children_from_image(self.image), [])]
    while item_tree:
      node = item_tree.pop(0)
      index = 0
      for item in node.children:
        parents = list(node.parents)
        item_elem = _ItemDataElement(item, parents)
        if pdb.gimp_item_is_group(item):
          item_tree.insert(index, _ItemTreeNode(self._get_children_from_item(item), parents + [item_elem]))
          index += 1
        self._itemdata[item_elem.orig_name] = item_elem

  @abc.abstractmethod
  def _get_children_from_image(self, image):
    """
    Return a list of immediate child items from the specified image.

    If no child items exist, return an empty list.
    """
    pass

  @abc.abstractmethod
  def _get_children_from_item(self, item):
    """
    Return a list of immediate child items from the specified item.

    If no child items exist, return an empty list.
    """
    pass
class LayerData(ItemData):
  """`ItemData` subclass operating on the layers of an image."""

  def _get_children_from_image(self, image):
    return image.layers

  def _get_children_from_item(self, item):
    # Layer groups expose their children via the `layers` attribute.
    return item.layers
class ChannelData(ItemData):
  """`ItemData` subclass operating on the channels of an image."""

  def _get_children_from_image(self, image):
    return image.channels

  def _get_children_from_item(self, item):
    return item.children
class PathData(ItemData):
  """`ItemData` subclass operating on the paths (vectors) of an image."""

  def _get_children_from_image(self, image):
    return image.vectors

  def _get_children_from_item(self, item):
    return item.children
#===============================================================================
class _ItemDataElement(object):
  """
  This class wraps a `gimp.Item` object and defines custom item attributes.

  Note that the attributes will not be up to date if changes were made to the
  original `gimp.Item` object.

  Attributes:

  * `item` (read-only) - `gimp.Item` object.

  * `parents` (read-only) - List of `_ItemDataElement` parents for this item,
    sorted from the topmost parent to the bottommost (immediate) parent.

  * `level` (read-only) - Integer indicating which level in the item tree is
    the item positioned at. 0 means the item is at the top level. The higher
    the level, the deeper the item is in the item tree.

  * `parent` (read-only) - Immediate `_ItemDataElement` parent of this object.
    If this object has no parent, return None.

  * `item_type` (read-only) - Item type - one of the following:
      * `ITEM` - normal item,
      * `NONEMPTY_GROUP` - non-empty item group (contains children),
      * `EMPTY_GROUP` - empty item group (contains no children).

  * `name` - Item name as a `unicode` string, initially equal to the `orig_name`
    attribute. Modify this attribute instead of `gimp.Item.name` to avoid
    modifying the original item.

  * `orig_name` (read-only) - original `gimp.Item.name` as a `unicode` string.

  * `path_visible` (read-only) - Visibility of all item's parents and this
    item. If all items are visible, `path_visible` is True. If at least one
    of these items is invisible, `path_visible` is False.
  """

  __ITEM_TYPES = ITEM, NONEMPTY_GROUP, EMPTY_GROUP = (0, 1, 2)

  def __init__(self, item, parents=None):
    if item is None:
      raise TypeError("item cannot be None")

    # `gimp.Item.name` is a byte string; decode to unicode for manipulation.
    self.name = item.name.decode()
    # Arbitrary user-assigned tags for this element.
    self.tags = set()

    self._orig_name = self.name
    self._item = item
    self._parents = parents if parents is not None else []
    self._level = len(self._parents)

    if self._parents:
      self._parent = self._parents[-1]
    else:
      self._parent = None

    if pdb.gimp_item_is_group(self._item):
      if self._item.children:
        self._item_type = self.NONEMPTY_GROUP
      else:
        self._item_type = self.EMPTY_GROUP
    else:
      self._item_type = self.ITEM

    # Cached at construction time; see class docstring about staleness.
    self._path_visible = self._get_path_visibility()

  @property
  def item(self):
    return self._item

  @property
  def parents(self):
    return self._parents

  @property
  def level(self):
    return self._level

  @property
  def parent(self):
    return self._parent

  @property
  def item_type(self):
    return self._item_type

  @property
  def orig_name(self):
    return self._orig_name

  @property
  def path_visible(self):
    return self._path_visible

  def get_file_extension(self):
    """
    Get file extension from the `name` attribute.

    If `name` has no file extension, return an empty string.
    """
    return pgpath.get_file_extension(self.name)

  def set_file_extension(self, file_extension):
    """
    Set file extension in the `name` attribute.

    To remove the file extension from `name`, pass an empty string or None.
    """
    root = os.path.splitext(self.name)[0]
    if file_extension:
      self.name = '.'.join((root, file_extension))
    else:
      self.name = root

  def get_filepath(self, directory, include_item_path=True):
    """
    Return file path given the specified directory, item name and names of its
    parents.

    If `include_item_path` is True, create file path in the following format:
    <directory>/<item path components>/<item name>

    If `include_item_path` is False, create file path in the following format:
    <directory>/<item name>

    If directory is not an absolute path or is None, prepend the current working
    directory.

    Item path components consist of parents' item names, starting with the
    topmost parent.
    """
    if directory is None:
      directory = ""
    path = os.path.abspath(directory)
    if include_item_path:
      path_components = self.get_path_components()
      if path_components:
        path = os.path.join(path, os.path.join(*path_components))
    path = os.path.join(path, self.name)
    return path

  def get_path_components(self):
    """
    Return a list of names of all parents of this item as path components.
    """
    return [parent.name for parent in self.parents]

  def validate_name(self):
    """
    Validate the `name` attribute of this item and all of its parents.
    """
    self.name = pgpath.FilenameValidator.validate(self.name)
    for parent in self._parents:
      parent.name = pgpath.FilenameValidator.validate(parent.name)

  def _get_path_visibility(self):
    """
    If this item and all of its parents are visible, return True, otherwise
    return False.
    """
    path_visible = True
    if not self._item.visible:
      path_visible = False
    else:
      for parent in self._parents:
        if not parent.item.visible:
          path_visible = False
          break
    return path_visible
| Buggaboo/gimp-plugin-export-layers | export_layers/pygimplib/pgitemdata.py | Python | gpl-3.0 | 14,487 |
#!/usr/bin/env python
#
"""
These functions, when given a magnitude mag between cmin and cmax, return
a colour tuple (red, green, blue). Light blue is cold (low magnitude)
and yellow is hot (high magnitude).
"""
import math
def floatRgb(mag, cmin, cmax, alpha=1.0):
    """
    Return a tuple of floats between 0 and 1 for the red, green,
    blue and alpha amplitudes.
    """
    try:
        # normalize to [0,1]
        x = float(mag-cmin)/float(cmax-cmin)
    except ZeroDivisionError:
        # cmax == cmin: degenerate range, use the midpoint colour.
        # (Narrowed from a bare except, which also hid unrelated errors
        # such as non-numeric arguments.)
        x = 0.5
    blue = min((max((4*(0.75-x), 0.)), 1.))
    red = min((max((4*(x-0.25), 0.)), 1.))
    green = min((max((4*math.fabs(x-0.5)-1., 0.)), 1.))
    return (red, green, blue, alpha)
def strRgb(mag, cmin, cmax):
    """
    Return a colour string of the form "#rrggbb" to be used in Tk plots.
    """
    # floatRgb returns a 4-tuple (red, green, blue, alpha); the original
    # 3-name unpack raised ValueError. Alpha is not used here.
    red, green, blue, _alpha = floatRgb(mag, cmin, cmax)
    # The %x conversion requires integers, so scale and truncate the floats.
    return "#%02x%02x%02x" % (int(red*255), int(green*255), int(blue*255))
def rgb(mag, cmin, cmax):
    """
    Return a tuple of integers to be used in AWT/Java plots.
    """
    # floatRgb returns a 4-tuple (red, green, blue, alpha); the original
    # 3-name unpack raised ValueError. Alpha is not used here.
    red, green, blue, _alpha = floatRgb(mag, cmin, cmax)
    return (int(red*255), int(green*255), int(blue*255))
def htmlRgb(mag, cmin, cmax):
    """
    Return a colour string of the form "#rrggbb" to be used in HTML documents.
    """
    r, g, b = rgb(mag, cmin, cmax)
    return "#%02x%02x%02x" % (r, g, b)
| cliburn/flow | src/plugins/visual/TwoDFrame/colormap.py | Python | gpl-3.0 | 1,367 |
# vim: set fileencoding=utf-8 :
# GNU Solfege - free ear training software
# Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2011 Tom Cato Amundsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import solfege
import webbrowser
import textwrap
# We move x-www-browser to the end of the list because on my
# debian etch system, the browser will freeze solfege until
# I close the browser window.
# NOTE(review): webbrowser._tryorder is a private attribute of the stdlib
# module; confirm it still exists (and is a list) on target Python versions.
try:
    i = webbrowser._tryorder.index("x-www-browser")
    webbrowser._tryorder.append(webbrowser._tryorder[i])
    del webbrowser._tryorder[i]
except ValueError:
    # "x-www-browser" not registered; nothing to reorder.
    pass
import sys
import traceback
import locale
import os
import urllib
import shutil
try:
from pyalsa import alsaseq
except ImportError:
alsaseq = None
from solfege import winlang
from solfege import buildinfo
from solfege.esel import FrontPage, TestsView, SearchView
from gi.repository import Gtk
from gi.repository import Gdk
from solfege import utils
from solfege import i18n
class SplashWin(Gtk.Window):
    """Borderless popup window shown while the application starts up."""

    def __init__(self):
        Gtk.Window.__init__(self, Gtk.WindowType.POPUP)
        self.set_position(Gtk.WindowPosition.CENTER)
        self.set_resizable(True)
        frame = Gtk.Frame()
        frame.set_shadow_type(Gtk.ShadowType.OUT)
        self.add(frame)
        vbox = Gtk.VBox()
        vbox.set_border_width(20)
        frame.add(vbox)
        l = Gtk.Label(label=_("Starting GNU Solfege %s") % buildinfo.VERSION_STRING)
        l.set_name("Heading1")
        vbox.pack_start(l, True, True, 0)
        l = Gtk.Label(label="http://www.solfege.org")
        vbox.pack_start(l, True, True, 0)
        # Label updated by show_progress() with the current startup step.
        self.g_infolabel = Gtk.Label(label='')
        vbox.pack_start(self.g_infolabel, True, True, 0)
        self.show_all()

    def show_progress(self, txt):
        """Display `txt` on the splash screen and flush pending GTK events
        so the update is visible before startup work continues."""
        self.g_infolabel.set_text(txt)
        while Gtk.events_pending():
            Gtk.main_iteration()
from solfege.configwindow import ConfigWindow
from solfege.profilemanager import ChangeProfileDialog
from solfege import gu
from solfege import cfg
from solfege import mpd
from solfege import lessonfile
from solfege import download_pyalsa
from solfege import statistics
from solfege import stock
from solfege import frontpage
from solfege import fpeditor
from solfege.trainingsetdlg import TrainingSetDialog
from solfege.practisesheetdlg import PractiseSheetDialog
from solfege import filesystem
class MusicViewerWindow(Gtk.Dialog):
    """Dialog that displays rendered music notation via mpd.MusicDisplayer."""

    def __init__(self):
        Gtk.Dialog.__init__(self)
        self.set_default_size(500, 300)
        self.g_music_displayer = mpd.MusicDisplayer()
        self.vbox.pack_start(self.g_music_displayer, True, True, 0)
        # Both the Close button and destroying the window go through
        # solfege.win.close_musicviewer so the main window state stays in sync.
        b = gu.bButton(self.action_area, _("Close"), solfege.win.close_musicviewer)
        b.grab_focus()
        self.connect('destroy', solfege.win.close_musicviewer)
        self.show_all()

    def display_music(self, music):
        """Render `music` using the font size from the user's configuration."""
        fontsize = cfg.get_int('config/feta_font_size=20')
        self.g_music_displayer.display(music, fontsize)
class MainWin(Gtk.Window, cfg.ConfigUtils):
default_front_page = os.path.join(lessonfile.exercises_dir, 'learningtree.txt')
debug_front_page = os.path.join(lessonfile.exercises_dir, 'debugtree.txt')
def __init__(self, options, datadir):
Gtk.Window.__init__(self, Gtk.WindowType.TOPLEVEL)
self._vbox = Gtk.VBox()
self._vbox.show()
self.add(self._vbox)
stock.SolfegeIconFactory(self, datadir)
Gtk.Settings.get_default().set_property('gtk-button-images', True)
cfg.ConfigUtils.__dict__['__init__'](self, 'mainwin')
self.set_resizable(self.get_bool('gui/mainwin_user_resizeable'))
self.add_watch('gui/mainwin_user_resizeable', lambda s: self.set_resizable(self.get_bool('gui/mainwin_user_resizeable')))
self.connect('delete-event', self.quit_program)
self.connect('key_press_event', self.on_key_press_event)
self.g_about_window = None
self.m_exercise = None
self.m_viewer = None
self.box_dict = {}
self.g_config_window = None
self.g_path_info_dlg = None
self.g_musicviewer_window = None
self.m_history = []
self.g_ui_manager = Gtk.UIManager()
self.m_action_groups = {
'Exit': Gtk.ActionGroup('Exit'),
'NotExit': Gtk.ActionGroup('NotExit'),
}
for a in self.m_action_groups.values():
self.g_ui_manager.insert_action_group(a, 1)
self.setup_menu()
self.main_box = Gtk.VBox()
self.main_box.show()
self._vbox.pack_start(self.main_box, True, True, 0)
def get_view(self):
"""
Return the view that is currently visible.
Raise KeyError if no view has yet been added.
"""
return self.box_dict[self.m_viewer]
def add_view(self, view, name):
"""
Hide the current view.
Add and view the new view.
"""
assert name not in self.box_dict
if self.m_viewer:
self.get_view().hide()
self.box_dict[name] = view
self.main_box.pack_start(self.box_dict[name], True, True, 0)
self.box_dict[name].show()
self.m_viewer = name
def show_view(self, name):
"""
Return False if the view does not exist.
Hide the current visible view, show the view named 'name' and
return True.
"""
if name not in self.box_dict:
return False
self.get_view().hide()
self.m_viewer = name
self.box_dict[name].show()
return True
def change_frontpage(self, filename):
"""
Change to a different frontpage file.
"""
self.set_string('app/frontpage', filename)
self.load_frontpage()
def load_frontpage(self):
"""
Load the front page file set in the config database into
solfege.app.m_frontpage_data
"""
filename = self.get_string("app/frontpage")
if filename == self.debug_front_page and not solfege.app.m_options.debug:
self.set_string("app/frontpage", self.default_front_page)
filename = self.default_front_page
if not os.path.isfile(filename):
filename = self.default_front_page
try:
solfege.app.m_frontpage_data = frontpage.load_tree(filename)
except Exception, e:
if solfege.splash_win:
solfege.splash_win.hide()
solfege.app.m_frontpage_data = frontpage.load_tree(self.default_front_page)
self.set_string('app/frontpage', self.default_front_page)
gu.dialog_ok(_("Loading front page '%s' failed. Using default page." % filename),
secondary_text = "\n".join(traceback.format_exception(*sys.exc_info())))
if solfege.splash_win:
solfege.splash_win.show()
self.display_frontpage()
def setup_menu(self):
self.m_action_groups['Exit'].add_actions([
('FileMenu', None, _('_File')),
('AppQuit', 'gtk-quit', None, None, None, self.quit_program),
])
self.m_action_groups['NotExit'].add_actions([
('TheoryMenu', None, _('The_ory')),
('FrontPagesMenu', None, _('Sele_ct Front Page')),
('TheoryIntervals', None, _('_Intervals'), None, None,
lambda o: solfege.app.handle_href('theory-intervals.html')),
('TreeEditor', None, _('_Edit Front Page'), None, None,
self.do_tree_editor),
('ExportTrainingSet', None, _(u'E_xport Exercises to Audio Files…'), None, None,
self.new_training_set_editor),
('EditPractiseSheet', None, _(u'Ear Training Test Pri_ntout…'), None, None,
self.new_practisesheet_editor),
('ProfileManager', None, _("Profile _Manager"), None, None,
self.open_profile_manager),
('OpenPreferencesWindow', 'gtk-preferences', None, '<ctrl>F12', None,
self.open_preferences_window),
('HelpMenu', None, _('_Help')),
('Search', 'gtk-search', _('_Search Exercises'), '<ctrl>F', None,
self.on_search_all_exercises),
('FrontPage', None, _('_Front Page'), 'F5', None,
lambda w: self.display_frontpage()),
('TestsPage', None, _('_Tests Page'), 'F6', None,
lambda w: self.display_testpage()),
('RecentExercises', None, _('_Recent Exercises'), 'F7', None,
self.display_recent_exercises),
('RecentTests', None, _('_Recent Tests'), 'F8', None,
self.display_recent_tests),
('UserExercises', None, _('_User Exercises'), 'F9', None,
self.display_user_exercises),
('SetupPyAlsa', None, _("Download and compile ALSA modules"), None, None, self.setup_pyalsa),
('HelpHelp', 'gtk-help', _('_Help on the current exercise'), 'F1', None,
lambda o: solfege.app.please_help_me()),
('HelpTheory', None, _('_Music theory on the current exercise'), 'F3', None, lambda o: solfege.app.show_exercise_theory()),
('HelpIndex', None, _('_User manual'), None, None,
lambda o: solfege.app.handle_href('index.html')),
('HelpShowPathInfo', None, _('_File locations'), None,
None, self.show_path_info),
('HelpOnline', None, _('_Mailing lists, web page etc.'), None, None,
lambda o: solfege.app.handle_href('online-resources.html')),
('HelpDonate', None, _('_Donate'), None, None,
lambda o: solfege.app.handle_href('http://www.solfege.org/donate/')),
('HelpReportingBugs', None, _('Reporting _bugs'), None, None,
lambda o: solfege.app.handle_href('bug-reporting.html')),
('HelpAbout', 'gtk-about', None, None, None, self.show_about_window),
('ShowBugReports', None, _('_See your bug reports'), None, None,
self.show_bug_reports),
])
self.g_ui_manager.add_ui_from_file("ui.xml")
self.add_accel_group(self.g_ui_manager.get_accel_group())
hdlbox = Gtk.HandleBox()
hdlbox.show()
hdlbox.add(self.g_ui_manager.get_widget('/Menubar'))
self._vbox.pack_start(hdlbox, False, False, 0)
self.m_help_on_current_merge_id = None
def create_frontpage_menu(self):
"""
Create, or update if already existing, the submenu that let the
user choose which front page file to display.
"""
if self.m_frontpage_merge_id:
self.g_ui_manager.remove_ui(self.m_frontpage_merge_id)
actions = []
old_dir = None
s = "<menubar name='Menubar'><menu action='FileMenu'><menu action='FrontPagesMenu'>"
for fn in frontpage.get_front_pages_list(solfege.app.m_options.debug):
if solfege.splash_win:
solfege.splash_win.show_progress(fn)
if not frontpage.may_be_frontpage(fn):
continue
try:
title = lessonfile.infocache.frontpage.get(fn, 'title')
except TypeError:
continue
cur_dir = os.path.split(fn)[0]
if old_dir != cur_dir:
s += '<separator name="sep@%s"/>' % fn
old_dir = cur_dir
s += "<menuitem action='%s'/>\n" % fn
if not self.m_action_groups['NotExit'].get_action(fn):
actions.append((fn, None, lessonfile.infocache.frontpage.get(fn, 'title'), None, fn,
lambda o, f=fn: self.change_frontpage(f)))
else:
action = self.m_action_groups['NotExit'].get_action(fn)
action.props.label = lessonfile.infocache.frontpage.get(fn, 'title')
s += "</menu></menu></menubar>"
self.m_action_groups['NotExit'].add_actions(actions)
self.m_frontpage_merge_id = self.g_ui_manager.add_ui_from_string(s)
def show_help_on_current(self):
"""
Show the menu entries for the exercise help and music theory
pages on the Help menu.
"""
if self.m_help_on_current_merge_id:
return
self.m_help_on_current_merge_id = self.g_ui_manager.add_ui_from_string("""
<menubar name='Menubar'>
<menu action='HelpMenu'>
<placeholder name='PerExerciseHelp'>
<menuitem position='top' action='HelpHelp' />
<menuitem action='HelpTheory' />
</placeholder>
</menu>
</menubar>""")
def hide_help_on_current(self):
"""
Hide the menu entries for the help and music theory pages on the
Help menu.
"""
if not self.m_help_on_current_merge_id:
return
self.g_ui_manager.remove_ui(self.m_help_on_current_merge_id)
self.m_help_on_current_merge_id = None
def show_bug_reports(self, *v):
m = Gtk.Dialog(_("Question"), self, 0)
m.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)
m.add_button(Gtk.STOCK_OK, Gtk.ResponseType.OK)
vbox = Gtk.VBox()
m.vbox.pack_start(vbox, False, False, 0)
vbox.set_spacing(18)
vbox.set_border_width(12)
l = Gtk.Label(label=_("Please enter the email used when you submitted the bugs:"))
vbox.pack_start(l, False, False, 0)
self.g_email = Gtk.Entry()
m.action_area.get_children()[0].grab_default()
self.g_email.set_activates_default(True)
vbox.pack_start(self.g_email, False, False, 0)
m.show_all()
ret = m.run()
m.destroy()
if ret == Gtk.ResponseType.OK:
params = urllib.urlencode({
'pagename': 'SITS-Incoming/SearchBugs',
'q': 'SITS-Incoming/"Submitter: %s"' % utils.mangle_email(self.g_email.get_text().decode("utf-8")()),
})
try:
webbrowser.open_new("http://www.solfege.org?%s" % params)
except Exception, e:
self.display_error_message2(_("Error opening web browser"), str(e))
    def display_error_message2(self, text, secondary_text):
        """
        This is the new version of display_error_message, and it will
        eventually replace the old.

        Shows a modal error dialog with a primary and a secondary text,
        temporarily hiding the splash screen if it is visible.
        """
        if solfege.splash_win and solfege.splash_win.props.visible:
            solfege.splash_win.hide()
            reshow_splash = True
        else:
            reshow_splash = False
        # Decode byte strings with the locale encoding; 'replace' avoids
        # raising on undecodable bytes.
        if not isinstance(text, unicode):
            text = text.decode(locale.getpreferredencoding(), 'replace')
        if not isinstance(secondary_text, unicode):
            secondary_text = secondary_text.decode(locale.getpreferredencoding(), 'replace')
        m = Gtk.MessageDialog(None, Gtk.DialogFlags.MODAL, Gtk.MessageType.ERROR,
                              Gtk.ButtonsType.CLOSE, text)
        if secondary_text:
            m.format_secondary_text(secondary_text)
        m.run()
        m.destroy()
        if reshow_splash:
            solfege.splash_win.show()
            # Flush pending GTK events so the splash is redrawn immediately.
            while Gtk.events_pending():
                Gtk.main_iteration()
    def display_error_message(self, msg, title=None, secondary_text=None):
        """
        Show a modal error dialog with markup-escaped message text.

        Older API kept for existing callers; see display_error_message2.
        """
        if solfege.splash_win and solfege.splash_win.props.visible:
            solfege.splash_win.hide()
            reshow_splash = True
        else:
            reshow_splash = False
        if not isinstance(msg, unicode):
            msg = msg.decode(locale.getpreferredencoding(), 'replace')
        m = Gtk.MessageDialog(None, Gtk.DialogFlags.MODAL, Gtk.MessageType.ERROR,
                              Gtk.ButtonsType.CLOSE, None)
        # Escape the message since set_markup interprets Pango markup.
        m.set_markup(gu.escape(msg))
        if title:
            m.set_title(title)
        if secondary_text:
            m.format_secondary_text(secondary_text)
        m.run()
        m.destroy()
        if reshow_splash:
            solfege.splash_win.show()
            while Gtk.events_pending():
                Gtk.main_iteration()
    def show_path_info(self, w):
        """
        Show (lazily creating on first use) a dialog listing the file
        locations and library info relevant for debugging an install.
        """
        if not self.g_path_info_dlg:
            self.g_path_info_dlg = Gtk.Dialog(_("_File locations").replace("_", ""), self,
                buttons=(Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT))
            sc = Gtk.ScrolledWindow()
            sc.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.NEVER)
            self.g_path_info_dlg.vbox.pack_start(sc, True, True, 0)
            #
            vbox = gu.hig_dlg_vbox()
            sc.add_with_viewport(vbox)
            box1, box2 = gu.hig_category_vbox(_("_File locations").replace("_", ""))
            vbox.pack_start(box1, True, True, 0)
            sizegroup = Gtk.SizeGroup(Gtk.SizeGroupMode.HORIZONTAL)
            # statistics.sqlite
            # win32 solfegerc
            # win32 langenviron.txt
            box2.pack_start(gu.hig_label_widget(_("Solfege application data:"), Gtk.Label(label=filesystem.app_data()), sizegroup), False, False, 0)
            box2.pack_start(gu.hig_label_widget(_("Solfege user data:"), Gtk.Label(label=filesystem.user_data()), sizegroup), False, False, 0)
            box2.pack_start(gu.hig_label_widget(_("Solfege config file:"), Gtk.Label(label=filesystem.rcfile()), sizegroup), False, False, 0)
            box2.pack_start(gu.hig_label_widget(_("Solfege installation directory:"), Gtk.Label(label=os.getcwdu()), sizegroup), False, False, 0)
            box2.pack_start(gu.hig_label_widget(_("User manual in HTML format:"), Gtk.Label(label=os.path.join(os.getcwdu(), "help")), sizegroup), False, False, 0)
            box2.pack_start(gu.hig_label_widget("gtk:", Gtk.Label(label=str(Gtk)), sizegroup), False, False, 0)
            box2.pack_start(gu.hig_label_widget("pyalsa:", Gtk.Label(label=str(alsaseq)), sizegroup), False, False, 0)
            box2.pack_start(gu.hig_label_widget("PYTHONHOME", Gtk.Label(os.environ.get('PYTHONHOME', 'Not defined')), sizegroup), False, False, 0)
            self.g_path_info_dlg.show_all()
            # Hide rather than destroy on close, so the dialog can be reshown.
            def f(*w):
                self.g_path_info_dlg.hide()
                return True
            self.g_path_info_dlg.connect('response', f)
            self.g_path_info_dlg.connect('delete-event', f)
            # Cap the width at 90% of the screen width.
            sc.set_size_request(min(vbox.size_request().width + gu.SPACE_LARGE * 2,
                                    Gdk.Screen.width() * 0.9),
                                vbox.size_request().height)
    def setup_pyalsa(self, widget):
        """Menu callback: download and install the pyalsa module."""
        download_pyalsa.download()
    def show_about_window(self, widget):
        """Show the modal About dialog with credits and license text."""
        pixbuf = self.render_icon('solfege-icon', Gtk.IconSize.DIALOG)
        a = self.g_about_window = Gtk.AboutDialog()
        a.set_program_name("GNU Solfege")
        a.set_logo(pixbuf)
        a.set_website("http://www.solfege.org")
        a.set_version(buildinfo.VERSION_STRING)
        a.set_copyright("Copyright (C) 2013 Tom Cato Amundsen and others")
        a.set_license("\n".join((solfege.application.solfege_copyright, solfege.application.warranty)))
        # Using set_license_type causes the app to print warnings.
        #a.set_license_type(Gtk.License.GPL_3_0)
        a.set_authors(["Tom Cato Amundsen",
            'Giovanni Chierico %s' % _("(some lessonfiles)"),
            'Michael Becker %s' % _("(some lessonfiles)"),
            'Joe Lee %s' % _("(sound code for the MS Windows port)"),
            'Steve Lee %s' % _("(ported winmidi.c to gcc)"),
            'Thibaus Cousin %s' % _("(spec file for SuSE 8.2)"),
            'David Coe %s' %_("(spec file cleanup)"),
            'David Petrou %s' % _("(testing and portability fixes for FreeBSD)"),
            'Han-Wen Nienhuys %s' % _("(the music font from Lilypond)"),
            'Jan Nieuwenhuizen %s' % _("(the music font from Lilypond)"),
            'Davide Bonetti %s' % _("(scale exercises)"),
            ])
        a.set_documenters(["Tom Cato Amundsen",
            "Tom Eykens",
            ])
        # The untranslated placeholder means no translator credit exists
        # for the current locale.
        if _("SOLFEGETRANSLATORS") == 'SOLFEGETRANSLATORS':
            a.set_translator_credits(None)
        else:
            a.set_translator_credits(_("SOLFEGETRANSLATORS"))
        self.g_about_window.run()
        self.g_about_window.destroy()
    def do_tree_editor(self, *v):
        """
        Open a front page editor editing the current front page.
        """
        # "app/frontpage" is the config key holding the active front page filename.
        fpeditor.Editor.edit_file(self.get_string("app/frontpage"))
    def post_constructor(self):
        """
        Finish window setup after construction: build menus, report sound
        init errors, run one-time data migrations for older releases, and
        warn about front page files in obsolete formats.
        """
        self.m_frontpage_merge_id = None
        self.create_frontpage_menu()
        self.g_ui_manager.add_ui_from_file("help-menu.xml")
        # The pyalsa setup menu item only makes sense on Linux.
        if sys.platform != 'linux2':
            self.g_ui_manager.get_widget('/Menubar/HelpMenu/SetupPyAlsa').hide()
        if solfege.app.m_sound_init_exception is not None:
            if solfege.splash_win:
                solfege.splash_win.destroy()
                solfege.splash_win = None
            solfege.app.display_sound_init_error_message(solfege.app.m_sound_init_exception)
        # MIGRATION 3.9.0
        # Move user lesson files from the old home-dir location into the
        # per-user data directory, asking the user first.
        if sys.platform == "win32" \
            and os.path.exists(os.path.join(filesystem.get_home_dir(), "lessonfiles")) \
            and not os.path.exists(filesystem.user_lessonfiles()):
            if solfege.splash_win:
                solfege.splash_win.hide()
            do_move = gu.dialog_yesno(_('In Solfege 3.9.0, the location where Solfege look for lesson files you have created was changed. The files has to be moved from "%(old)s" and into the folder "%(gnu)s" in your "%(doc)s" folder.\nMay I move the files automatically for you now?' % {
                'doc': os.path.split(os.path.split(filesystem.user_data())[0])[1],
                'gnu': os.path.join(filesystem.appname, 'lessonfiles'),
                'old': os.path.join(filesystem.get_home_dir(), "lessonfiles"),
                }), parent=self)
            if do_move:
                try:
                    os.makedirs(filesystem.user_data())
                    shutil.copytree(os.path.join(filesystem.get_home_dir(), "lessonfiles"),
                                os.path.join(filesystem.user_data(), "lessonfiles"))
                except (OSError, shutil.Error), e:
                    gu.dialog_ok(_("Error while copying directory:\n%s" % e))
                else:
                    gu.dialog_ok(_("Files copied. The old files has been left behind. Please delete them when you have verified that all files was copied correctly."))
            if solfege.splash_win:
                solfege.splash_win.show()
        # MIGRATION 3.9.3 when we added langenviron.bat and in 3.11
        # we migrated to langenviron.txt because we does not use cmd.exe
        if sys.platform == 'win32' and winlang.win32_get_langenviron() != self.get_string('app/lc_messages'):
            gu.dialog_ok(_("Migrated old language setup. You might have to restart the program all translated messages to show up."))
            winlang.win32_put_langenviron(self.get_string('app/lc_messages'))
        # MIGRATION 3.11.1: earlier editors would create new learning trees
        # below app_data() instead of user_data().
        if (sys.platform == "win32" and
            os.path.exists(os.path.join(filesystem.app_data(),
                                        "learningtrees"))):
            if not os.path.exists(os.path.join(filesystem.user_data(), "learningtrees")):
                os.makedirs(os.path.join(filesystem.user_data(), "learningtrees"))
            for fn in os.listdir(os.path.join(filesystem.app_data(), "learningtrees")):
                if not os.path.exists(os.path.join(filesystem.user_data(), "learningtrees", fn)):
                    shutil.move(os.path.join(filesystem.app_data(), "learningtrees", fn),
                            os.path.join(filesystem.user_data(), "learningtrees"))
                else:
                    # We add the .bak exstention if the file already exists.
                    shutil.move(os.path.join(filesystem.app_data(), "learningtrees", fn),
                            os.path.join(filesystem.user_data(), "learningtrees", u"%s.bak" % fn))
            os.rmdir(os.path.join(os.path.join(filesystem.app_data(), "learningtrees")))
        # Rebuild the front-page menu each time it is opened.
        item = self.g_ui_manager.get_widget("/Menubar/FileMenu/FrontPagesMenu")
        item.connect('activate', lambda s: self.create_frontpage_menu())
        try:
            # The attribute only exists if locale setup failed at startup.
            i18n.locale_setup_failed
            print >> sys.stderr, "\n".join(textwrap.wrap("Translations are disabled because your locale settings are broken. This is not a bug in GNU Solfege, so don't report it. The README file distributed with the program has some more details."))
        except AttributeError:
            pass
        for filename in lessonfile.infocache.frontpage.iter_old_format_files():
            gu.dialog_ok(_("Cannot load front page file"), None,
                _(u"The file «%s» is saved in an old file format. The file can be converted by editing and saving it with an older version of Solfege. Versions from 3.16.0 to 3.20.4 should do the job.") % filename)
    def activate_exercise(self, module, urlobj=None):
        """
        Show the view for the given exercise module, optionally switching
        to the notebook page named by urlobj.action, and update the title.
        """
        self.show_view(module)
        # We need this test because not all exercises use a notebook.
        if self.get_view().g_notebook:
            if urlobj and urlobj.action in ['practise', 'config', 'statistics']:
                self.get_view().g_notebook.set_current_page(
                   ['practise', 'config', 'statistics'].index(urlobj.action))
            else:
                self.get_view().g_notebook.set_current_page(0)
        self.set_title("Solfege - " + self.get_view().m_t.m_P.header.title)
    def display_docfile(self, fn):
        """
        Display the HTML file named by fn in the help browser window.
        """
        # Prefer the user's manual language, falling back to the "C" locale.
        # NOTE(review): if neither file exists, the last candidate path is
        # opened anyway -- confirm this is the intended fallback.
        for lang in solfege.app.m_userman_language, "C":
            filename = os.path.join(os.getcwdu(), u"help", lang, fn)
            if os.path.isfile(filename):
                break
        try:
            webbrowser.open(filename)
        except Exception, e:
            self.display_error_message2(_("Error opening web browser"), str(e))
    def display_user_exercises(self, w):
        """
        Build and display a front page listing the user's own lesson
        files, grouped per directory, including files still in the
        pre-3.15.3 location (with a warning paragraph).
        """
        col = frontpage.Column()
        page = frontpage.Page(_('User exercises'), col)
        curdir = None
        linklist = None
        for filename in lessonfile.infocache.iter_user_files(only_user_collection=True):
            dir, fn = os.path.split(filename)
            if dir != curdir:
                # Start a new link list whenever the directory changes.
                curdir = dir
                linklist = frontpage.LinkList(dir)
                col.append(linklist)
            linklist.append(filename)
        if os.path.isdir(filesystem.user_lessonfiles()):
            linklist = None
            col.append(frontpage.Paragraph(_('You really should move the following directory to a directory below <span font_family="monospace">%s</span>. Future versions of GNU Solfege will not display files in the old location. The user manual have details on where to place the files.') % os.path.join(filesystem.user_data(), u'exercises')))
            # Added just to be nice with people not moving their files from
            # pre 3.15.3 location:
            for filename in os.listdir(filesystem.user_lessonfiles()):
                if not linklist:
                    linklist = frontpage.LinkList(filesystem.user_lessonfiles())
                linklist.append(os.path.join(filesystem.user_lessonfiles(), filename))
            # only display the linklist if there are any files.
            if linklist:
                col.append(linklist)
        self.display_frontpage(page)
    def display_recent_exercises(self, w):
        """Display a front page listing the 8 most recently used exercises."""
        data = frontpage.Page(_('Recent exercises'),
            [frontpage.Column(
                [frontpage.LinkList(_('Recent exercises'),
                    solfege.db.recent(8))])])
        self.display_frontpage(data, show_topics=True)
        # No point searching a fixed 8-item list.
        self.get_view().g_searchbox.hide()
    def display_recent_tests(self, w):
        """Display a test page listing the 8 most recently taken tests."""
        data = frontpage.Page(_('Recent tests'),
            [frontpage.Column(
                [frontpage.LinkList(_('Recent tests'),
                    solfege.db.recent_tests(8))])])
        self.display_testpage(data, show_topics=True)
        # No point searching a fixed 8-item list.
        self.get_view().g_searchbox.hide()
    def display_testpage(self, data=None, show_topics=False):
        """
        Display the front page of the data in solfege.app.m_frontpage_data
        in test mode, creating the tests view on first use.
        """
        self.set_title("GNU Solfege - tests")
        if not self.show_view('testspage'):
            # First call: create and register the view.
            p = TestsView()
            p.connect('link-clicked', self.history_handler)
            self.add_view(p, 'testspage')
        self.get_view().g_searchbox.show()
        if not data:
            data = solfege.app.m_frontpage_data
        self.trim_history(self.get_view(), data)
        self.get_view().display_data(data, show_topics=show_topics)
    def on_search_all_exercises(self, widget=None):
        """Show the global search view, creating it on first use."""
        self.set_title("GNU Solfege")
        if not self.show_view('searchview'):
            self.add_view(SearchView(_('Search the exercise titles of all lesson files found by the program, not just the active front page with sub pages.')), 'searchview')
    def display_frontpage(self, data=None, show_topics=False):
        """
        Display the front page of the data in solfege.app.m_frontpage_data,
        creating the front page view on first use.
        """
        if solfege.app.m_options.profile:
            self.set_title("GNU Solfege - %s" % solfege.app.m_options.profile)
        else:
            self.set_title("GNU Solfege")
        if not self.show_view('frontpage'):
            # First call: create and register the view.
            p = FrontPage()
            p.connect('link-clicked', self.history_handler)
            self.add_view(p, 'frontpage')
        self.get_view().g_searchbox.show()
        if not data:
            data = solfege.app.m_frontpage_data
        self.trim_history(self.get_view(), data)
        self.get_view().display_data(data, show_topics=show_topics)
def trim_history(self, new_viewer, new_page):
# First check if the page we want to display is in m_history.
# If so, we will trunkate history after it.
for i, (viewer, page) in enumerate(self.m_history):
if (new_viewer != viewer) or (new_page == page):
self.m_history = self.m_history[:i]
break
def history_handler(self, *w):
self.m_history.append(w)
    def initialise_exercise(self, teacher):
        """
        Create a Gui object for the exercise and add it to
        the box_dict dict.
        """
        assert teacher.m_exname not in self.box_dict
        self.get_view().hide()
        # Import the exercise module lazily and build its GUI.
        m = solfege.app.import_module(teacher.m_exname)
        self.add_view(m.Gui(teacher), teacher.m_exname)
    def on_key_press_event(self, widget, event):
        """
        Handle Alt+Left as "navigate back" through m_history; otherwise
        forward the key press to the current view.
        """
        try:
            view = self.get_view()
        except KeyError:
            # No view registered yet; nothing to do.
            return
        if (event.type == Gdk.EventType.KEY_PRESS
            and event.get_state() & Gdk.ModifierType.MOD1_MASK == Gdk.ModifierType.MOD1_MASK  # Alt key
            and event.keyval in (Gdk.KEY_KP_Left, Gdk.KEY_Left)
            and self.m_history
            and not solfege.app.m_test_mode):
            obj, page = self.m_history[-1]
            self.trim_history(obj, page)
            # Find the box_dict key for obj
            for k, o in self.box_dict.items():
                if o == obj:
                    obj.display_data(page)
                    self.show_view(k)
                    break
            # Returning True stops further propagation of the event.
            return True
        view.on_key_press_event(widget, event)
    def open_profile_manager(self, widget=None):
        """
        Run the profile chooser dialog, switch the statistics database to
        the selected profile, and redisplay the front page.
        """
        p = ChangeProfileDialog(solfege.app.m_options.profile)
        if p.run() == Gtk.ResponseType.ACCEPT:
            prof = p.get_profile()
        else:
            # The user presses cancel. This will use the same profile as
            # before, but if the user has renamed the active profile, then
            # we need to use the new name.
            prof = p.m_default_profile
        solfege.app.reset_exercise()
        solfege.app.m_options.profile = prof
        # Commit and reopen the statistics DB under the new profile.
        solfege.db.conn.commit()
        solfege.db.conn.close()
        solfege.db = statistics.DB(None, profile=prof)
        cfg.set_string("app/last_profile", prof)
        self.display_frontpage()
        p.destroy()
    def open_preferences_window(self, widget=None):
        """Show the preferences window, creating it on first use."""
        if not self.g_config_window:
            self.g_config_window = ConfigWindow()
            self.g_config_window.show()
        else:
            # Refresh statistics info before reshowing the existing window.
            self.g_config_window.update_old_statistics_info()
            self.g_config_window.update_statistics_info()
            self.g_config_window.show()
    def quit_program(self, *w):
        """
        Close all editor dialogs and quit; abort if any dialog refuses
        to close (e.g. unsaved changes).

        Returns True (stop signal propagation) when quitting is aborted.
        """
        can_quit = True
        for dlg in gu.EditorDialogBase.instance_dict.values():
            if dlg.close_window():
                dlg.destroy()
            else:
                # One dialog vetoed the close; abort quitting.
                can_quit = False
                break
        if can_quit:
            solfege.app.quit_program()
            Gtk.main_quit()
        else:
            return True
    def display_in_musicviewer(self, music):
        """Show the given music in the music viewer, creating it on first use."""
        if not self.g_musicviewer_window:
            self.g_musicviewer_window = MusicViewerWindow()
            self.g_musicviewer_window.show()
        self.g_musicviewer_window.display_music(music)
def close_musicviewer(self, widget=None):
self.g_musicviewer_window.destroy()
self.g_musicviewer_window = None
    def enter_test_mode(self):
        """
        Switch the current exercise view into test mode, if the exercise
        module supports it, reparenting the practise page into the main box.
        """
        if 'enter_test_mode' not in dir(self.get_view()):
            gu.dialog_ok(_("The '%s' exercise module does not support test yet." % self.m_viewer))
            return
        self.m_action_groups['NotExit'].set_sensitive(False)
        # Keep a reference to the practise page so exit_test_mode can restore it.
        self.g = self.get_view().g_notebook.get_nth_page(0)
        self.get_view().g_notebook.get_nth_page(0).reparent(self.main_box)
        self.get_view().g_notebook.hide()
        self.get_view().enter_test_mode()
    def exit_test_mode(self):
        """
        Leave test mode: restore the practise page into the notebook and
        re-enable the menus disabled by enter_test_mode.
        """
        solfege.app.m_test_mode = False
        self.m_action_groups['NotExit'].set_sensitive(True)
        box = Gtk.VBox()
        self.get_view().g_notebook.insert_page(box, Gtk.Label(label=_("Practise")), 0)
        # self.g was saved by enter_test_mode.
        self.g.reparent(box)
        self.get_view().g_notebook.show()
        self.get_view().g_notebook.get_nth_page(0).show()
        self.get_view().g_notebook.set_current_page(0)
        self.get_view().exit_test_mode()
    def new_training_set_editor(self, widget):
        """Open a new training set editor dialog."""
        dlg = TrainingSetDialog()
        dlg.show_all()
    def new_practisesheet_editor(self, widget):
        """Open a new practise sheet editor dialog."""
        dlg = PractiseSheetDialog()
        dlg.show_all()
| yuanyelele/solfege | solfege/mainwin.py | Python | gpl-3.0 | 35,025 |
"""
Python-packaging for synbiomts
Copyright 2017 Alexander C. Reis, Howard M. Salis, all rights reserved.
"""
from setuptools import setup
def readme():
    """Return the contents of README.rst for use as the long description."""
    handle = open('README.rst')
    try:
        return handle.read()
    finally:
        handle.close()
# Package metadata and build configuration (setuptools).
setup(name='synbiomts',
      version='1.0',
      description='Test suite for DNA sequence-function models',
      url='http://github.com/reisalex/SynBioMTS',
      author='Alexander C. Reis',
      author_email='alex.reis@psu.edu',
      license='MIT',
      packages=['synbiomts'],
      # install_requires=['numpy','scipy','pandas','biopython'],
      zip_safe=False)
# -*- coding: utf-8 -*-
import os
"""
Illustration d'un exercice de TD visant à montrer l'évolution temporelle de la
densité de probabilité pour la superposition équiprobable d'un état n=1 et
d'un état n quelconque (à fixer) pour le puits quantique infini.
Par souci de simplicité, on se débrouille pour que E_1/hbar = 1
"""
import numpy as np # Boîte à outils numériques
import matplotlib.pyplot as plt # Boîte à outils graphiques
from matplotlib import animation # Pour l'animation progressive
# Quantum number of the second state in the superposition (set as needed).
n = 2
# All parameters set to 1 (or nearly), in arbitrary units.
t0 = 0
dt = 0.1
L = 1
hbar = 1
h = hbar * 2 * np.pi
m = (2 * np.pi)**2
# Ground-state energy of the infinite square well: E1 = h^2 / (8 m L^2).
E1 = h**2 / (8 * m * L**2)
# NOTE(review): for the infinite well E_n = n**2 * E1; here En = n * E1 --
# confirm whether the linear scaling is a deliberate simplification.
En = n * E1
# Spatial grid over the well [0, L].
x = np.linspace(0, L, 1000)
def psi1(x, t):
    """Unnormalized n=1 stationary state times its time phase factor."""
    spatial = np.sin(np.pi * x / L)
    # NOTE(review): phase sign is +i*E1*t/hbar; the usual convention is
    # -iEt/hbar, but only relative phases matter for the plotted |psi|^2.
    phase = np.exp(1j * E1 * t / hbar)
    return spatial * phase
def psin(x, t):
    """Unnormalized n-th stationary state times its time phase factor."""
    spatial = np.sin(n * np.pi * x / L)
    phase = np.exp(1j * En * t / hbar)
    return spatial * phase
def psi(x, t):
    """Equal-weight superposition of the n=1 and n-th states, scaled by 1/sqrt(L)."""
    norm = 1 / L**0.5
    return norm * (psi1(x, t) + psin(x, t))
fig = plt.figure()
# Initial frame: probability density of the superposition at t0.
line, = plt.plot(x, abs(psi(x, t0))**2)
plt.title('$t={}$'.format(t0))
plt.ylabel('$|\psi(x,t)|^2$')
plt.xlabel('$x$')
# Static references: densities of the two stationary states (time independent).
plt.plot(x, abs(psi1(x, t0))**2, '--', label='$|\psi_1|^2$')
plt.plot(x, abs(psin(x, t0))**2, '--', label='$|\psi_{}|^2$'.format(n))
plt.legend()
def init():
    # FuncAnimation init callback: nothing to reset, the artists keep their data.
    pass
def animate(i):
    """Frame callback: advance time and update the density curve and title."""
    t = i * dt + t0
    line.set_ydata(abs(psi(x, t))**2)
    plt.title('$t={}$'.format(t))
# 1000 frames, 20 ms apart; keep the reference so the animation is not garbage collected.
anim = animation.FuncAnimation(fig, animate, frames=1000, interval=20)
plt.show()
# NOTE(review): "pause" is a Windows cmd builtin; on other platforms this
# command fails harmlessly -- confirm it is still wanted.
os.system("pause")
| NicovincX2/Python-3.5 | Physique/Physique quantique/Mécanique quantique/principe_de_superposition_lineaire.py | Python | gpl-3.0 | 1,519 |
#!/usr/bin/python
import os
import sys
import grp
import pwd
import traceback
import utils
import hooking
DEV_MAPPER_PATH = "/dev/mapper"
DEV_DIRECTLUN_PATH = '/dev/directlun'
def createdirectory(dirpath):
    """
    Create dirpath (mkdir -p) and chmod it to 755, via sudo.

    Exits the process with status 2 on any failure.
    """
    # we don't use os.mkdir/chown because we need sudo
    command = ['/bin/mkdir', '-p', dirpath]
    retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
    if retcode != 0:
        sys.stderr.write('directlun: error mkdir %s, err = %s\n' % (dirpath, err))
        sys.exit(2)

    mode = '755'
    command = ['/bin/chmod', mode, dirpath]
    # BUG FIX: the original built this command but never ran it, and then
    # tested the stale retcode from the mkdir call above.
    retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
    if retcode != 0:
        sys.stderr.write('directlun: error chmod %s %s, err = %s\n' % (dirpath, mode, err))
        sys.exit(2)
def cloneDeviceNode(srcpath, devpath):
    """Clone a device node into a temporary private location.

    Recreates devpath as a block device with the same major/minor numbers
    as srcpath, mode 660, owned by qemu:qemu. Exits with status 2 on any
    failure. All commands run through sudo.
    """
    # we don't use os.remove/mknod/chmod/chown because we need sudo
    command = ['/bin/rm', '-f', devpath]
    retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
    if retcode != 0:
        sys.stderr.write('directlun: error rm -f %s, err = %s\n' % (devpath, err))
        sys.exit(2)

    # Copy the source node's major/minor numbers onto the new node.
    stat = os.stat(srcpath)
    major = os.major(stat.st_rdev)
    minor = os.minor(stat.st_rdev)
    command = ['/bin/mknod', devpath, 'b', str(major), str(minor)]
    retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
    if retcode != 0:
        sys.stderr.write('directlun: error mknod %s, err = %s\n' % (devpath, err))
        sys.exit(2)

    mode = '660'
    command = ['/bin/chmod', mode, devpath]
    retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
    if retcode != 0:
        sys.stderr.write('directlun: error chmod %s to %s, err = %s\n' % (devpath, mode, err))
        sys.exit(2)

    # Give ownership to the qemu user/group so the VM process can open it.
    group = grp.getgrnam('qemu')
    gid = group.gr_gid
    user = pwd.getpwnam('qemu')
    uid = user.pw_uid
    owner = str(uid) + ':' + str(gid)
    command = ['/bin/chown', owner, devpath]
    retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
    if retcode != 0:
        sys.stderr.write('directlun: error chown %s to %s, err = %s\n' % (devpath, owner, err))
        sys.exit(2)
# Hook entry point: runs only when the 'directlun' custom property is set.
# Its value is a comma-separated list of "lun[:opt;opt...]" entries.
if os.environ.has_key('directlun'):
    try:
        luns = os.environ['directlun']
        domxml = hooking.read_domxml()

        createdirectory(DEV_DIRECTLUN_PATH)

        for lun in luns.split(','):
            try:
                lun, options = lun.split(':')
            except ValueError:
                options = ''
            # Options are parsed but not otherwise used below.
            options = options.split(';')

            srcpath = DEV_MAPPER_PATH + '/' + lun
            if not os.path.exists(srcpath):
                sys.stderr.write('directlun before_vm_migration_destination: device not found %s\n' % srcpath)
                sys.exit(2)

            # Name the private node after the LUN and the VM uuid so
            # several VMs can use the same LUN concurrently.
            uuid = domxml.getElementsByTagName('uuid')[0]
            uuid = uuid.childNodes[0].nodeValue
            devpath = DEV_DIRECTLUN_PATH + '/' + lun + '-' + uuid

            cloneDeviceNode(srcpath, devpath)

        hooking.write_domxml(domxml)
    except:
        sys.stderr.write('directlun before_vm_migration_destination: [unexpected error]: %s\n' % traceback.format_exc())
        sys.exit(2)
| DragonRoman/rhevm-utils | 3.0/hooks/directlun/before_vm_migrate_destination.py | Python | gpl-3.0 | 3,145 |
"""~google <search term> will return three results from the google search for <search term>"""
import re
import requests
from random import shuffle
from googleapiclient.discovery import build
import logging
from secret_example import GOOGLE_CUSTOM_SEARCH_ENGINE, GOOGLE_SEARCH_API
"""fuction to fetch data from Google Custom Search Engine API"""
def google(searchterm, api_key, cse_id, **kwargs):
    """Query the Google Custom Search API and return the raw result items.

    NOTE(review): res['items'] raises KeyError when the API returns no
    results -- confirm callers can tolerate that.
    """
    service = build("customsearch", "v1", developerKey=api_key, cache_discovery=False)
    res = service.cse().list(q=searchterm, cx=cse_id, **kwargs).execute()
    return res['items']
"""fuction to return first three search results"""
def google_search(searchterm):
    """Return up to the first three result links, one per line.

    Each link is followed by a newline; fewer than three results simply
    yields fewer lines (the original duplicated this logic in two
    branches -- a slice handles both cases).
    """
    results = google(searchterm, GOOGLE_SEARCH_API, GOOGLE_CUSTOM_SEARCH_ENGINE, num=10)
    retval = ""
    for item in results[:3]:
        retval += item['link'] + "\n"
    return retval
def on_message(msg, server):
    """Handle a chat message; answer "~google <term>" with search links."""
    hits = re.findall(r"~google (.*)", msg.get("text", ""))
    if hits:
        return google_search(hits[0])
    return None


on_bot_message = on_message
| Eulercoder/fabulous | fabulous/services/google.py | Python | gpl-3.0 | 1,253 |
from typing import List, Tuple, Union
import fwdpy11._fwdpy11
import fwdpy11._types
import numpy as np
def simplify(pop, samples):
    """
    Simplify a TableCollection stored in a Population.

    :param pop: A :class:`fwdpy11.DiploidPopulation`
    :param samples: A list of samples (node indexes).

    :return: The simplified tables and array mapping input sample IDs to output IDS

    :rtype: tuple

    Note that the samples argument is agnostic with respect to the time of
    the nodes in the input tables.  Thus, you may do things like simplify
    to a set of "currently-alive" nodes plus some or all ancient samples by
    including some node IDs from
    :attr:`fwdpy11.DiploidPopulation.ancient_sample_metadata`.

    If the input contains ancient samples, and you wish to include them in the output,
    then you need to include their IDs in the samples argument.

    .. note::

        Due to node ID remapping, the metadata corresponding to nodes becomes a bit more
        difficult to look up.  You need to use the output ID map, the original IDs, and
        the population's metadata containers.

    .. deprecated:: 0.3.0

        Prefer :func:`fwdpy11.simplify_tables`

    .. versionchanged:: 0.3.0

        Ancient samples are no longer kept by default

    .. versionchanged:: 0.5.0

        No longer requires a :class:`MutationVector` argument.
    """
    # Local import so the FutureWarning machinery is only loaded when the
    # deprecated entry point is actually used.
    import warnings

    warnings.warn(
        "This function is deprecated and will be removed soon. Please use fwdpy11.simplify_tables instead",
        category=FutureWarning,
    )

    # Delegate to the C++ implementation, then wrap the low-level tables
    # in the Python-facing TableCollection type.
    ll_t, idmap = fwdpy11._fwdpy11._simplify(pop, samples)
    return fwdpy11._types.TableCollection(ll_t), idmap
def simplify_tables(
    tables: fwdpy11._types.TableCollection, samples: Union[List, np.ndarray]
) -> Tuple[fwdpy11._types.TableCollection, np.ndarray]:
    """
    Simplify a TableCollection.

    :param pop: A table collection.
    :type pop: :class:`fwdpy11.TableCollection`
    :param samples: list of samples
    :type list: list-like or array-like

    :returns: A simplified TableCollection and an array containing remapped sample ids.
    :rtype: tuple

    .. versionadded:: 0.3.0
    """
    # Delegate to the C++ implementation and wrap the result in the
    # Python-facing TableCollection type.
    ll_t, idmap = fwdpy11._fwdpy11._simplify_tables(tables, samples)
    return fwdpy11._types.TableCollection(ll_t), idmap
| molpopgen/fwdpy11 | fwdpy11/_functions/simplify_tables.py | Python | gpl-3.0 | 2,314 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Btc plugin for Varas
Author: Neon & A Sad Loner
Last modified: November 2016
"""
import urllib2
from plugin import Plugin
name = 'Bitcoin'
class Bitcoin(Plugin):
    """Plugin that reports the balance of a Bitcoin wallet address."""

    def __init__(self):
        Plugin.__init__(self,"bitcoin","<wallet> Return current balance from a Bitcoin wallet","A Sad Loners",1.0)

    def run(self,address):
        """Return the balance of `address` in BTC, queried from blockchain.info.

        The API reports satoshi; 1 BTC = 100000000 satoshi.
        """
        # Fix: removed a leftover debug print of the request URL, which
        # leaked to stdout on every invocation of the plugin.
        try:
            api = urllib2.urlopen("https://blockchain.info/it/q/addressbalance/"+address)
        except:
            # Best-effort: network/HTTP failures become a generic reply.
            return "Unknown Error"
        resp = api.read()
        satoshi = float(resp)
        btc = satoshi/100000000
        return "Balance: " + str(btc)
# Copyright (C) 2013 Bernd Feige
# This file is part of avg_q and released under the GPL v3 (see avg_q/COPYING).
"""
Presentation utilities.
"""
from . import trgfile
class PresLog(object):
    # Basic log file reading.
    #
    # Parses the header of a Presentation .log file (scenario name and
    # write timestamp), then positions the reader at the start of the
    # requested table so __iter__ yields its data rows.
    def __init__(self,logfile,part='events'):
        '''part can be 'events' or 'trials' for the first or second part'''
        self.logfile=logfile
        self.log=open(self.logfile,"r")
        fileheader=next(self.log).rstrip('\r\n')
        if not fileheader.startswith('Scenario -'):
            raise Exception("PresLog: File doesn't start with 'Scenario'")
        self.scenario=fileheader[11:]
        #print("Scenario: %s" % self.scenario)
        fileheader2=next(self.log).rstrip('\r\n')
        #print("fileheader2: %s" % fileheader2)
        if fileheader2.startswith('Logfile written - '):
            import datetime
            # Timestamp in local time, e.g. "10/31/2013 14:05:59".
            self.timestamp=datetime.datetime.strptime(fileheader2[18:],"%m/%d/%Y %H:%M:%S")
            #print(self.timestamp)
        else:
            self.timestamp=None
        # The first column name identifies which table we are looking at.
        table_start=['Subject','Trial'] if part=='events' else ['Event Type']
        self.header_fields=None
        for line in self.log:
            fields=line.rstrip('\r\n').split('\t')
            if len(fields)<=1: continue
            if self.header_fields is None:
                # The first table is skipped...
                if fields[0] in table_start:
                    self.header_fields=fields
                    self.atstart=True
                    break
    def __iter__(self):
        # Yield the tab-separated data rows of the selected table, stopping
        # at the first empty line after data has started.
        for line in self.log:
            fields=line.rstrip('\r\n').split('\t')
            if len(fields)<=1:
                # Only at the start skip empty line(s)
                if self.atstart: continue
                else: break
            self.atstart=False
            yield fields
    def __del__(self):
        self.close()
    def close(self):
        # Idempotent: safe to call repeatedly (also called from __del__).
        if self.log:
            self.log.close()
            self.log=None
class PresLogfile(trgfile.trgfile):
    # Adapter exposing a Presentation log as a trgfile trigger source.
    def __init__(self,logfile,part='events'):
        self.PL=PresLog(logfile,part)
        trgfile.trgfile.__init__(self,self.PL)
        # Presentation times are in 0.1 ms units -> 10 kHz sampling rate.
        self.preamble['Sfreq']=10000.0
    def rdr(self):
        # Yield (point, code, description) triggers; non-numeric codes are
        # folded into the description with code -1.
        for fields in self.reader:
            data=dict(zip(self.PL.header_fields,fields))
            point=int(data['Time'])
            description=data['Event Type']
            try:
                code=int(data['Code'])
            except:
                code= -1
                description=' '.join([description,data['Code']])
            yield (point, code, description)
    def close(self):
        if self.PL:
            self.PL.close()
            self.PL=None
    def gettuples_abstime(self):
        # We are calculating backwards from the time the log was written, which is given
        # in local time, and it may happen that a DST switch occurred between start and end.
        # Most plots, simply working for a given time from the start, are totally okay if you don't
        # mind that the end times are still in the old frame, but since the local time here may
        # already be in the new frame we have to correct to achieve this "work-from-start" behavior.
        import pytz
        tuples=self.gettuples()
        sfreq=float(self.preamble.get('Sfreq'))
        last_s=pytz.datetime.timedelta(seconds=tuples[-1][0]/sfreq)
        tz_aware_end=pytz.timezone('Europe/Berlin').localize(self.PL.timestamp)
        # This computes the correct local start time considering a possible DST switch and
        # converts it to the TZ-unaware local time we really want...
        self.start_datetime=tz_aware_end.tzinfo.normalize(tz_aware_end-last_s).replace(tzinfo=None)
        return trgfile.trgfile.gettuples_abstime(self)
| berndf/avg_q | python/avg_q/Presentation.py | Python | gpl-3.0 | 3,161 |
#!/usr/bin/env python
#encoding:utf8
#
# file: filter6_tests.py
# author: sl0
# date: 2013-03-06
#
import unittest
from adm6.filter6 import IP6_Filter, Ip6_Filter_Rule
from sys import stdout
from os.path import expanduser as homedir
from ipaddr import IPv6Network
from os import getenv as get_env
# Home directory of the invoking user (read once at import time).
home_dir_replacement = get_env("HOME")
# Shared, initially empty rule dict handed to every Ip6_Filter_Rule under test.
rule = {}
class Ip6_Filter_Rule_tests(unittest.TestCase):
"""
some tests for class Ip6_Filter_Rule
"""
    def test_01_create_Filter_Rule(self):
        """
        fr-01 create Filter_Rule object

        Checks the default flag values and the comment/display field lists
        of a freshly constructed Ip6_Filter_Rule.
        """
        my_err = False
        try:
            f = Ip6_Filter_Rule(rule)
        except:
            my_err = True
        self.assertFalse(my_err)
        # All boolean flags default to False.
        self.assertFalse(f['i_am_d'])
        self.assertFalse(f['i_am_s'])
        self.assertFalse(f['travers'])
        self.assertFalse(f['insec'])
        self.assertFalse(f['noif'])
        self.assertFalse(f['nonew'])
        self.assertFalse(f['nostate'])
        # Default source port range: unprivileged ports.
        self.assertEqual(f['sport'], u'1024:')
        self.assertEqual(['Rule-Nr', 'Pair-Nr', 'RuleText'], f.CommentList)
        self.assertEqual(['Output', 'debuglevel'], f.NeverDisplay)
        displaylist = ['Rule-Nr', 'Pair-Nr', 'System-Name', 'System-Forward',
            'OS', 'Asymmetric', 'RuleText', 'Source', 'Destin', 'Protocol',
            'sport', 'dport', 'Action', 'nonew', 'noif', 'nostate', 'insec',
            'i_am_s', 'i_am_d', 'travers', 'source-if', 'source-rn',
            'src-linklocal', 'src-multicast', 'destin-if', 'destin-rn',
            'dst-linklocal', 'dst-multicast', ]
        self.assertEqual(displaylist, f.DisplayList)
        #f['debuglevel'] = True
        #print f
    def test_02_produce_for_invalid_os_name(self):
        """
        fr-02 produce for invalid os name

        produce() must raise ValueError for an unsupported OS value.
        """
        my_err = False
        try:
            fr = Ip6_Filter_Rule(rule)
        except:
            my_err = True
        fr['OS'] = 'Invalid os name'
        self.assertRaises(ValueError, fr.produce ,stdout)
    def test_03_produce_for_linux_as_traversed(self):
        """
        fr-03 produce for linux as traversed host

        A Debian host that only forwards (travers=True) must emit a pair
        of ip6tables forward_new rules: request and reply direction.
        """
        my_err = False
        try:
            ofile = open("/dev/null", 'w')
            fr = Ip6_Filter_Rule(rule)
            fr['debuglevel'] = False
            fr['Rule-Nr'] = 1
            fr['Pair-Nr'] = 1
            fr['Protocol'] = 1
            fr['Action'] = "accept"
            fr['Source'] = "2001:db8:1::1"
            fr['Destin'] = "2001:db8:2::1"
            fr['Protocol'] = "tcp"
            fr['dport'] = "22"
            fr['System-Forward'] = True
            fr['i_am_s'] = False
            fr['i_am_d'] = False
            fr['travers'] = True
            fr['source-if'] = "eth0"
            fr['destin-if'] = "eth1"
            fr['src-linklocal'] = False
            fr['dst-linklocal'] = False
            fr['OS'] = 'Debian'
        except:
            my_err = True
        fr.produce(ofile)
        expect = """/sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1 -d 2001:db8:2::1 -p tcp --sport 1024: --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A forward_new -i eth1 -d 2001:db8:1::1 -s 2001:db8:2::1 -p tcp --dport 1024: --sport 22 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
echo -n ".";"""
        print "M:", fr.msg
        self.maxDiff = None
        self.assertEquals(expect, fr.msg)
def test_04_produce_for_openbsd(self):
"""
fr-04 produce for OpenBSD
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce(ofile)
expect = "# OpenBSD implementation _not_ ready!"
#expect = """# n o t y e t i m p l e m e n t e d !"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_05_produce_for_bsd(self):
"""
fr-05 produce for BSD
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'BSD'
except:
my_err = True
fr.produce(ofile)
expect = "# IPF is n o t y e t i m p l e m e n t e d !"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_06_produce_for_opensolaris(self):
"""
fr-06 produce for OpenSolaris
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenSolaris'
except:
my_err = True
fr.produce(ofile)
expect = "# IPF is n o t y e t i m p l e m e n t e d !"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_07_produce_for_wxp(self):
"""
fr-07 produce for WXP
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Win-XP-SP3'
except:
my_err = True
fr.produce(ofile)
expect = "# System should not forward until redesigned"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_08_repr_with_debuglevel(self):
"""
fr-08 repr with debuglevel
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
fr['debuglevel'] = True
value = str(fr)
print "V:", value
expect = """# Rule-Nr : 1 #
# Pair-Nr : 1 #
# System-Forward : True #
# OS : Debian #
# Source : 2001:db8:1::1 #
# Destin : 2001:db8:2::1 #
# Protocol : tcp #
# sport : 1024: #
# dport : 22 #
# Action : accept #
# nonew : False #
# noif : False #
# nostate : False #
# insec : False #
# i_am_s : False #
# i_am_d : False #
# travers : True #
# source-if : eth0 #
# src-linklocal : False #
# destin-if : eth0 #
# dst-linklocal : False #
"""
self.assertEquals(expect, value)
def test_09_repr_without_debuglevel(self):
"""
fr-09 repr without debuglevel
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
fr['debuglevel'] = False
fr['Abrakadabra'] = True
value = str(fr)
print "V:", value
expect = """# Rule-Nr : 1 #
# Pair-Nr : 1 #
# Abrakadabra : True #
"""
self.assertEquals(expect, value)
def test_10_produce_for_linux_as_source(self):
"""
fr-10 produce for linux as source host
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['travers'] = False
fr['source-if'] = "eth0"
fr['destin-if'] = "eth1"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A output__new -o eth1 -s 2001:db8:1::1 -d 2001:db8:2::1 -p tcp --sport 1024: --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A input___new -i eth1 -d 2001:db8:1::1 -s 2001:db8:2::1 -p tcp --dport 1024: --sport 22 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
echo -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_11_produce_for_linux_as_source_icmpv6(self):
"""
fr-11 produce for linux as source host icmpv6
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "icmpv6"
fr['dport'] = "echo-request"
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['travers'] = False
fr['noif'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth1"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A output__new -s 2001:db8:1::1 -d 2001:db8:2::1 -p icmpv6 --icmpv6-type echo-request -j ACCEPT -m comment --comment "1,1"\necho -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_12_produce_for_linux_as_source_nonew(self):
"""
fr-12 produce for linux as source host nonew
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "21"
fr['nonew'] = True
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['travers'] = False
fr['source-if'] = "eth0"
fr['destin-if'] = "eth1"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A output__new -o eth1 -s 2001:db8:1::1 -d 2001:db8:2::1 -p tcp --sport 1024: --dport 21 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A input___new -i eth1 -d 2001:db8:1::1 -s 2001:db8:2::1 -p tcp --dport 1024: --sport 21 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
echo -n ".";"""
print fr.msg
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_13_produce_for_linux_as_dest(self):
"""
fr-13 produce for linux as dest host
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = True
fr['travers'] = False
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A input___new -i eth0 -s 2001:db8:1::1 -d 2001:db8:2::1 -p tcp --sport 1024: --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A output__new -o eth0 -d 2001:db8:1::1 -s 2001:db8:2::1 -p tcp --dport 1024: --sport 22 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
echo -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_14_produce_for_linux_as_traversed(self):
"""
fr-14 produce for linux as traversed host
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth1"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1 -d 2001:db8:2::1 -p tcp --sport 1024: --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A forward_new -i eth1 -d 2001:db8:1::1 -s 2001:db8:2::1 -p tcp --dport 1024: --sport 22 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
echo -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_15_produce_for_linux_as_traversed_reject(self):
"""
fr-15 produce for linux reject rule
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "reject"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth1"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1 -d 2001:db8:2::1 -p tcp --sport 1024: --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j REJECT -m comment --comment "1,1"
/sbin/ip6tables -A forward_new -i eth1 -d 2001:db8:1::1 -s 2001:db8:2::1 -p tcp --dport 1024: --sport 22 -m state --state ESTABLISHED,RELATED -j REJECT -m comment --comment "1,1"
echo -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_16_produce_for_linux_as_traversed_drop(self):
"""
fr-16 produce for linux drop rule
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "drop"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth1"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1 -d 2001:db8:2::1 -p tcp --sport 1024: --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j DROP -m comment --comment "1,1"
/sbin/ip6tables -A forward_new -i eth1 -d 2001:db8:1::1 -s 2001:db8:2::1 -p tcp --dport 1024: --sport 22 -m state --state ESTABLISHED,RELATED -j DROP -m comment --comment "1,1"
echo -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_17_produce_for_linux_as_traversed_insec(self):
"""
fr-17 produce for linux accept rule insec
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "22"
fr['insec'] = True
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth1"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1 -d 2001:db8:2::1 -p tcp --sport 0: --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A forward_new -i eth1 -d 2001:db8:1::1 -s 2001:db8:2::1 -p tcp --dport 0: --sport 22 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
echo -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_18_produce_for_linux_ip6(self):
"""
fr-18 produce for linux ip6 accept rule
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['System-Forward'] = True
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1 -d 2001:db8:2::1 -j ACCEPT -m comment --comment "1,1"
echo -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_19_produce_for_linux_ip6_forced(self):
"""
fr-19 produce for linux ip6 forced accept rule
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['System-Forward'] = True
fr['forced'] = True
fr['i_am_s'] = True
fr['i_am_d'] = True
fr['travers'] = True
fr['noif'] = True
fr['nostate'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """/sbin/ip6tables -A output__new -s 2001:db8:1::1 -d 2001:db8:2::1 -j ACCEPT -m comment --comment "1,1"
echo -n ".";/sbin/ip6tables -A input___new -s 2001:db8:1::1 -d 2001:db8:2::1 -j ACCEPT -m comment --comment "1,1"
echo -n ".";/sbin/ip6tables -A forward_new -s 2001:db8:1::1 -d 2001:db8:2::1 -j ACCEPT -m comment --comment "1,1"
echo -n ".";"""
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_20_produce_for_linux_forward_forbidden(self):
"""
fr-20 produce for linux ip6 forward forbidden
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['System-Forward'] = False
fr['forced'] = False
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['noif'] = True
fr['nostate'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = """# System-Forward: False ==> no rule generated"""
self.maxDiff = None
#print "M:", fr.msg
self.assertEquals(expect, fr.msg)
def test_21_produce_for_linux_forward_linklocal(self):
"""
fr-21 produce for linux forward linklocal
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 1
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "fe80::e:db8:1:1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['System-Forward'] = True
fr['forced'] = False
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "eth0"
fr['destin-if'] = "eth0"
fr['src-linklocal'] = True
fr['dst-linklocal'] = False
fr['OS'] = 'Debian'
except:
my_err = True
fr.produce(ofile)
expect = "# link-local ==> no forward"
self.maxDiff = None
#print "M:", fr.msg
self.assertEquals(expect, fr.msg)
def test_22_produce_for_openbsd_icmpv6(self):
"""
fr-22 produce for OpenBSD icmpv6
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Protocol'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "icmpv6"
fr['dport'] = "echo-request"
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce(ofile)
expect = "# OpenBSD implementation _not_ ready!"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_23_produce_for_openbsd_tcp_nonew(self):
"""
fr-23 produce for OpenBSD tcp nonew
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "reject"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "4711"
fr['nonew'] = True
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce(ofile)
expect = "# OpenBSD implementation _not_ ready!"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_24_produce_for_openbsd_tcp_drop(self):
"""
fr-24 produce for OpenBSD tcp drop
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "4711"
fr['insec'] = True
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce(ofile)
expect = "# OpenBSD implementation _not_ ready!"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_25_produce_for_openbsd_ip6(self):
"""
fr-25 produce for OpenBSD ip6
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce(ofile)
expect = "# OpenBSD implementation _not_ ready!"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_26_produce_for_openbsd_commented(self):
"""
fr-26 produce for OpenBSD commented
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce_OpenBSD(ofile, True)
expect = "# OpenBSD implementation _not_ ready!"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_27_produce_for_openbsd_commented(self):
"""
fr-27 produce for OpenBSD forward forbidden
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "0:"
fr['System-Forward'] = False
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce_OpenBSD(ofile, False)
expect = "# System does not forward by configuration"
#expect = "# OpenBSD implementation _not_ ready!"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_28_produce_for_openbsd_noif(self):
"""
fr-28 produce for OpenBSD forward noif
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "0:"
fr['noif'] = True
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce_OpenBSD(ofile, False)
expect = "# OpenBSD implementation _not_ ready!"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_29_produce_for_openbsd_dst_linklocal(self):
"""
fr-29 produce for OpenBSD forward dst-link-local
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "0:"
fr['noif'] = True
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = True
fr['OS'] = 'OpenBSD'
except:
my_err = True
fr.produce_OpenBSD(ofile, False)
expect = "# dst-link-local ==> no filter rule generated"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_30_produce_for_wxp_tcp(self):
"""
fr-30 produce for wxp tcp
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "0:"
fr['noif'] = True
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'winxp3'
except:
my_err = True
fr.produce_wxpsp3(ofile, False)
expect = "# WXP-SP3 n o t y e t r e a d y !"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_31_produce_for_wxp_icmpv6(self):
"""
fr-31 produce for wxp icmpv6
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "icmpv6"
fr['dport'] = "echo-request"
fr['noif'] = False
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'winxp3'
except:
my_err = True
fr.produce_wxpsp3(ofile, False)
expect = "# WXP-SP3 n o t y e t r e a d y !"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_32_produce_for_wxp_nonew(self):
"""
fr-32 produce for wxp nonew
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "deny"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "25"
fr['nonew'] = True
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'winxp3'
except:
my_err = True
fr.produce_wxpsp3(ofile, False)
expect = "# WXP-SP3 n o t y e t r e a d y !"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_33_produce_for_wxp_reject_insec(self):
"""
fr-33 produce for wxp reject insec
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "reject"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "tcp"
fr['dport'] = "25"
fr['insec'] = True
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'winxp3'
except:
my_err = True
fr.produce_wxpsp3(ofile, False)
expect = "# WXP-SP3 n o t y e t r e a d y !"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_34_produce_for_wxp_ip6_commented(self):
"""
fr-34 produce for wxp ip6 commented
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['insec'] = False
fr['System-Forward'] = True
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'winxp3'
except:
my_err = True
fr.produce_wxpsp3(ofile, True)
expect = "# WXP-SP3 n o t y e t r e a d y !"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_34_produce_for_wxp_ip6_commented(self):
"""
fr-34 produce for wxp ip6 commented
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['insec'] = False
fr['System-Forward'] = False
fr['i_am_s'] = False
fr['i_am_d'] = False
fr['travers'] = True
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = False
fr['OS'] = 'winxp3'
except:
my_err = True
fr.produce_wxpsp3(ofile, True)
expect = "# System should not forward by configuration"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
def test_35_produce_for_wxp_dst_linklocal(self):
"""
fr-35 produce for wxp dst-linklocal
"""
my_err = False
try:
ofile = open("/dev/null", 'w')
fr = Ip6_Filter_Rule(rule)
fr['debuglevel'] = False
fr['Rule-Nr'] = 11
fr['Pair-Nr'] = 1
fr['Action'] = "accept"
fr['Source'] = "2001:db8:1::1"
fr['Destin'] = "2001:db8:2::1"
fr['Protocol'] = "ip6"
fr['dport'] = "all"
fr['insec'] = False
fr['System-Forward'] = False
fr['i_am_s'] = True
fr['i_am_d'] = False
fr['travers'] = False
fr['source-if'] = "sis0"
fr['destin-if'] = "sis0"
fr['src-linklocal'] = False
fr['dst-linklocal'] = True
fr['OS'] = 'winxp3'
except:
my_err = True
fr.produce_wxpsp3(ofile, True)
expect = "# dst-linklocal ==> no rule generated"
self.maxDiff = None
self.assertEquals(expect, fr.msg)
class Ip6_Filter_tests(unittest.TestCase):
'''some tests for class Ip6_Filter_Rule'''
def test_01_IP6_Filter_create_Debian(self):
"""
ft-01 IP6 Filter create an object for Debian
"""
#init__(self, debuglevel, path, name, os, fwd, asym, interfaces=None):
debug = False
name = "ns"
path = "desc/ns/"
os = "Debian GNU/Linux wheezy"
fwd = False
asym = False
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
self.assertEquals(fi.os, 'Debian')
def test_02_IP6_Filter_create_OpenBSD(self):
"""
ft-02 IP6 Filter create an object for OpenBSD
"""
#init__(self, debuglevel, path, name, os, fwd, asym, interfaces=None):
debug = False
name = "ns"
path = "desc/ns/"
os = "OpenBSD 4.5"
fwd = False
asym = False
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
self.assertEquals(fi.os, 'OpenBSD')
def test_03_IP6_Filter_create_OpenSolaris(self):
"""
ft-03 IP6 Filter create an object for OpenSolaris
"""
#init__(self, debuglevel, path, name, os, fwd, asym, interfaces=None):
debug = False
name = "ns"
path = "desc/ns/"
os = "OpenSolaris unknown version"
fwd = False
asym = False
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
self.assertEquals(fi.os, 'OpenSolaris')
def test_04_IP6_Filter_create_win_xp_sp3(self):
"""
ft-04 IP6 Filter create an object for WXP SP3
"""
#init__(self, debuglevel, path, name, os, fwd, asym, interfaces=None):
debug = False
name = "ns"
path = "desc/ns/"
os = "Win-XP-SP3"
fwd = False
asym = False
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
self.assertEquals(fi.os, 'Win-XP-SP3')
def test_05_IP6_Filter_create_unknown_os(self):
"""
ft-05 IP6 Filter create an object for unknown os
"""
#init__(self, debuglevel, path, name, os, fwd, asym, interfaces=None):
debug = False
name = "ns"
path = "desc/ns/"
os = "Unknown OS"
fwd = False
asym = False
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
self.assertEquals(fi.os, 'Unknown operating system for host: ns')
def test_06_IP6_Filter_append_first_rule(self):
"""
ft-06 IP6 Filter append first rule
"""
debug = False
name = "ns"
path = "desc/ns/"
os = "Debian GNU/Linux"
fwd = False
asym = False
rule_one = ['s', 'd', 'ip6', 'all', 'accept', "#", 'test-comment']
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
self.assertIsInstance(fi, IP6_Filter)
fi.append(rule_one)
expect = [rule_one, ]
self.assertEqual(expect, fi.rules)
def test_07_IP6_Filter_mangle_start_exist(self):
"""
ft-07 IP6 Filter mangle-start exisiting file
"""
debug = False
name = "www"
#path = "HOME_DIR/adm6/desc/www"
mach_dir = "~/adm6/desc/www"
path = homedir(mach_dir)
os = "Debian GNU/Linux"
fwd = False
asym = False
rule_one = ['s', 'd', 'ip6', 'all', 'accept', "#", 'test-comment']
ofile = open("/dev/null", 'w')
fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
fi.msg = ""
self.assertIsInstance(fi, IP6_Filter)
file_to_read = "mangle-startup"
fi.mangle_file(ofile, file_to_read)
expect = "# start reading mangle-file: %s/" % (path)
expect += file_to_read
expect += "# mangle-startup file for testing \n"
value = fi.msg
self.assertEqual(expect, value)
def test_08_IP6_Filter_mangle_end_exist(self):
    """
    ft-08 IP6 Filter mangle-end existing file
    """
    # NOTE(review): the host name is "ns" but the description directory
    # points at "adm6" -- looks intentional for this fixture; confirm
    # against the repository's desc/ layout.
    debug = False
    name = "ns"
    mach_dir = "~/adm6/desc/adm6"
    path = homedir(mach_dir)
    os = "Debian GNU/Linux"
    fwd = False
    asym = False
    fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
    self.assertIsInstance(fi, IP6_Filter)
    file_to_read = "mangle-endup"
    fi.msg = ""
    # Close the /dev/null sink deterministically (the original leaked it).
    with open("/dev/null", 'w') as ofile:
        fi.mangle_file(ofile, file_to_read)
    # mangle-endup is absent in this fixture: the filter records the
    # failed read but marks it as tolerated ("but OK").
    expect = "# failed reading mangle-file: %s/" % (path)
    expect += file_to_read
    expect += ", but OK"
    value = fi.msg
    self.assertEqual(expect, value)
def test_09_IP6_Filter_mangle_end_non_exist(self):
    """
    ft-09 IP6 Filter mangle-end non existing file
    """
    debug = False
    name = "adm6"
    mach_dir = "~/adm6/desc/adm6"
    path = homedir(mach_dir)
    os = "Debian GNU/Linux"
    fwd = False
    asym = False
    fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
    self.assertIsInstance(fi, IP6_Filter)
    file_to_read = "mangle-endup"
    fi.msg = ""
    # Close the /dev/null sink deterministically (the original leaked it).
    with open("/dev/null", 'w') as ofile:
        fi.mangle_file(ofile, file_to_read)
    # A missing mangle-endup file is tolerated: the message records the
    # failed read but flags it as OK.  (The original built the same string
    # incrementally and then overwrote it -- that dead code is removed.)
    temp = "# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-endup, but OK"
    expect = temp.replace("HOME_DIR", home_dir_replacement)
    value = fi.msg
    self.assertEqual(expect, value)
def test_10_IP6_Filter_final_this_rule(self):
    """
    ft-10 IP6 Filter final this rule
    """
    debug = True
    name = "ns"
    mach_dir = "~/adm6/desc/ns"
    path = homedir(mach_dir)
    os = "Debian GNU/Linux"
    fwd = False
    asym = False
    fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
    self.assertIsInstance(fi, IP6_Filter)
    # Positional rule layout: text, system-fwd, rule-nr, pair-nr, i_am_s,
    # i_am_d, source, destin, source-if, source-rn, destin-if, destin-rn,
    # protocol, dport, action, options.
    rule = []
    rule.append("RuleText")  # RuleText
    rule.append(True)  # System-Fwd
    rule.append(2)  # Rule-Nr.
    rule.append(3)  # Pair-Nr.
    rule.append(True)  # i_am_s
    rule.append(False)  # i_am_d
    rule.append(IPv6Network('fe80::1'))  # source
    rule.append(IPv6Network('ff80::4711'))  # destin
    rule.append('eth0')  # source-if
    rule.append(3)  # source-rn
    rule.append('eth0')  # destin-if
    rule.append(3)  # destin-rn
    rule.append('udp')  # protocol
    rule.append('4711:4713')  # dport
    rule.append('accept')  # action
    rule.append('NOIF NOSTATE')  # append options at last
    fi.rules.append(rule)
    # Close the /dev/null sink deterministically (the original leaked it).
    with open("/dev/null", 'w') as ofile:
        fi.final_this_rule(rule, ofile)
    # Expected debug trace plus the generated ip6tables command pair.
    expect = """# ---------------------------------------------------------------------------- #
# Rule-Nr         : 2                                                          #
# Pair-Nr         : 3                                                          #
# System-Name     : ns                                                         #
# System-Forward  : True                                                       #
# OS              : Debian                                                     #
# Asymmetric      : False                                                      #
# RuleText        : RuleText                                                   #
# Source          : fe80::1/128                                                #
# Destin          : ff80::4711/128                                             #
# Protocol        : udp                                                        #
# sport           : 1024:                                                      #
# dport           : 4711:4713                                                  #
# Action          : accept                                                     #
# nonew           : False                                                      #
# noif            : True                                                       #
# nostate         : True                                                       #
# insec           : False                                                      #
# i_am_s          : True                                                       #
# i_am_d          : False                                                      #
# travers         : False                                                      #
# source-if       : eth0                                                       #
# source-rn       : 3                                                          #
# src-linklocal   : True                                                       #
# src-multicast   : False                                                      #
# destin-if       : eth0                                                       #
# destin-rn       : 3                                                          #
# dst-linklocal   : False                                                      #
# dst-multicast   : True                                                       #
/sbin/ip6tables -A output__new -s fe80::1/128 -d ff80::4711/128 -p udp --sport 1024: --dport 4711:4713 -j ACCEPT -m comment --comment "2,3"
/sbin/ip6tables -A input___new -d fe80::1/128 -s ff80::4711/128 -p udp --dport 1024: --sport 4711:4713 -j ACCEPT -m comment --comment "2,3"
echo -n ".";"""
    # The original read fi.msg twice; once is enough.
    value = fi.msg
    self.assertEqual(expect, value)
def test_11_IP6_Filter_final_this_rule_forced_linklocal(self):
    """
    ft-11 IP6 Filter final this rule forced linklocal
    """
    debug = True
    name = "ns"
    mach_dir = "~/adm6/desc/ns"
    path = homedir(mach_dir)
    os = "Debian GNU/Linux"
    fwd = False
    asym = False
    fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
    self.assertIsInstance(fi, IP6_Filter)
    # Same positional layout as ft-10, but with the FORCED option so that
    # the link-local source suppresses rule generation.
    rule = []
    rule.append("RuleText")  # RuleText
    rule.append(True)  # System-Fwd
    rule.append(2)  # Rule-Nr.
    rule.append(3)  # Pair-Nr.
    rule.append(True)  # i_am_s
    rule.append(False)  # i_am_d
    rule.append(IPv6Network('fe80::1'))  # source
    rule.append(IPv6Network('ff80::4711'))  # destin
    rule.append('eth0')  # source-if
    rule.append(3)  # source-rn
    rule.append('eth0')  # destin-if
    rule.append(3)  # destin-rn
    rule.append('udp')  # protocol
    rule.append('4711:4713')  # dport
    rule.append('accept')  # action
    rule.append('NOIF NOSTATE FORCED')  # options at last
    fi.rules.append(rule)
    # Close the /dev/null sink deterministically (the original leaked it).
    with open("/dev/null", 'w') as ofile:
        fi.final_this_rule(rule, ofile)
    # Forced link-local traffic must end in "no forward" -- no ip6tables
    # command lines are expected after the debug trace.
    expect = """# ---------------------------------------------------------------------------- #
# Rule-Nr         : 2                                                          #
# Pair-Nr         : 3                                                          #
# System-Name     : ns                                                         #
# System-Forward  : True                                                       #
# OS              : Debian                                                     #
# Asymmetric      : False                                                      #
# RuleText        : RuleText                                                   #
# Source          : fe80::1/128                                                #
# Destin          : ff80::4711/128                                             #
# Protocol        : udp                                                        #
# sport           : 1024:                                                      #
# dport           : 4711:4713                                                  #
# Action          : accept                                                     #
# nonew           : False                                                      #
# noif            : True                                                       #
# nostate         : True                                                       #
# insec           : False                                                      #
# i_am_s          : True                                                       #
# i_am_d          : True                                                       #
# travers         : True                                                       #
# source-if       : eth0                                                       #
# source-rn       : 3                                                          #
# src-linklocal   : True                                                       #
# src-multicast   : False                                                      #
# destin-if       : eth0                                                       #
# destin-rn       : 3                                                          #
# dst-linklocal   : False                                                      #
# dst-multicast   : True                                                       #
# link-local ==> no forward"""
    # The original read fi.msg twice; once is enough.
    value = fi.msg
    self.assertEqual(expect, value)
def test_12_IP6_Filter_mach_output_as_src(self):
    """
    ft-12 IP6 Filter mach_output as src
    """
    debug = True
    name = "adm6"
    mach_dir = "~/adm6/desc/%s" % (name)
    path = homedir(mach_dir)
    os = "Debian GNU/Linux"
    fwd = False
    asym = False
    ofilename = "/dev/null"
    fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
    self.assertIsInstance(fi, IP6_Filter)
    # Positional rule layout: text, system-fwd, rule-nr, pair-nr, i_am_s,
    # i_am_d, source, destin, source-if, source-rn, destin-if, destin-rn,
    # protocol, dport, action, options.
    rule = []
    rule.append("should be RuleText")  # RuleText
    rule.append(True)  # System-Fwd
    rule.append(1)  # Rule-Nr.
    rule.append(1)  # Pair-Nr.
    rule.append(True)  # i_am_s
    rule.append(False)  # i_am_d
    rule.append(IPv6Network('2001:db8:1::1'))  # source
    rule.append(IPv6Network('2001:db8:2::11'))  # destin
    rule.append('eth0')  # source-if
    rule.append(1)  # source-rn
    rule.append('eth0')  # destin-if
    rule.append(1)  # destin-rn
    rule.append('udp')  # protocol
    rule.append('4711')  # dport
    rule.append('accept')  # action
    rule.append('NOIF NOSTATE FORCED')  # options at last
    fi.rules.append(rule)
    fi.mach_output(ofilename)
    value = fi.msg
    # Full expected generator output; HOME_DIR is substituted with the
    # actual home directory below.
    temp = """#!/bin/bash
#
echo "**********************************************************************"
echo "**********************************************************************"
echo "##                                                                  ##"
echo "##   a d m 6  -  A Device  Manager for IPv6 packetfiltering         ##"
echo "##                                                                  ##"
echo "##   version:      0.2                                              ##"
echo "##                                                                  ##"
echo "##   device-name:  adm6                                             ##"
echo "##   device-type:  Debian GNU/Linux                                 ##"
echo "##                                                                  ##"
echo "##   date:         2013-03-13 23:23                                 ##"
echo "##   author:       Johannes Hubertz, hubertz-it-consulting GmbH     ##"
echo "##                                                                  ##"
echo "##   license:      GNU general public license version 3             ##"
echo "##                 or any later version                             ##"
echo "##                                                                  ##"
echo "**********************************************************************"
echo "**********************************************************************"
echo "##                                                                  ##"
echo "##   some magic abbreviations follow                                ##"
echo "##                                                                  ##"
#
#POLICY_A='ACCEPT'
POLICY_D='DROP'
#
I6='/sbin/ip6tables '
IP6I='/sbin/ip6tables -A input___new '
IP6O='/sbin/ip6tables -A output__new '
IP6F='/sbin/ip6tables -A forward_new '
#
CHAINS="$CHAINS input__"
CHAINS="$CHAINS output_"
CHAINS="$CHAINS forward"
for chain in $CHAINS
do
    /sbin/ip6tables -N ${chain}_act >/dev/null 2>/dev/null
    /sbin/ip6tables -N ${chain}_new
done
# but ignore all the boring fault-messages
$I6 -P INPUT   $POLICY_D
$I6 -P OUTPUT  $POLICY_D
$I6 -P FORWARD $POLICY_D
#
# some things need to pass,
# even if you don't like them
# do local and multicast on every interface
LOCAL="fe80::/10"
MCAST="ff02::/10"
#
$IP6I -p ipv6-icmp -s ${LOCAL} -d ${LOCAL} -j ACCEPT
$IP6O -p ipv6-icmp -s ${LOCAL} -d ${LOCAL} -j ACCEPT
#
$IP6I -p ipv6-icmp -s ${MCAST} -j ACCEPT
$IP6I -p ipv6-icmp -d ${MCAST} -j ACCEPT
$IP6O -p ipv6-icmp -s ${MCAST} -j ACCEPT
#
# all prepared now, individual mangling and rules following
#
# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-startup, but OK
# ---------------------------------------------------------------------------- #
# Rule-Nr         : 1                                                          #
# Pair-Nr         : 1                                                          #
# System-Name     : adm6                                                       #
# System-Forward  : True                                                       #
# OS              : Debian                                                     #
# Asymmetric      : False                                                      #
# RuleText        : should be RuleText                                         #
# Source          : 2001:db8:1::1/128                                          #
# Destin          : 2001:db8:2::11/128                                         #
# Protocol        : udp                                                        #
# sport           : 1024:                                                      #
# dport           : 4711                                                       #
# Action          : accept                                                     #
# nonew           : False                                                      #
# noif            : True                                                       #
# nostate         : True                                                       #
# insec           : False                                                      #
# i_am_s          : True                                                       #
# i_am_d          : True                                                       #
# travers         : True                                                       #
# source-if       : eth0                                                       #
# source-rn       : 1                                                          #
# src-linklocal   : False                                                      #
# src-multicast   : False                                                      #
# destin-if       : eth0                                                       #
# destin-rn       : 1                                                          #
# dst-linklocal   : False                                                      #
# dst-multicast   : False                                                      #
/sbin/ip6tables -A output__new -s 2001:db8:1::1/128 -d 2001:db8:2::11/128 -p udp --sport 1024: --dport 4711 -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A input___new -d 2001:db8:1::1/128 -s 2001:db8:2::11/128 -p udp --dport 1024: --sport 4711 -j ACCEPT -m comment --comment "1,1"
echo -n ".";/sbin/ip6tables -A input___new -s 2001:db8:1::1/128 -d 2001:db8:2::11/128 -p udp --sport 1024: --dport 4711 -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A output__new -d 2001:db8:1::1/128 -s 2001:db8:2::11/128 -p udp --dport 1024: --sport 4711 -j ACCEPT -m comment --comment "1,1"
echo -n ".";/sbin/ip6tables -A forward_new -s 2001:db8:1::1/128 -d 2001:db8:2::11/128 -p udp --sport 1024: --dport 4711 -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A forward_new -d 2001:db8:1::1/128 -s 2001:db8:2::11/128 -p udp --dport 1024: --sport 4711 -j ACCEPT -m comment --comment "1,1"
echo -n ".";# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-endup, but OK#
#$IP6I -p tcp  --dport 22 -j ACCEPT
#$IP6O -p tcp  --sport 22 -j ACCEPT
#
# allow ping and pong always (al gusto)
#$IP6O -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT
#$IP6I -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT
##
#$IP6I -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT
#$IP6O -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT
#
#ICMPv6types="${ICMPv6types} destination-unreachable"
ICMPv6types="${ICMPv6types} echo-request"
ICMPv6types="${ICMPv6types} echo-reply"
ICMPv6types="${ICMPv6types} neighbour-solicitation"
ICMPv6types="${ICMPv6types} neighbour-advertisement"
ICMPv6types="${ICMPv6types} router-solicitation"
ICMPv6types="${ICMPv6types} router-advertisement"
for icmptype in $ICMPv6types
do
    $IP6I -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT
    $IP6O -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT
done
$IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j LOG --log-prefix "unreach: " -m limit --limit 30/second --limit-burst 60
$IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j ACCEPT
#
CHAINS=""
CHAINS="$CHAINS input__"
CHAINS="$CHAINS output_"
CHAINS="$CHAINS forward"
#set -x
for chain in $CHAINS
do
    /sbin/ip6tables -E "${chain}_act" "${chain}_old"
    /sbin/ip6tables -E "${chain}_new" "${chain}_act"
done
#
$I6 -F INPUT
$I6 -A INPUT   -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6
$I6 -A INPUT   -m rt --rt-type 0 -j DROP
$I6 -A INPUT   -m rt --rt-type 2 -j LOG --log-prefix "rt-2: " -m limit --limit 3/second --limit-burst 6
$I6 -A INPUT   -m rt --rt-type 2 -j DROP
$I6 -A INPUT   -i lo -j ACCEPT
$I6 -A INPUT   --jump input___act
#
$I6 -F OUTPUT
$I6 -A OUTPUT  -o lo -j ACCEPT
$I6 -A OUTPUT  --jump output__act
#
$I6 -F FORWARD
$I6 -A FORWARD -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6
$I6 -A FORWARD -m rt --rt-type 0 -j DROP
$I6 -A FORWARD --jump forward_act
#
for chain in $CHAINS
do
    /sbin/ip6tables -F "${chain}_old"
    /sbin/ip6tables -X "${chain}_old"
done
$I6 -F logdrop >/dev/null 2>/dev/null
$I6 -X logdrop >/dev/null 2>/dev/null
$I6 -N logdrop
$I6 -A INPUT   --jump logdrop
$I6 -A OUTPUT  --jump logdrop
$I6 -A FORWARD --jump logdrop
$I6 -A logdrop -j LOG --log-prefix "drp: " -m limit --limit 3/second --limit-burst 6
$I6 -A logdrop -j DROP
#
/sbin/ip6tables-save -c >/root/last-filter
echo "**********************************************************************"
echo "**********************************************************************"
echo "##                                                                  ##"
echo "##   End of generated filter-rules                                  ##"
echo "##                                                                  ##"
echo "**********************************************************************"
echo "**********************************************************************"
# EOF
"""
    expect = temp.replace("HOME_DIR", home_dir_replacement)
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expect, value)
def test_13_IP6_Filter_mach_output_as_travers(self):
    """
    ft-13 IP6 Filter mach_output as travers
    """
    debug = True
    name = "adm6"
    mach_dir = "~/adm6/desc/%s" % (name)
    path = homedir(mach_dir)
    os = "Debian GNU/Linux"
    fwd = False
    asym = False
    ofilename = "/dev/null"
    fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
    self.assertIsInstance(fi, IP6_Filter)
    # Neither source nor destination is this host: the traffic traverses
    # it, so only forward_new rules with interface matches are expected.
    rule = []
    rule.append("should be RuleText")  # RuleText
    rule.append(True)  # System-Fwd
    rule.append(1)  # Rule-Nr.
    rule.append(1)  # Pair-Nr.
    rule.append(False)  # i_am_s
    rule.append(False)  # i_am_d
    rule.append(IPv6Network('2001:db8:1::1'))  # source
    rule.append(IPv6Network('2001:db8:2::11'))  # destin
    rule.append('eth0')  # source-if
    rule.append(1)  # source-rn
    rule.append('eth1')  # destin-if
    rule.append(3)  # destin-rn
    rule.append('udp')  # protocol
    rule.append('4711')  # dport
    rule.append('accept')  # action
    rule.append('NOSTATE')  # options at last
    fi.rules.append(rule)
    fi.mach_output(ofilename)
    value = fi.msg
    temp = """#!/bin/bash
#
echo "**********************************************************************"
echo "**********************************************************************"
echo "##                                                                  ##"
echo "##   a d m 6  -  A Device  Manager for IPv6 packetfiltering         ##"
echo "##                                                                  ##"
echo "##   version:      0.2                                              ##"
echo "##                                                                  ##"
echo "##   device-name:  adm6                                             ##"
echo "##   device-type:  Debian GNU/Linux                                 ##"
echo "##                                                                  ##"
echo "##   date:         2013-03-13 23:23                                 ##"
echo "##   author:       Johannes Hubertz, hubertz-it-consulting GmbH     ##"
echo "##                                                                  ##"
echo "##   license:      GNU general public license version 3             ##"
echo "##                 or any later version                             ##"
echo "##                                                                  ##"
echo "**********************************************************************"
echo "**********************************************************************"
echo "##                                                                  ##"
echo "##   some magic abbreviations follow                                ##"
echo "##                                                                  ##"
#
#POLICY_A='ACCEPT'
POLICY_D='DROP'
#
I6='/sbin/ip6tables '
IP6I='/sbin/ip6tables -A input___new '
IP6O='/sbin/ip6tables -A output__new '
IP6F='/sbin/ip6tables -A forward_new '
#
CHAINS="$CHAINS input__"
CHAINS="$CHAINS output_"
CHAINS="$CHAINS forward"
for chain in $CHAINS
do
    /sbin/ip6tables -N ${chain}_act >/dev/null 2>/dev/null
    /sbin/ip6tables -N ${chain}_new
done
# but ignore all the boring fault-messages
$I6 -P INPUT   $POLICY_D
$I6 -P OUTPUT  $POLICY_D
$I6 -P FORWARD $POLICY_D
#
# some things need to pass,
# even if you don't like them
# do local and multicast on every interface
LOCAL="fe80::/10"
MCAST="ff02::/10"
#
$IP6I -p ipv6-icmp -s ${LOCAL} -d ${LOCAL} -j ACCEPT
$IP6O -p ipv6-icmp -s ${LOCAL} -d ${LOCAL} -j ACCEPT
#
$IP6I -p ipv6-icmp -s ${MCAST} -j ACCEPT
$IP6I -p ipv6-icmp -d ${MCAST} -j ACCEPT
$IP6O -p ipv6-icmp -s ${MCAST} -j ACCEPT
#
# all prepared now, individual mangling and rules following
#
# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-startup, but OK
# ---------------------------------------------------------------------------- #
# Rule-Nr         : 1                                                          #
# Pair-Nr         : 1                                                          #
# System-Name     : adm6                                                       #
# System-Forward  : True                                                       #
# OS              : Debian                                                     #
# Asymmetric      : False                                                      #
# RuleText        : should be RuleText                                         #
# Source          : 2001:db8:1::1/128                                          #
# Destin          : 2001:db8:2::11/128                                         #
# Protocol        : udp                                                        #
# sport           : 1024:                                                      #
# dport           : 4711                                                       #
# Action          : accept                                                     #
# nonew           : False                                                      #
# noif            : False                                                      #
# nostate         : True                                                       #
# insec           : False                                                      #
# i_am_s          : False                                                      #
# i_am_d          : False                                                      #
# travers         : True                                                       #
# source-if       : eth0                                                       #
# source-rn       : 1                                                          #
# src-linklocal   : False                                                      #
# src-multicast   : False                                                      #
# destin-if       : eth1                                                       #
# destin-rn       : 3                                                          #
# dst-linklocal   : False                                                      #
# dst-multicast   : False                                                      #
/sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1/128 -d 2001:db8:2::11/128 -p udp --sport 1024: --dport 4711 -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A forward_new -i eth1 -d 2001:db8:1::1/128 -s 2001:db8:2::11/128 -p udp --dport 1024: --sport 4711 -j ACCEPT -m comment --comment "1,1"
echo -n ".";# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-endup, but OK#
#$IP6I -p tcp  --dport 22 -j ACCEPT
#$IP6O -p tcp  --sport 22 -j ACCEPT
#
# allow ping and pong always (al gusto)
#$IP6O -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT
#$IP6I -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT
##
#$IP6I -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT
#$IP6O -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT
#
#ICMPv6types="${ICMPv6types} destination-unreachable"
ICMPv6types="${ICMPv6types} echo-request"
ICMPv6types="${ICMPv6types} echo-reply"
ICMPv6types="${ICMPv6types} neighbour-solicitation"
ICMPv6types="${ICMPv6types} neighbour-advertisement"
ICMPv6types="${ICMPv6types} router-solicitation"
ICMPv6types="${ICMPv6types} router-advertisement"
for icmptype in $ICMPv6types
do
    $IP6I -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT
    $IP6O -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT
done
$IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j LOG --log-prefix "unreach: " -m limit --limit 30/second --limit-burst 60
$IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j ACCEPT
#
CHAINS=""
CHAINS="$CHAINS input__"
CHAINS="$CHAINS output_"
CHAINS="$CHAINS forward"
#set -x
for chain in $CHAINS
do
    /sbin/ip6tables -E "${chain}_act" "${chain}_old"
    /sbin/ip6tables -E "${chain}_new" "${chain}_act"
done
#
$I6 -F INPUT
$I6 -A INPUT   -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6
$I6 -A INPUT   -m rt --rt-type 0 -j DROP
$I6 -A INPUT   -m rt --rt-type 2 -j LOG --log-prefix "rt-2: " -m limit --limit 3/second --limit-burst 6
$I6 -A INPUT   -m rt --rt-type 2 -j DROP
$I6 -A INPUT   -i lo -j ACCEPT
$I6 -A INPUT   --jump input___act
#
$I6 -F OUTPUT
$I6 -A OUTPUT  -o lo -j ACCEPT
$I6 -A OUTPUT  --jump output__act
#
$I6 -F FORWARD
$I6 -A FORWARD -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6
$I6 -A FORWARD -m rt --rt-type 0 -j DROP
$I6 -A FORWARD --jump forward_act
#
for chain in $CHAINS
do
    /sbin/ip6tables -F "${chain}_old"
    /sbin/ip6tables -X "${chain}_old"
done
$I6 -F logdrop >/dev/null 2>/dev/null
$I6 -X logdrop >/dev/null 2>/dev/null
$I6 -N logdrop
$I6 -A INPUT   --jump logdrop
$I6 -A OUTPUT  --jump logdrop
$I6 -A FORWARD --jump logdrop
$I6 -A logdrop -j LOG --log-prefix "drp: " -m limit --limit 3/second --limit-burst 6
$I6 -A logdrop -j DROP
#
/sbin/ip6tables-save -c >/root/last-filter
echo "**********************************************************************"
echo "**********************************************************************"
echo "##                                                                  ##"
echo "##   End of generated filter-rules                                  ##"
echo "##                                                                  ##"
echo "**********************************************************************"
echo "**********************************************************************"
# EOF
"""
    expect = temp.replace("HOME_DIR", home_dir_replacement)
    # assertEquals is a deprecated alias (removed in Python 3.12); the
    # commented-out Python-2 debug print was dropped.
    self.assertEqual(expect, value)
def test_14_IP6_Filter_mach_output_as_stateful_travers(self):
    """
    ft-14 IP6 Filter mach_output as stateful travers
    """
    debug = True
    name = "adm6"
    mach_dir = "~/adm6/desc/%s" % (name)
    path = homedir(mach_dir)
    os = "Debian GNU/Linux"
    fwd = False
    asym = False
    ofilename = "/dev/null"
    fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
    self.assertIsInstance(fi, IP6_Filter)
    # Same traversal scenario as ft-13, but with no NOSTATE option, so the
    # generated forward rules must carry "-m state" matches.
    rule = []
    rule.append("should be RuleText")  # RuleText
    rule.append(True)  # System-Fwd
    rule.append(1)  # Rule-Nr.
    rule.append(1)  # Pair-Nr.
    rule.append(False)  # i_am_s
    rule.append(False)  # i_am_d
    rule.append(IPv6Network('2001:db8:1::1'))  # source
    rule.append(IPv6Network('2001:db8:2::11'))  # destin
    rule.append('eth0')  # source-if
    rule.append(1)  # source-rn
    rule.append('eth1')  # destin-if
    rule.append(3)  # destin-rn
    rule.append('udp')  # protocol
    rule.append('4711')  # dport
    rule.append('accept')  # action
    rule.append('')  # options at last
    fi.rules.append(rule)
    fi.mach_output(ofilename)
    value = fi.msg
    temp = """#!/bin/bash
#
echo "**********************************************************************"
echo "**********************************************************************"
echo "##                                                                  ##"
echo "##   a d m 6  -  A Device  Manager for IPv6 packetfiltering         ##"
echo "##                                                                  ##"
echo "##   version:      0.2                                              ##"
echo "##                                                                  ##"
echo "##   device-name:  adm6                                             ##"
echo "##   device-type:  Debian GNU/Linux                                 ##"
echo "##                                                                  ##"
echo "##   date:         2013-03-13 23:23                                 ##"
echo "##   author:       Johannes Hubertz, hubertz-it-consulting GmbH     ##"
echo "##                                                                  ##"
echo "##   license:      GNU general public license version 3             ##"
echo "##                 or any later version                             ##"
echo "##                                                                  ##"
echo "**********************************************************************"
echo "**********************************************************************"
echo "##                                                                  ##"
echo "##   some magic abbreviations follow                                ##"
echo "##                                                                  ##"
#
#POLICY_A='ACCEPT'
POLICY_D='DROP'
#
I6='/sbin/ip6tables '
IP6I='/sbin/ip6tables -A input___new '
IP6O='/sbin/ip6tables -A output__new '
IP6F='/sbin/ip6tables -A forward_new '
#
CHAINS="$CHAINS input__"
CHAINS="$CHAINS output_"
CHAINS="$CHAINS forward"
for chain in $CHAINS
do
    /sbin/ip6tables -N ${chain}_act >/dev/null 2>/dev/null
    /sbin/ip6tables -N ${chain}_new
done
# but ignore all the boring fault-messages
$I6 -P INPUT   $POLICY_D
$I6 -P OUTPUT  $POLICY_D
$I6 -P FORWARD $POLICY_D
#
# some things need to pass,
# even if you don't like them
# do local and multicast on every interface
LOCAL="fe80::/10"
MCAST="ff02::/10"
#
$IP6I -p ipv6-icmp -s ${LOCAL} -d ${LOCAL} -j ACCEPT
$IP6O -p ipv6-icmp -s ${LOCAL} -d ${LOCAL} -j ACCEPT
#
$IP6I -p ipv6-icmp -s ${MCAST} -j ACCEPT
$IP6I -p ipv6-icmp -d ${MCAST} -j ACCEPT
$IP6O -p ipv6-icmp -s ${MCAST} -j ACCEPT
#
# all prepared now, individual mangling and rules following
#
# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-startup, but OK
# ---------------------------------------------------------------------------- #
# Rule-Nr         : 1                                                          #
# Pair-Nr         : 1                                                          #
# System-Name     : adm6                                                       #
# System-Forward  : True                                                       #
# OS              : Debian                                                     #
# Asymmetric      : False                                                      #
# RuleText        : should be RuleText                                         #
# Source          : 2001:db8:1::1/128                                          #
# Destin          : 2001:db8:2::11/128                                         #
# Protocol        : udp                                                        #
# sport           : 1024:                                                      #
# dport           : 4711                                                       #
# Action          : accept                                                     #
# nonew           : False                                                      #
# noif            : False                                                      #
# nostate         : False                                                      #
# insec           : False                                                      #
# i_am_s          : False                                                      #
# i_am_d          : False                                                      #
# travers         : True                                                       #
# source-if       : eth0                                                       #
# source-rn       : 1                                                          #
# src-linklocal   : False                                                      #
# src-multicast   : False                                                      #
# destin-if       : eth1                                                       #
# destin-rn       : 3                                                          #
# dst-linklocal   : False                                                      #
# dst-multicast   : False                                                      #
/sbin/ip6tables -A forward_new -i eth0 -s 2001:db8:1::1/128 -d 2001:db8:2::11/128 -p udp --sport 1024: --dport 4711 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A forward_new -i eth1 -d 2001:db8:1::1/128 -s 2001:db8:2::11/128 -p udp --dport 1024: --sport 4711 -m state --state ESTABLISHED,RELATED -j ACCEPT -m comment --comment "1,1"
echo -n ".";# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-endup, but OK#
#$IP6I -p tcp  --dport 22 -j ACCEPT
#$IP6O -p tcp  --sport 22 -j ACCEPT
#
# allow ping and pong always (al gusto)
#$IP6O -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT
#$IP6I -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT
##
#$IP6I -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT
#$IP6O -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT
#
#ICMPv6types="${ICMPv6types} destination-unreachable"
ICMPv6types="${ICMPv6types} echo-request"
ICMPv6types="${ICMPv6types} echo-reply"
ICMPv6types="${ICMPv6types} neighbour-solicitation"
ICMPv6types="${ICMPv6types} neighbour-advertisement"
ICMPv6types="${ICMPv6types} router-solicitation"
ICMPv6types="${ICMPv6types} router-advertisement"
for icmptype in $ICMPv6types
do
    $IP6I -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT
    $IP6O -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT
done
$IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j LOG --log-prefix "unreach: " -m limit --limit 30/second --limit-burst 60
$IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j ACCEPT
#
CHAINS=""
CHAINS="$CHAINS input__"
CHAINS="$CHAINS output_"
CHAINS="$CHAINS forward"
#set -x
for chain in $CHAINS
do
    /sbin/ip6tables -E "${chain}_act" "${chain}_old"
    /sbin/ip6tables -E "${chain}_new" "${chain}_act"
done
#
$I6 -F INPUT
$I6 -A INPUT   -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6
$I6 -A INPUT   -m rt --rt-type 0 -j DROP
$I6 -A INPUT   -m rt --rt-type 2 -j LOG --log-prefix "rt-2: " -m limit --limit 3/second --limit-burst 6
$I6 -A INPUT   -m rt --rt-type 2 -j DROP
$I6 -A INPUT   -i lo -j ACCEPT
$I6 -A INPUT   --jump input___act
#
$I6 -F OUTPUT
$I6 -A OUTPUT  -o lo -j ACCEPT
$I6 -A OUTPUT  --jump output__act
#
$I6 -F FORWARD
$I6 -A FORWARD -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6
$I6 -A FORWARD -m rt --rt-type 0 -j DROP
$I6 -A FORWARD --jump forward_act
#
for chain in $CHAINS
do
    /sbin/ip6tables -F "${chain}_old"
    /sbin/ip6tables -X "${chain}_old"
done
$I6 -F logdrop >/dev/null 2>/dev/null
$I6 -X logdrop >/dev/null 2>/dev/null
$I6 -N logdrop
$I6 -A INPUT   --jump logdrop
$I6 -A OUTPUT  --jump logdrop
$I6 -A FORWARD --jump logdrop
$I6 -A logdrop -j LOG --log-prefix "drp: " -m limit --limit 3/second --limit-burst 6
$I6 -A logdrop -j DROP
#
/sbin/ip6tables-save -c >/root/last-filter
echo "**********************************************************************"
echo "**********************************************************************"
echo "##                                                                  ##"
echo "##   End of generated filter-rules                                  ##"
echo "##                                                                  ##"
echo "**********************************************************************"
echo "**********************************************************************"
# EOF
"""
    expect = temp.replace("HOME_DIR", home_dir_replacement)
    # The length comparison first gives a quicker failure hint before the
    # full-content diff; assertEquals is a deprecated alias of assertEqual.
    value_len = len(value)
    expect_len = len(expect)
    self.assertEqual(expect_len, value_len)
    self.assertEqual(expect, value)
def test_15_IP6_Filter_mach_output_as_real_file(self):
    """
    ft-15 IP6 Filter mach_output as real file
    """
    debug = True
    name = "adm6"
    mach_dir = "~/adm6/desc/%s" % (name)
    path = homedir(mach_dir)
    os = "Debian GNU/Linux"
    fwd = False
    asym = True
    # ofilename None makes mach_output pick its own real output file.
    ofilename = None
    fi = IP6_Filter(debug, path, name, os, fwd, asym, None)
    self.assertIsInstance(fi, IP6_Filter)
    # Host is the destination; NONEW NOIF INSEC options widen the sport
    # range to 0: in the generated rules.
    rule = []
    rule.append("should be RuleText")  # RuleText
    rule.append(True)  # System-Fwd
    rule.append(1)  # Rule-Nr.
    rule.append(1)  # Pair-Nr.
    rule.append(False)  # i_am_s
    rule.append(True)  # i_am_d
    rule.append(IPv6Network('2001:db8:1::1'))  # source
    rule.append(IPv6Network('2001:db8:2::11'))  # destin
    rule.append('eth0')  # source-if
    rule.append(1)  # source-rn
    rule.append('eth1')  # destin-if
    rule.append(3)  # destin-rn
    rule.append('udp')  # protocol
    rule.append('4711')  # dport
    rule.append('accept')  # action
    rule.append('NONEW NOIF INSEC')  # options at last
    fi.rules.append(rule)
    fi.mach_output(ofilename)
    value = fi.msg
    temp = """#!/bin/bash
#
echo "**********************************************************************"
echo "**********************************************************************"
echo "##                                                                  ##"
echo "##   a d m 6  -  A Device  Manager for IPv6 packetfiltering         ##"
echo "##                                                                  ##"
echo "##   version:      0.2                                              ##"
echo "##                                                                  ##"
echo "##   device-name:  adm6                                             ##"
echo "##   device-type:  Debian GNU/Linux                                 ##"
echo "##                                                                  ##"
echo "##   date:         2013-03-18 23:38                                 ##"
echo "##   author:       Johannes Hubertz, hubertz-it-consulting GmbH     ##"
echo "##                                                                  ##"
echo "##   license:      GNU general public license version 3             ##"
echo "##                 or any later version                             ##"
echo "##                                                                  ##"
echo "**********************************************************************"
echo "**********************************************************************"
echo "##                                                                  ##"
echo "##   some magic abbreviations follow                                ##"
echo "##                                                                  ##"
#
#POLICY_A='ACCEPT'
POLICY_D='DROP'
#
I6='/sbin/ip6tables '
IP6I='/sbin/ip6tables -A input___new '
IP6O='/sbin/ip6tables -A output__new '
IP6F='/sbin/ip6tables -A forward_new '
#
CHAINS="$CHAINS input__"
CHAINS="$CHAINS output_"
CHAINS="$CHAINS forward"
for chain in $CHAINS
do
    /sbin/ip6tables -N ${chain}_act >/dev/null 2>/dev/null
    /sbin/ip6tables -N ${chain}_new
done
# but ignore all the boring fault-messages
$I6 -P INPUT   $POLICY_D
$I6 -P OUTPUT  $POLICY_D
$I6 -P FORWARD $POLICY_D
#
# some things need to pass,
# even if you don't like them
# do local and multicast on every interface
LOCAL="fe80::/10"
MCAST="ff02::/10"
#
$IP6I -p ipv6-icmp -s ${LOCAL} -d ${LOCAL} -j ACCEPT
$IP6O -p ipv6-icmp -s ${LOCAL} -d ${LOCAL} -j ACCEPT
#
$IP6I -p ipv6-icmp -s ${MCAST} -j ACCEPT
$IP6I -p ipv6-icmp -d ${MCAST} -j ACCEPT
$IP6O -p ipv6-icmp -s ${MCAST} -j ACCEPT
#
# all prepared now, individual mangling and rules following
#
# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-startup, but OK
# ---------------------------------------------------------------------------- #
# Rule-Nr         : 1                                                          #
# Pair-Nr         : 1                                                          #
# System-Name     : adm6                                                       #
# System-Forward  : True                                                       #
# OS              : Debian                                                     #
# Asymmetric      : True                                                       #
# RuleText        : should be RuleText                                         #
# Source          : 2001:db8:1::1/128                                          #
# Destin          : 2001:db8:2::11/128                                         #
# Protocol        : udp                                                        #
# sport           : 1024:                                                      #
# dport           : 4711                                                       #
# Action          : accept                                                     #
# nonew           : True                                                       #
# noif            : True                                                       #
# nostate         : True                                                       #
# insec           : True                                                       #
# i_am_s          : False                                                      #
# i_am_d          : True                                                       #
# travers         : False                                                      #
# source-if       : eth0                                                       #
# source-rn       : 1                                                          #
# src-linklocal   : False                                                      #
# src-multicast   : False                                                      #
# destin-if       : eth1                                                       #
# destin-rn       : 3                                                          #
# dst-linklocal   : False                                                      #
# dst-multicast   : False                                                      #
/sbin/ip6tables -A input___new -s 2001:db8:1::1/128 -d 2001:db8:2::11/128 -p udp --sport 0: --dport 4711 -j ACCEPT -m comment --comment "1,1"
/sbin/ip6tables -A output__new -d 2001:db8:1::1/128 -s 2001:db8:2::11/128 -p udp --dport 0: --sport 4711 -j ACCEPT -m comment --comment "1,1"
echo -n ".";# failed reading mangle-file: HOME_DIR/adm6/desc/adm6/mangle-endup, but OK#
#$IP6I -p tcp  --dport 22 -j ACCEPT
#$IP6O -p tcp  --sport 22 -j ACCEPT
#
# allow ping and pong always (al gusto)
#$IP6O -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT
#$IP6I -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT
##
#$IP6I -p ipv6-icmp --icmpv6-type echo-request -j ACCEPT
#$IP6O -p ipv6-icmp --icmpv6-type echo-reply -j ACCEPT
#
#ICMPv6types="${ICMPv6types} destination-unreachable"
ICMPv6types="${ICMPv6types} echo-request"
ICMPv6types="${ICMPv6types} echo-reply"
ICMPv6types="${ICMPv6types} neighbour-solicitation"
ICMPv6types="${ICMPv6types} neighbour-advertisement"
ICMPv6types="${ICMPv6types} router-solicitation"
ICMPv6types="${ICMPv6types} router-advertisement"
for icmptype in $ICMPv6types
do
    $IP6I -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT
    $IP6O -p ipv6-icmp --icmpv6-type $icmptype -j ACCEPT
done
$IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j LOG --log-prefix "unreach: " -m limit --limit 30/second --limit-burst 60
$IP6I -p ipv6-icmp --icmpv6-type destination-unreachable -j ACCEPT
#
CHAINS=""
CHAINS="$CHAINS input__"
CHAINS="$CHAINS output_"
CHAINS="$CHAINS forward"
#set -x
for chain in $CHAINS
do
    /sbin/ip6tables -E "${chain}_act" "${chain}_old"
    /sbin/ip6tables -E "${chain}_new" "${chain}_act"
done
#
$I6 -F INPUT
$I6 -A INPUT   -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6
$I6 -A INPUT   -m rt --rt-type 0 -j DROP
$I6 -A INPUT   -m rt --rt-type 2 -j LOG --log-prefix "rt-2: " -m limit --limit 3/second --limit-burst 6
$I6 -A INPUT   -m rt --rt-type 2 -j DROP
$I6 -A INPUT   -i lo -j ACCEPT
$I6 -A INPUT   --jump input___act
#
$I6 -F OUTPUT
$I6 -A OUTPUT  -o lo -j ACCEPT
$I6 -A OUTPUT  --jump output__act
#
$I6 -F FORWARD
$I6 -A FORWARD -m rt --rt-type 0 -j LOG --log-prefix "rt-0: " -m limit --limit 3/second --limit-burst 6
$I6 -A FORWARD -m rt --rt-type 0 -j DROP
$I6 -A FORWARD --jump forward_act
#
for chain in $CHAINS
do
    /sbin/ip6tables -F "${chain}_old"
    /sbin/ip6tables -X "${chain}_old"
done
$I6 -F logdrop >/dev/null 2>/dev/null
$I6 -X logdrop >/dev/null 2>/dev/null
$I6 -N logdrop
$I6 -A INPUT   --jump logdrop
$I6 -A OUTPUT  --jump logdrop
$I6 -A FORWARD --jump logdrop
$I6 -A logdrop -j LOG --log-prefix "drp: " -m limit --limit 3/second --limit-burst 6
$I6 -A logdrop -j DROP
#
/sbin/ip6tables-save -c >/root/last-filter
echo "**********************************************************************"
echo "**********************************************************************"
echo "##                                                                  ##"
echo "##   End of generated filter-rules                                  ##"
echo "##                                                                  ##"
echo "**********************************************************************"
echo "**********************************************************************"
# EOF
"""
    expect = temp.replace("HOME_DIR", home_dir_replacement)
    # NOTE(review): only the lengths are compared here, not the content --
    # presumably because the embedded date line changes per run; confirm
    # before tightening this to a full content assertion.
    value_len = len(value)
    expect_len = len(expect)
    self.assertEqual(expect_len, value_len)
# Run the whole test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| sl0/adm6 | tests/test_03_filter6.py | Python | gpl-3.0 | 102,052 |
#!/usr/bin/env python
#
# ParameterWeaver: a code generator to handle command line parameters
# and configuration files for C/C++/Fortran/R/Octave
# Copyright (C) 2013 Geert Jan Bex <geertjan.bex@uhasselt.be>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''Module to test the parameter definition parser for Fortran'''
import unittest
from vsc.parameter_weaver.params import Parameter, ParameterParser, WeaverError
from vsc.parameter_weaver.base_validator import BaseValidator, ParameterDefinitionError
from vsc.parameter_weaver.fortran.types import Integer, DoublePrecision, CharacterArray
from vsc.parameter_weaver.fortran.validator import Validator
class FortranParserTest(unittest.TestCase):
    '''Tests for Fortran parameter definition parser'''
    def setUp(self):
        '''Set up parameter lists to be expected from valid files'''
        self._parameters = [
            Parameter(Integer(), 'a', '10'),
            Parameter(DoublePrecision(), 'f', '0.19D00'),
            Parameter(CharacterArray(), 'str', 'abcde')
        ]
        self._parameters_w_description = [
            Parameter(Integer(), 'a', '10'),
            Parameter(DoublePrecision(), 'f', '0.19D00', 'relative error'),
            Parameter(CharacterArray(), 'str', 'a;bcde', 'string to print')
        ]
        self._parser = ParameterParser(Validator())
    def test_simple_tab_separated_valid(self):
        '''Parse a simple file that is well-formed and valid'''
        # Removed an unused local (`t = CharacterArray()`) that served no
        # purpose in the original test body.
        file_name = 'tests/good_fortran.txt'
        try:
            self.assertEqual(self._parameters, self._parser.parse(file_name))
        except ParameterDefinitionError as error:
            self.fail(str(error))
if __name__ == '__main__':
unittest.main()
| gjbex/parameter-weaver | src/fortran_parser_test.py | Python | gpl-3.0 | 2,335 |
# Ice-cream shop order: read the flavour and quantity from stdin, print the
# total price (2 decimal places) and whether the order gets topping ("calda").
sabor = input()
quantidade = int(input())
# Unit price depends on the flavour, matched case-insensitively.
flavour = sabor.lower()
if flavour in ("morango", "cereja"):
    preco_unitario = 4.50
elif flavour in ("damasco", "siriguela"):
    preco_unitario = 3.80
else:
    preco_unitario = 2.75
total = quantidade * preco_unitario
print("%.2f" % total)
# Orders of more than two units come with topping.
print("COM CALDA" if quantidade > 2 else "SEM CALDA")
| SANDEISON/The-Huxley | Python/Sorveteria Tropical,py.py | Python | gpl-3.0 | 351 |
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import time
import sickbeard
import generic
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.exceptions import AuthException
class WombleProvider(generic.NZBProvider):
    # NZB search provider for "Womble's Index" (newshost.co.za).
    # Search itself is delegated to the generic.NZBProvider base; this class
    # only configures identity, URLs and the RSS-backed cache.
    def __init__(self):
        generic.NZBProvider.__init__(self, "Womble's Index")
        # Disabled by default; toggled by user configuration elsewhere.
        self.enabled = False
        self.cache = WombleCache(self)
        self.urls = {'base_url': 'https://newshost.co.za/'}
        self.url = self.urls['base_url']
    def isEnabled(self):
        # Report whether the user has enabled this provider.
        return self.enabled
class WombleCache(tvcache.TVCache):
    # RSS cache for Womble's Index: polls the provider's SD and HD TV feeds
    # and stores the parsed items through the tvcache.TVCache machinery.
    def __init__(self, provider):
        tvcache.TVCache.__init__(self, provider)
        # only poll Womble's Index every 15 minutes max
        self.minTime = 15
    def updateCache(self):
        # Refresh the cache from both RSS feeds, replacing all previously
        # cached entries in one bulk database operation.
        # check if we should update
        if not self.shouldUpdate():
            return
        # clear cache
        self._clearCache()
        # set updated
        # NOTE: the "last update" stamp is set before fetching, so a failed
        # fetch still counts as an attempt and respects minTime.
        self.setLastUpdate()
        cl = []
        for url in [self.provider.url + 'rss/?sec=tv-sd&fr=false', self.provider.url + 'rss/?sec=tv-hd&fr=false']:
            logger.log(u"Womble's Index cache update URL: " + url, logger.DEBUG)
            for item in self.getRSSFeed(url)['entries'] or []:
                # _parseItem returns a DB action tuple, or None to skip.
                ci = self._parseItem(item)
                if ci is not None:
                    cl.append(ci)
        if len(cl) > 0:
            # Apply all inserts in a single transaction.
            myDB = self._getDB()
            myDB.mass_action(cl)
    def _checkAuth(self, data):
        # Feed is public; treat a feed titled 'Invalid Link' as a bad fetch.
        return data if data['feed'] and data['feed']['title'] != 'Invalid Link' else None
provider = WombleProvider()
| bckwltn/SickRage | sickbeard/providers/womble.py | Python | gpl-3.0 | 2,302 |
# -*-coding:utf-8-*-
# Submodules re-exported by ``from pycas.utils import *``.
__all__ = ['database', 'guide', 'gff', 'uniprot']
| wolfsonliu/crispr | pycas/utils/__init__.py | Python | gpl-3.0 | 72 |
from sqlalchemy import Column, Integer, String, DateTime, Boolean, ForeignKey
from project.database import Base
from project.database import db_session
from sqlalchemy.orm import relationship
from sqlalchemy_utils import EmailType
from flask.ext.babel import lazy_gettext as _
# FIXME: move to extensions
from flask.ext.sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Roles(Base):
    # ORM model for an access-control role (table ``role``).
    __tablename__ = 'role'
    id = Column(Integer, primary_key=True)
    role_name = Column(String(50), unique=True, nullable=False, info={'label': _('role name')})
    url = Column(String(250), nullable=False, info={'label': _('website')})
    # Id of the parent role; stored as a plain integer (no FK constraint).
    parent = Column(Integer, nullable=False, info={'label': _('parent')})
    description = Column(String(50), info={'label': _('description')})
    def __init__(self, **kwargs):
        super(Roles, self).__init__(**kwargs)
    def __repr__(self):
        # NOTE(review): returns the bare name rather than a conventional
        # ``Roles(...)`` repr; kept for compatibility with existing templates.
        return self.role_name
class User_Role(Base):
    # Association table linking users (``profile``) to roles (``role``).
    __tablename__ = 'user_role'
    id = Column(Integer, primary_key=True)
    role = Column(Integer,ForeignKey('role.id'), info={'label': _('role')})
    user = Column(Integer,ForeignKey('profile.id'), info={'label': _('user')})
    def __init__(self, **kwargs):
        super(User_Role, self).__init__(**kwargs)
    def __repr__(self):
        # Repr is the role id (an int); callers relying on repr get that.
        return self.role
class Profile(Base):
    """ORM model for a user account (table ``profile``)."""
    __tablename__ = 'profile'
    id = Column(Integer, primary_key=True)
    username = Column(String(50), unique=True, nullable=False, info={'label': _('username')})
    password = Column(String(100), nullable=False, info={'label': _('password')})
    group_list = Column(String(50), info={'label': _('group list')})
    phone = Column(String(50), info={'label': _('phone')})
    email = Column(EmailType, info={'label': _('email')})
    registered_at = Column(String(50), info={'label': _('registered at')})
    firstName = Column(String(50), info={'label': _('first name')})
    lastName = Column(String(50), info={'label': _('last name')})
    sex = Column(Boolean, info={'label': _('sex')})
    birthday = Column(String(50), info={'label': _('birthday')})
    avatar = Column(String(50), info={'label': _('avatar')})
    country = Column(String(50), info={'label': _('country')})
    city = Column(String(50), info={'label': _('city')})
    creator = Column(Integer, info={'label': _('creator')})
    def __init__(self, **kwargs):
        super(Profile, self).__init__(**kwargs)
    def __repr__(self):
        return self.username
    def can(self, roles):
        """Return True if this user holds at least one of the given roles.

        :param roles: iterable of role names (strings) to check.
        """
        for item in roles:
            request_role = Roles.query.filter(Roles.role_name == item).first()
            if request_role is None:
                # Unknown role name: nothing to match.  The original code
                # wrapped the lookup below in a bare ``except: pass`` which
                # silently swallowed the AttributeError raised here as well
                # as any real database error.
                continue
            user_have_role = User_Role.query.filter(
                User_Role.role == request_role.id,
                User_Role.user == self.id
            ).first()
            if user_have_role:
                return True
        return False
    def has_group(self, group):
        """Return True if ``group`` equals this user's ``group_list`` value."""
        return group == self.group_list
    def __unicode__(self):
        return self.username
class Log(Base):
    '''
    ORM model recording user actions (table ``log_actions``).
    '''
    __tablename__ = 'log_actions'
    id = Column(Integer, primary_key=True)
    # Timestamp stored as a string, consistent with Profile.registered_at.
    log_date = Column(String(50), nullable=False, info={'label': _('date')})
    log_desc = Column(String(300), info={'label': _('description')})
    log_user = Column(Integer, ForeignKey('profile.id'),info={'label': _('user name')})
    user = relationship(
        Profile,
        # backref='profile'
    )
    def __init__(self, **kwargs):
        super(Log, self).__init__(**kwargs)
    def __repr__(self):
        return self.log_desc
| PyIran/website | project/apps/user/models.py | Python | gpl-3.0 | 3,333 |
# Generated by Django 2.1.3 on 2019-02-22 22:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a nullable ``terms_accepted``
    # timestamp to ``accounts.Profile`` (records when the user accepted the
    # terms of use; NULL means not yet accepted).
    dependencies = [
        ('accounts', '0004_auto_20170418_0219'),
    ]
    operations = [
        migrations.AddField(
            model_name='profile',
            name='terms_accepted',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| LCOGT/valhalla | valhalla/accounts/migrations/0005_profile_terms_accepted.py | Python | gpl-3.0 | 406 |
import json
import os.path
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../scripts'))
from flask import Flask, render_template, request
from py2neo import neo4j
from ollie import pipeline
app = Flask(__name__)
"""@app.route('/render', method=['POST'])
def render():
pairs = json.loads(request.form['data'])
edges = []
for pair in pairs:
n1 = pair[0]
n2 = pair[2]
rel = pair[1]
edges.append({'source': str(n1), 'target': str(n2), 'type': str(rel)})
return render_template('index4.html', links=edges)
"""
@app.route('/graph', methods=['POST', 'GET'])
def graph():
    """POST: run the extraction pipeline on an uploaded document and return
    its triples; GET: return all triples already stored in Neo4j.
    Both responses are JSON."""
    if request.method == 'POST':
        # Persist the upload to a fixed temp path for the pipeline.
        # NOTE(review): the hard-coded /tmp/doc.txt is shared by all
        # concurrent uploads — presumably single-user; confirm.
        f = request.files['file']
        f.save('/tmp/doc.txt')
        pairs = pipeline('/tmp/doc.txt')
        edges = []
        for pair in pairs:
            # pair is (subject, relation, object).
            n1 = pair[0]
            n2 = pair[2]
            rel = pair[1]
            edges.append({'source': str(n1), 'target': str(n2), 'type': str(rel)})
        #return render_template('graph.html', links=edges)
        return json.dumps(edges)
    else:
        # Dump every indexed relation as [subject, relation, object] lists.
        graph_db = neo4j.GraphDatabaseService('http://localhost:7474/db/data/')
        relations = graph_db.get_index(neo4j.Relationship, 'relations')
        q = relations.query('relation_name:*')
        pairs = []
        for rel in q:
            pairs.append([rel.start_node['name'], rel.type, rel.end_node['name']])
        return json.dumps(pairs)
@app.route('/graph/<concept>')
def concept(concept):
    """Return all relations of the named concept as a JSON list of
    [subject, relation, object] triples; empty list if the concept
    is not indexed."""
    graph_db = neo4j.GraphDatabaseService('http://localhost:7474/db/data/')
    relations = graph_db.get_index(neo4j.Node, 'concepts')
    q = relations.query('concept_name:%s' % concept)
    pairs = []
    try:
        # Only the first hit is used; next() works on both Python 2 and 3,
        # unlike the original q.next().
        concept = next(q)
    except StopIteration:
        # No such concept: return an empty result instead of swallowing
        # arbitrary errors with a bare except as before.
        return json.dumps(pairs)
    rels = concept.match()
    for rel in rels:
        pairs.append([rel.start_node['name'], rel.type, rel.end_node['name']])
    return json.dumps(pairs)
@app.route('/search/<query>')
def search(query):
    """Fuzzy-search concepts by name; each whitespace-separated token is
    wrapped with wildcards (e.g. "foo bar" -> "*foo*bar*").  Returns the
    relations of the first matching concept as a JSON list of
    [subject, relation, object] triples; empty list if nothing matches."""
    graph_db = neo4j.GraphDatabaseService('http://localhost:7474/db/data/')
    concepts = graph_db.get_index(neo4j.Node, 'concepts')
    query = '*' + '*'.join(query.strip().split(' ')) + '*'
    # Debug output; parenthesized so it is valid on Python 2 and 3
    # (the original `print query` was Python-2-only syntax).
    print(query)
    q = concepts.query('concept_name:%s' % str(query))
    pairs = []
    try:
        concept = next(q)
    except StopIteration:
        # No match: empty result (previously a bare except hid real errors).
        return json.dumps(pairs)
    rels = concept.match()
    for rel in rels:
        pairs.append([rel.start_node['name'], rel.type, rel.end_node['name']])
    return json.dumps(pairs)
@app.route('/graphical/<concepts>')
def graphical(concepts):
    """Render a graph view for a comma-separated list of concept names.
    Collects all relations of every matching concept and renders
    graph.html with node/edge structures; returns an empty JSON list
    when no name matches."""
    graph_db = neo4j.GraphDatabaseService('http://localhost:7474/db/data/')
    relations = graph_db.get_index(neo4j.Node, 'concepts')
    # Build a Lucene OR query: "name1" OR "name2" OR ...
    query = '"' + '" OR "'.join(concepts.split(',')) + '"'
    q = relations.query('concept_name:(%s)' % str(query))
    pairs = []
    rels = []
    concept = None
    while True:
        try:
            concept = next(q)
        except StopIteration:
            # Result set exhausted (previously a bare except).
            break
        rels += concept.match()
    if not concept:
        # Nothing matched.  Bug fix: the original returned
        # json.loads(pairs), which raises TypeError on a list;
        # json.dumps produces the intended empty JSON array.
        return json.dumps(pairs)
    nodes = {}
    edges = []
    default_style = {"radius": 10.0, "weight": 1.00, "centrality": 0.00,
                     "fill": "rgba(0,127,255,0.70)", "stroke": "rgba(0,0,0,0.80)"}
    for rel in rels:
        n1 = rel.start_node['name']
        n2 = rel.end_node['name']
        # Register every endpoint and relation type as a styled node.
        if n1 not in nodes:
            nodes[str(n1)] = dict(default_style)
        if n2 not in nodes:
            nodes[str(n2)] = dict(default_style)
        nodes[str(rel.type)] = dict(default_style)
        edges.append({'source': str(n1), 'target': str(n2), 'type': str(rel.type)})
    return render_template('graph.html', links=edges, nodes=nodes)
@app.route('/')
def home():
    """Render the landing page template."""
    return render_template('new.html')
@app.route('/browse')
def browse():
    """List the names of all indexed concepts via browse.html."""
    concepts = []
    graph_db = neo4j.GraphDatabaseService('http://localhost:7474/db/data/')
    relations = graph_db.get_index(neo4j.Node, 'concepts')
    q = relations.query('concept_name:*')
    while True:
        try:
            concept = next(q)
        except StopIteration:
            # End of results.  The original bare except also silently
            # swallowed errors raised while reading concept['name'];
            # those now propagate.
            break
        concepts.append(str(concept['name']))
    return render_template('browse.html', concepts=concepts)
app.debug = True
app.run()
| dash1291/major | webserver/app.py | Python | gpl-3.0 | 4,561 |
from __future__ import division
__author__ = 'wenqihe'
import sys
import random
import math
class PLSVM:
    """Partial-label linear SVM trained with a Pegasos-style stochastic
    subgradient method (mini-batches, learning rate 1/(lambda*t)).
    Labels are assumed to form a hierarchy given by ``type_hierarchy``.
    Python 2 code (xrange, sys.maxint)."""
    def __init__(self, feature_size, label_size, type_hierarchy, lambda_reg=0.1, max_iter=5000, threshold=0.5, batch_size=100):
        self._feature_size = feature_size
        self._label_size = label_size
        self._type_hierarchy = type_hierarchy
        # One weight vector per label, randomly initialised in [0, 1).
        self._weight = [[0 for col in range(feature_size)] for row in range(label_size)]
        for i in xrange(label_size):
            for j in xrange(feature_size):
                self._weight[i][j] = random.uniform(0, 1)
        self._lambda_reg = lambda_reg
        self._max_iter = max_iter
        self._threshold = threshold
        self._batch_size = batch_size
    def fit(self, train_x, train_y):
        """
        Train the model with stochastic subgradient descent.
        :param train_x: list of list; each example is a list of active
            (binary) feature indices
        :param train_y: list of list; each example's candidate label indices
        :return:
        """
        m = len(train_y)
        batch = int(math.ceil(m/self._batch_size))  # NOTE: computed but unused
        for t in xrange(1, self._max_iter):
            # Pegasos learning rate schedule.
            eta_t = 1.0/(self._lambda_reg*t)
            dW = [[0 for col in range(self._feature_size)] for row in range(self._label_size)]
            for j in xrange(self._batch_size):
                # Sample one training example uniformly at random.
                i = random.randint(0, m-1)
                x = train_x[i]
                y = train_y[i]
                # Negative label set: all labels not in the candidate set.
                ny = [k for k in range(self._label_size) if k not in y]
                # Highest-scoring candidate label vs highest-scoring negative.
                yi = self.find_max(y, x)
                nyi = self.find_max(ny, x)
                # Shrink all weights (L2 regularisation) and push the
                # winning candidate up / the winning negative down on the
                # example's active features.
                for feature in x:
                    self._weight[yi][feature] = self._weight[yi][feature]*(1-eta_t*self._lambda_reg) + eta_t
                    self._weight[nyi][feature] = self._weight[nyi][feature]*(1-eta_t*self._lambda_reg) - eta_t
            # self.update_weight(dW, eta_t, 1)
            sys.stdout.write('{0} iteration done.\r'.format(t))
            sys.stdout.flush()
    def predict(self, x):
        """Predict a set of label indices for feature vector ``x``,
        expanded along the label hierarchy: ancestors of the top-scoring
        label are always added; descendants are added while their score
        exceeds the threshold."""
        labels = set()
        # NOTE(review): assumed to map child label -> parent label; confirm
        # against the type_hierarchy implementation.
        parent_mapping = self._type_hierarchy._type_hierarchy
        scores = []
        # Score every label; track the argmax.
        max_index = 0
        max_value = self.inner_prod(self._weight[0], x)
        scores.append(max_value)
        for i in xrange(1, self._label_size):
            temp = self.inner_prod(self._weight[i], x)
            scores.append(temp)
            if temp>max_value:
                max_index = i
                max_value = temp
        # print scores
        labels.add(max_index)
        # Add parent of max_index if any
        temp = max_index
        while temp in parent_mapping:
            labels.add(parent_mapping[temp])
            temp = parent_mapping[temp]
        # add child of max_index if meeting threshold
        temp = max_index
        while temp != -1:
            max_sub_index = -1
            max_sub_score = -sys.maxint
            for child in parent_mapping:
                # check the maximum subtype
                if parent_mapping[child] == temp:
                    if child < self._label_size:
                        # print child
                        if max_sub_score < scores[child]:
                            max_sub_index = child
                            max_sub_score = scores[child]
            if max_sub_index != -1 and max_sub_score > self._threshold:
                labels.add(max_sub_index)
            temp = max_sub_index
        return labels
    def find_max(self, Y, x):
        """Return the label in ``Y`` whose weight vector scores highest on
        ``x``.  NOTE: shuffles ``Y`` in place (mutates the caller's list);
        ties are thereby broken randomly."""
        random.shuffle(Y)
        y = Y[0]
        max_value = self.inner_prod(self._weight[y], x)
        for i in xrange(1, len(Y)):
            temp = self.inner_prod(self._weight[Y[i]], x)
            if temp > max_value:
                y = Y[i]
                max_value = temp
        return y
    def update_weight(self, dW, eta_t, m):
        """Apply an averaged (sub)gradient step with L2 shrinkage.
        Currently unused by fit() (call is commented out there)."""
        for i in xrange(self._label_size):
            # L2 = 0
            for j in xrange(self._feature_size):
                self._weight[i][j] = self._weight[i][j]*(1-eta_t*self._lambda_reg) + eta_t*dW[i][j]/m
                # L2 += self._weight[i][j] * self._weight[i][j]
            # if L2>0:
            #     factor = min(1, 1/(math.sqrt(self._lambda_reg)*math.sqrt(L2)))
            #     if factor < 1:
            #         for j in xrange(self._feature_size):
            #             self._weight[i][j] *= factor
    @staticmethod
    def inner_prod(weight, x):
        """Dot product of a dense weight vector with a sparse binary
        feature vector (``x`` lists the active feature indices)."""
        result = 0
        for feature in x:
            result += weight[feature]
        return result
    @staticmethod
    def kernel(x1, x2):
        """Linear kernel for two sorted sparse binary vectors: the size of
        the intersection of their active-index lists (merge walk)."""
        i1 = 0
        i2 = 0
        result = 0
        while i1<len(x1) and i2<len(x2):
            if x1[i1] == x2[i2]:
                result += 1
                i1 += 1
                i2 += 1
            elif x1[i1] < x2[i2]:
                i1 += 1
            else:
                i2 += 1
        return result
| shanzhenren/PLE | Classifier/PLSVM.py | Python | gpl-3.0 | 4,745 |
import numpy as np
from data_management.database import Database
from utils.utils import log, today
class DataManager(object):
    """Loads, filters and reshapes experimental session data for one monkey.

    Sessions are read from the 'summary' table of the project database;
    only sessions whose date lies in the inclusive range
    [starting_point, end_point] are kept, and only error-free trials are
    returned.
    """
    name = "DataManager"  # tag passed to the log() utility

    def __init__(self, monkey, starting_point="2016-12-01", end_point=today(), database_path=None):
        # NOTE: ``end_point=today()`` is evaluated once at import time, so a
        # long-running process keeps the start-up date as the default.
        self.db = Database(database_path)
        self.monkey = monkey
        self.starting_point = starting_point
        self.end_point = end_point

    @staticmethod
    def _as_date_tuple(str_date):
        """Convert a 'YYYY-MM-DD' string to a comparable (y, m, d) tuple."""
        return tuple(int(i) for i in str_date.split("-"))

    def select_relevant_dates(self, dates_list):
        """Return the dates of ``dates_list`` inside the inclusive range
        [starting_point, end_point].

        Bug fix: the previous branch-by-branch year/month/day comparison
        mishandled ranges where start and end share year and month — dates
        after the end day (or before the start day) of that month could be
        included.  Comparing (year, month, day) tuples is the equivalent,
        correct test.
        """
        log("Starting point: {}.".format(self.starting_point), self.name)
        log("End point: {}.".format(self.end_point), self.name)
        start = self._as_date_tuple(self.starting_point)
        end = self._as_date_tuple(self.end_point)
        return [str_date for str_date in dates_list
                if start <= self._as_date_tuple(str_date) <= end]

    def get_dates(self):
        """Return the sorted-unique session dates for this monkey that fall
        inside the configured date range."""
        assert self.db.table_exists("summary")
        all_dates = np.unique(self.db.read_column(table_name="summary", column_name='date', monkey=self.monkey))
        assert len(all_dates)
        dates = self.select_relevant_dates(all_dates)
        log("N dates: {}.".format(len(dates)), self.name)
        log("Relevant dates: {}".format(dates), self.name)
        return dates

    def get_errors_p_x0_x1_choices_from_db(self, dates):
        """Collect per-trial data for every session on the given dates.

        Returns (error, p, x0, x1, choice, session, date_list) where p/x0/x1
        are dicts keyed by 'left'/'right' and the others are flat lists, all
        aligned by trial index.
        """
        p = {"left": [], "right": []}
        x0 = {"left": [], "right": []}
        x1 = {"left": [], "right": []}
        error = []
        choice = []
        session = []
        date_list = []
        for idx, date in enumerate(sorted(dates)):
            session_table = \
                self.db.read_column(table_name="summary", column_name='session_table',
                                    monkey=self.monkey, date=date)
            # If several sessions exist for the date, keep the last one.
            if type(session_table) == list:
                session_table = session_table[-1]
            error_session = self.db.read_column(table_name=session_table, column_name="error")
            choice_session = self.db.read_column(table_name=session_table, column_name="choice")
            error += error_session
            choice += choice_session
            session += [idx, ] * len(error_session)
            date_list += [date, ] * len(error_session)
            for side in ["left", "right"]:
                p[side] += \
                    [float(i) for i in self.db.read_column(table_name=session_table, column_name='{}_p'.format(side))]
                x0[side] += \
                    [int(i) for i in self.db.read_column(table_name=session_table, column_name='{}_x0'.format(side))]
                x1[side] += \
                    [int(i) for i in self.db.read_column(table_name=session_table, column_name='{}_x1'.format(side))]
        return error, p, x0, x1, choice, session, date_list

    def filter_valid_trials(self, error, p, x0, x1, choice, session, date):
        """Keep only trials whose error field is the string "None"; returns
        numpy arrays aligned on the surviving trials."""
        new_p = {"left": [], "right": []}
        new_x0 = {"left": [], "right": []}
        new_x1 = {"left": [], "right": []}
        new_choice = []
        new_session = []
        new_date = []
        # Error values are stored as strings; "None" marks a clean trial.
        valid_trials = np.where(np.asarray(error) == "None")[0]
        log("N valid trials: {}.".format(len(valid_trials)), self.name)
        for valid_idx in valid_trials:
            new_date.append(date[valid_idx])
            new_session.append(session[valid_idx])
            new_choice.append(choice[valid_idx])
            for side in ["left", "right"]:
                new_p[side].append(p[side][valid_idx])
                new_x0[side].append(x0[side][valid_idx])
                new_x1[side].append(x1[side][valid_idx])
        for side in ["left", "right"]:
            new_p[side] = np.asarray(new_p[side])
            new_x0[side] = np.asarray(new_x0[side])
            new_x1[side] = np.asarray(new_x1[side])
        new_choice = np.asarray(new_choice)
        new_session = np.asarray(new_session)
        new_date = np.asarray(new_date)
        return new_p, new_x0, new_x1, new_choice, new_session, new_date

    def run(self):
        """Full import: fetch dates, load trials, filter, and return a dict
        with keys 'p', 'x0', 'x1', 'choice', 'session', 'date'."""
        log("Import data for {}.".format(self.monkey), self.name)
        dates = self.get_dates()
        assert len(dates), "Fatal: No valid dates found, \n" \
                           "Please give a look at the analysis parameters (analysis/parameters/parameters.py)."
        error, p, x0, x1, choice, session, date = self.get_errors_p_x0_x1_choices_from_db(dates)
        p, x0, x1, choice, session, date = self.filter_valid_trials(error, p, x0, x1, choice, session, date)
        # All x1 amounts are expected to be zero in this data set.
        assert sum(x1["left"]) == 0 and sum(x1["right"]) == 0
        log("Done!", self.name)
        return {"p": p, "x0": x0, "x1": x1, "choice": choice, "session": session, "date": date}
def import_data(monkey, starting_point="2016-12-01", end_point=today(), database_path=None):
    """Convenience wrapper: build a DataManager and run the full import."""
    manager = DataManager(
        monkey=monkey,
        starting_point=starting_point,
        end_point=end_point,
        database_path=database_path,
    )
    return manager.run()
def main():
    # Ad-hoc entry point: report the relevant dates for monkey 'Havane'
    # between 2016-08-01 and today.
    d = DataManager(monkey='Havane', starting_point="2016-08-01", end_point=today())
    return d.get_dates()
if __name__ == "__main__":
main()
| AurelienNioche/MonkeyProject | data_management/data_manager.py | Python | gpl-3.0 | 6,946 |
#!/usr/bin/env python
"""
crate_anon/preprocess/postcodes.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <https://www.gnu.org/licenses/>.
===============================================================================
**Fetches UK postcode information and creates a database.**
Code-Point Open, CSV, GB
- https://www.ordnancesurvey.co.uk/business-and-government/products/opendata-products.html
- https://www.ordnancesurvey.co.uk/business-and-government/products/code-point-open.html
- https://www.ordnancesurvey.co.uk/opendatadownload/products.html
- http://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/
Office for National Statistics Postcode Database (ONSPD):
- https://geoportal.statistics.gov.uk/geoportal/catalog/content/filelist.page
- e.g. ONSPD_MAY_2016_csv.zip
- http://www.ons.gov.uk/methodology/geography/licences
Background:
- OA = Output Area
- smallest: >=40 households, >=100 people
- 181,408 OAs in England & Wales
- LSOA = Lower Layer Super Output Area
- 34,753 LSOAs in England & Wales
- MSOA = Middle Layer Super Output Area
- 7,201 MSOAs in England & Wales
- WZ = Workplace Zone
- https://www.ons.gov.uk/methodology/geography/ukgeographies/censusgeography#workplace-zone-wz
- https://www.ons.gov.uk/methodology/geography/ukgeographies/censusgeography#output-area-oa
""" # noqa
from abc import ABC, ABCMeta, abstractmethod
import argparse
import csv
import datetime
import logging
import os
import sys
# import textwrap
from typing import (Any, Dict, Generator, Iterable, List, Optional, TextIO,
Tuple)
from cardinal_pythonlib.argparse_func import RawDescriptionArgumentDefaultsHelpFormatter # noqa
from cardinal_pythonlib.dicts import rename_key
from cardinal_pythonlib.extract_text import wordwrap
from cardinal_pythonlib.fileops import find_first
from cardinal_pythonlib.logs import configure_logger_for_colour
import openpyxl
from openpyxl.cell.cell import Cell
import prettytable
from sqlalchemy import (
Column,
create_engine,
Date,
Integer,
Numeric,
String,
)
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import Session
from sqlalchemy.sql.schema import MetaData, Table
# import xlrd
from crate_anon.anonymise.constants import CHARSET, TABLE_KWARGS
from crate_anon.common.constants import EnvVar
log = logging.getLogger(__name__)
metadata = MetaData()
if EnvVar.GENERATING_CRATE_DOCS in os.environ:
DEFAULT_ONSPD_DIR = "/path/to/unzipped/ONSPD/download"
else:
DEFAULT_ONSPD_DIR = os.path.join(
os.path.expanduser("~"), "dev", "ons", "ONSPD_Nov2019"
)
DEFAULT_REPORT_EVERY = 1000
DEFAULT_COMMIT_EVERY = 10000
YEAR_MONTH_FMT = "%Y%m"
CODE_LEN = 9 # many ONSPD codes have this length
NAME_LEN = 80 # seems about right; a bit more than the length of many
# =============================================================================
# Ancillary functions
# =============================================================================
def convert_date(d: Dict[str, Any], key: str) -> None:
    """
    Modifies ``d[key]``, if it exists, to convert it to a
    :class:`datetime.datetime` or ``None``.

    Args:
        d: dictionary
        key: key
    """
    if key not in d:
        return
    raw = d[key]
    # Falsy source values (empty string, None) map to None; anything else
    # is parsed with the module's "YYYYMM" format.
    d[key] = (datetime.datetime.strptime(raw, YEAR_MONTH_FMT)
              if raw else None)
def convert_int(d: Dict[str, Any], key: str) -> None:
    """
    Modifies ``d[key]``, if it exists, to convert it to an int or ``None``.

    Args:
        d: dictionary
        key: key
    """
    if key not in d:
        return
    raw = d[key]
    # None, or a blank/whitespace-only string, counts as "missing".
    is_blank = raw is None or (isinstance(raw, str) and not raw.strip())
    d[key] = None if is_blank else int(raw)
def convert_float(d: Dict[str, Any], key: str) -> None:
    """
    Modifies ``d[key]``, if it exists, to convert it to a float or ``None``.

    Args:
        d: dictionary
        key: key
    """
    if key not in d:
        return
    raw = d[key]
    # None, or a blank/whitespace-only string, counts as "missing".
    is_blank = raw is None or (isinstance(raw, str) and not raw.strip())
    d[key] = None if is_blank else float(raw)
def values_from_row(row: Iterable["Cell"]) -> List[Any]:
    """
    Returns all values from a spreadsheet row.

    For the ``openpyxl`` interface to XLSX files.
    """
    # Idiomatic comprehension instead of the manual append loop; only the
    # ``.value`` attribute of each cell is used.  (The annotation is quoted
    # so the function object does not require openpyxl at definition time.)
    return [cell.value for cell in row]
def commit_and_announce(session: Session) -> None:
    """
    Commits an SQLAlchemy ORM session and says so.

    Args:
        session: the SQLAlchemy ORM session to commit
    """
    # Log first so the message appears even if commit() then raises.
    log.info("COMMIT")
    session.commit()
# =============================================================================
# Extend SQLAlchemy Base class
# =============================================================================
class ExtendedBase(object):
    """
    Mixin to extend the SQLAlchemy ORM Base class by specifying table creation
    parameters (specifically, for MySQL, to set the character set and
    MySQL engine).

    Only used in the creation of Base; everything else then inherits from Base
    as usual.

    See
    http://docs.sqlalchemy.org/en/latest/orm/extensions/declarative/mixins.html
    """
    # Applied to every mapped table (charset/engine kwargs for MySQL).
    __table_args__ = TABLE_KWARGS
Base = declarative_base(metadata=metadata, cls=ExtendedBase)
# =============================================================================
# Go to considerable faff to provide type hints for lookup classes
# =============================================================================
class GenericLookupClassMeta(DeclarativeMeta, ABCMeta):
    """
    To avoid: "TypeError: metaclass conflict: the metaclass of a derived class
    must be a (non-strict) subclass of the metaclasses of all its bases".

    We want a class that's a subclass of Base and ABC. So we can work out their
    metaclasses:

    .. code-block:: python

        from abc import ABC
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.sql.schema import MetaData

        class ExtendedBase(object):
            __table_args__ = {'mysql_charset': 'utf8', 'mysql_engine': 'InnoDB'}

        metadata = MetaData()
        Base = declarative_base(metadata=metadata, cls=ExtendedBase)
        type(Base)  # metaclass of Base: <class: 'sqlalchemy.ext.declarative.api.DeclarativeMeta'>
        type(ABC)  # metaclass of ABC: <class 'abc.ABCMeta'>

    and thus define this class to inherit from those two metaclasses, so it can
    be the metaclass we want.
    """  # noqa
    pass  # no extra behaviour; exists only to merge the two metaclasses
class GenericLookupClassType(Base, ABC, metaclass=GenericLookupClassMeta):
    """
    Type hint for our various simple lookup classes.

    Alternatives that don't work: Type[Base], Type[BASETYPE], type(Base).
    """
    __abstract__ = True  # abstract as seen by SQLAlchemy
    # ... avoids SQLAlchemy error: "sqlalchemy.exc.InvalidRequestError: Class
    # <class '__main__.GenericLookupClassType'> does not have a __table__ or
    # __tablename__ specified and does not inherit from an existing
    # table-mapped class."

    @abstractmethod
    def __call__(self, *args, **kwargs) -> None:
        # Represents __init__... not sure I have this quite right, but it
        # appeases PyCharm; see populate_generic_lookup_table()
        pass

    @property
    @abstractmethod
    def __table__(self) -> Table:
        # The SQLAlchemy Table object of the concrete lookup class.
        pass

    @property
    @abstractmethod
    def __tablename__(self) -> str:
        # Database table name of the concrete lookup class.
        pass

    @property
    @abstractmethod
    def __filename__(self) -> str:
        # Name of the source data file the concrete class is loaded from.
        pass
# =============================================================================
# Models: all postcodes
# =============================================================================
class Postcode(Base):
    """
    Maps individual postcodes to... lots of things. Large table.

    Each column's ``comment`` documents its meaning and (where known) the
    lookup table it refers to.
    """
    __tablename__ = 'postcode'

    pcd_nospace = Column(
        String(8), primary_key=True,
        comment="Postcode (no spaces)")
    # ... not in original, but simplifies indexing
    pcd = Column(
        String(7), index=True, unique=True,
        comment="Unit postcode (7 characters): 2-4 char outward code, "
                "left-aligned; 3-char inward code, right-aligned")
    pcd2 = Column(
        String(8), index=True, unique=True,
        comment="Unit postcode (8 characters): 2-4 char outward code, "
                "left-aligned; space; 3-char inward code, right-aligned")
    pcds = Column(
        String(8), index=True, unique=True,
        comment="Unit postcode (variable length): 2-4 char outward "
                "code; space; 3-char inward code")
    dointr = Column(
        Date,
        comment="Date of introduction (original format YYYYMM)")
    doterm = Column(
        Date,
        comment="Date of termination (original format YYYYMM) or NULL")
    oscty = Column(
        String(CODE_LEN),
        comment="County code [FK to county_england_2010.county_code]")
    oslaua = Column(
        String(CODE_LEN),
        comment="Local authority district (LUA), unitary authority "
                "(UA), metropolitan district (MD), London borough (LB),"
                " council area (CA), or district council area (DCA) "
                "[FK to lad_local_authority_district_2019.lad_code]")
    osward = Column(
        String(CODE_LEN),
        comment="Electoral ward/division "
                "[FK e.g. to electoral_ward_2019.ward_code]")
    usertype = Column(
        Integer,
        comment="Small (0) or large (1) postcode user")
    oseast1m = Column(
        Integer,
        comment="National grid reference Easting, 1m resolution")
    osnrth1m = Column(
        Integer,
        comment="National grid reference Northing, 1m resolution")
    osgrdind = Column(
        Integer,
        comment="Grid reference positional quality indicator")
    oshlthau = Column(
        String(CODE_LEN),
        comment="Former (up to 2013) Strategic Health Authority (SHA), Local "
                "Health Board (LHB), Health Board (HB), Health Authority "
                "(HA), or Health & Social Care Board (HSCB) [FK to one of: "
                "sha_strategic_health_authority_england_2010.sha_code or "
                "sha_strategic_health_authority_england_2004.sha_code; "
                "hb_health_board_n_ireland_2003.hb_code; "
                "hb_health_board_scotland_2014.hb_code; "
                "hscb_health_social_care_board_n_ireland_2010.hscb_code; "
                "lhb_local_health_board_wales_2014.lhb_code or "
                "lhb_local_health_board_wales_2006.lhb_code]")
    ctry = Column(
        String(CODE_LEN),
        comment="Country of the UK [England, Scotland, Wales, "
                "Northern Ireland] [FK to country_2012.country_code]")
    streg = Column(
        Integer,
        comment="Standard (Statistical) Region (SSR) [FK to "
                "ssr_standard_statistical_region_1995."
                "ssr_code]")
    pcon = Column(
        String(CODE_LEN),
        comment="Westminster parliamentary constituency [FK to "
                "pcon_westminster_parliamentary_constituency_2014."
                "pcon_code]")
    eer = Column(
        String(CODE_LEN),
        comment="European Electoral Region (EER) [FK to "
                "eer_european_electoral_region_2010.eer_code]")
    teclec = Column(
        String(CODE_LEN),
        comment="Local Learning and Skills Council (LLSC) / Dept. of "
                "Children, Education, Lifelong Learning and Skills (DCELLS) / "
                "Enterprise Region (ER) [PROBABLY FK to one of: "
                "dcells_dept_children_wales_2010.dcells_code; "
                "er_enterprise_region_scotland_2010.er_code; "
                "llsc_local_learning_skills_council_england_2010.llsc_code]")
    ttwa = Column(
        String(CODE_LEN),
        comment="Travel to Work Area (TTWA) [FK to "
                "ttwa_travel_to_work_area_2011.ttwa_code]")
    pct = Column(
        String(CODE_LEN),
        comment="Primary Care Trust (PCT) / Care Trust / "
                "Care Trust Plus (CT) / Local Health Board (LHB) / "
                "Community Health Partnership (CHP) / "
                "Local Commissioning Group (LCG) / "
                "Primary Healthcare Directorate (PHD) [FK to one of: "
                "pct_primary_care_trust_2019.pct_code; "
                "chp_community_health_partnership_scotland_2012.chp_code; "
                "lcg_local_commissioning_group_n_ireland_2010.lcg_code; "
                "lhb_local_health_board_wales_2014.lhb_code]")
    nuts = Column(
        String(10),
        comment="LAU2 areas [European Union spatial regions; Local "
                "Adminstrative Unit, level 2] / Nomenclature of Units "
                "for Territorial Statistics (NUTS) [FK to "
                "lau_eu_local_administrative_unit_2019.lau2_code]")
    statsward = Column(
        String(6),
        comment="2005 'statistical' ward [?FK to "
                "electoral_ward_2005.ward_code]")
    oa01 = Column(
        String(10),
        comment="2001 Census Output Area (OA). (There are "
                "about 222,000, so ~300 population?)")
    casward = Column(
        String(6),
        comment="Census Area Statistics (CAS) ward [PROBABLY FK to "
                "cas_ward_2003.cas_ward_code]")
    park = Column(
        String(CODE_LEN),
        comment="National park [FK to "
                "park_national_park_2016.park_code]")
    lsoa01 = Column(
        String(CODE_LEN),
        comment="2001 Census Lower Layer Super Output Area (LSOA) [England & "
                "Wales, ~1,500 population] / Data Zone (DZ) [Scotland] / "
                "Super Output Area (SOA) [FK to one of: "
                "lsoa_lower_layer_super_output_area_england_wales_2004.lsoa_code; "  # noqa
                "lsoa_lower_layer_super_output_area_n_ireland_2005.lsoa_code]")
    msoa01 = Column(
        String(CODE_LEN),
        comment="2001 Census Middle Layer Super Output Area (MSOA) [England & "
                "Wales, ~7,200 population] / "
                "Intermediate Zone (IZ) [Scotland] [FK to one of: "
                "msoa_middle_layer_super_output_area_england_wales_2004.msoa_code; "  # noqa
                "iz_intermediate_zone_scotland_2005.iz_code]")
    ur01ind = Column(
        String(1),
        comment="2001 Census urban/rural indicator [numeric in "
                "England/Wales/Scotland; letters in N. Ireland]")
    oac01 = Column(
        String(3),
        comment="2001 Census Output Area classification (OAC)"
                "[POSSIBLY FK to output_area_classification_2011."
                "subgroup_code]")
    oa11 = Column(
        String(CODE_LEN),
        comment="2011 Census Output Area (OA) [England, Wales, Scotland;"
                " ~100-625 population] / Small Area (SA) [N. Ireland]")
    lsoa11 = Column(
        String(CODE_LEN),
        comment="2011 Census Lower Layer Super Output Area (LSOA) [England & "
                "Wales, ~1,500 population] / Data Zone (DZ) [Scotland] / "
                "Super Output Area (SOA) [N. Ireland] [FK to one of: "
                "lsoa_lower_layer_super_output_area_2011.lsoa_code; "  # noqa
                " (defunct) dz_datazone_scotland_2011.dz_code]")
    msoa11 = Column(
        String(CODE_LEN),
        comment="2011 Census Middle Layer Super Output Area (MSOA) [England & "
                "Wales, ~7,200 population] / "
                "Intermediate Zone (IZ) [Scotland] [FK to one of: "
                "msoa_middle_layer_super_output_area_2011.msoa_code; "  # noqa
                "iz_intermediate_zone_scotland_2011.iz_code]")
    parish = Column(
        String(CODE_LEN),
        comment="Parish/community [FK to "
                "parish_ncp_england_wales_2018.parish_code]")
    wz11 = Column(
        String(CODE_LEN),
        comment="2011 Census Workplace Zone (WZ)")
    ccg = Column(
        String(CODE_LEN),
        comment="Clinical Commissioning Group (CCG) / Local Health Board "
                "(LHB) / Community Health Partnership (CHP) / Local "
                "Commissioning Group (LCG) / Primary Healthcare Directorate "
                "(PHD) [FK to one of: "
                "ccg_clinical_commissioning_group_uk_2019."
                "ccg_ons_code, lhb_local_health_board_wales_2014.lhb_code]")
    bua11 = Column(
        String(CODE_LEN),
        comment="Built-up Area (BUA) [FK to "
                "bua_built_up_area_uk_2013.bua_code]")
    buasd11 = Column(
        String(CODE_LEN),
        comment="Built-up Area Sub-division (BUASD) [FK to "
                "buasd_built_up_area_subdivision_uk_2013.buas_code]")
    ru11ind = Column(
        String(2),
        comment="2011 Census rural-urban classification")
    oac11 = Column(
        String(3),
        comment="2011 Census Output Area classification (OAC) [FK to "
                "output_area_classification_2011.subgroup_code]")
    lat = Column(
        Numeric(precision=9, scale=6),
        comment="Latitude (degrees, 6dp)")
    long = Column(
        Numeric(precision=9, scale=6),
        comment="Longitude (degrees, 6dp)")
    lep1 = Column(
        String(CODE_LEN),
        comment="Local Enterprise Partnership (LEP) - first instance [FK to "
                "lep_local_enterprise_partnership_england_2017.lep1_code]")
    lep2 = Column(
        String(CODE_LEN),
        comment="Local Enterprise Partnership (LEP) - second instance [FK to "
                "lep_local_enterprise_partnership_england_2017.lep1_code]")
    pfa = Column(
        String(CODE_LEN),
        comment="Police Force Area (PFA) [FK to "
                "pfa_police_force_area_2015.pfa_code]")
    imd = Column(
        Integer,
        comment="Index of Multiple Deprivation (IMD) [rank of LSOA/DZ, where "
                "1 is the most deprived, within each country] [FK to one of: "
                "imd_index_multiple_deprivation_england_2015.imd_rank; "
                "imd_index_multiple_deprivation_n_ireland_2010.imd_rank; "
                "imd_index_multiple_deprivation_scotland_2012.imd_rank; "
                "imd_index_multiple_deprivation_wales_2014.imd_rank]")

    # New in Nov 2019 ONSPD, relative to 2016 ONSPD:
    # ** Not yet implemented:
    # calncv
    # ced
    # nhser
    # rgn
    # stp

    def __init__(self, **kwargs: Any) -> None:
        """
        Builds a row from a raw ONSPD CSV row (all values arrive as strings),
        converting date/integer fields to proper types first.
        """
        convert_date(kwargs, 'dointr')
        convert_date(kwargs, 'doterm')
        convert_int(kwargs, 'usertype')
        convert_int(kwargs, 'oseast1m')
        convert_int(kwargs, 'osnrth1m')
        convert_int(kwargs, 'osgrdind')
        convert_int(kwargs, 'streg')
        # NOTE(review): no 'edind' column is defined on this class, so the
        # next call looks vestigial -- presumably a field from an older ONSPD
        # release; confirm convert_int() tolerates a missing key.
        convert_int(kwargs, 'edind')
        convert_int(kwargs, 'imd')
        # Derived column (also the primary key): postcode with spaces removed.
        kwargs['pcd_nospace'] = kwargs['pcd'].replace(" ", "")
        super().__init__(**kwargs)
# =============================================================================
# Models: core lookup tables
# =============================================================================
class OAClassification(Base):
    """
    Lookup table: 2011 Census Output Area (OA) classification names/codes.
    """
    __filename__ = "2011 Census Output Area Classification Names and Codes " \
                   "UK.xlsx"
    __tablename__ = "output_area_classification_2011"

    oac11 = Column(String(3), primary_key=True)
    supergroup_code = Column(String(1))
    supergroup_desc = Column(String(35))
    group_code = Column(String(2))
    group_desc = Column(String(40))
    subgroup_code = Column(String(3))
    subgroup_desc = Column(String(60))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto column names and derives codes."""
        for src, dst in (('OAC11', 'oac11'),
                         ('Supergroup', 'supergroup_desc'),
                         ('Group', 'group_desc'),
                         ('Subgroup', 'subgroup_desc')):
            rename_key(kwargs, src, dst)
        # The OAC code embeds its own hierarchy: supergroup = first char,
        # group = first two chars, subgroup = all three chars.
        code = kwargs['oac11']
        kwargs['supergroup_code'] = code[0:1]
        kwargs['group_code'] = code[0:2]
        kwargs['subgroup_code'] = code
        super().__init__(**kwargs)
class BUA(Base):
    """
    Lookup table: built-up area (BUA) codes/names, UK 2013.
    """
    __filename__ = "BUA_names and codes UK as at 12_13.xlsx"
    __tablename__ = "bua_built_up_area_uk_2013"

    bua_code = Column(String(CODE_LEN), primary_key=True)
    bua_name = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('BUA13CD', 'bua_code'),
                         ('BUA13NM', 'bua_name')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class BUASD(Base):
    """
    Lookup table: built-up area subdivisions (BUASD), UK 2013.
    """
    __filename__ = "BUASD_names and codes UK as at 12_13.xlsx"
    __tablename__ = "buasd_built_up_area_subdivision_uk_2013"

    buasd_code = Column(String(CODE_LEN), primary_key=True)
    buasd_name = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('BUASD13CD', 'buasd_code'),
                         ('BUASD13NM', 'buasd_name')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class CASWard(Base):
    """
    Lookup table: census area statistics (CAS) wards, UK 2003.

    - https://www.ons.gov.uk/methodology/geography/ukgeographies/censusgeography#statistical-wards-cas-wards-and-st-wards
    """  # noqa
    __filename__ = "CAS ward names and codes UK as at 01_03.xlsx"
    __tablename__ = "cas_ward_2003"

    cas_ward_code = Column(String(CODE_LEN), primary_key=True)
    cas_ward_name = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('WDCAS03CD', 'cas_ward_code'),
                         ('WDCAS03NM', 'cas_ward_name')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class CCG(Base):
    """
    Lookup table: clinical commissioning groups (CCGs), UK 2019.
    """
    __filename__ = "CCG names and codes UK as at 04_19.xlsx"
    __tablename__ = "ccg_clinical_commissioning_group_uk_2019"

    ccg_ons_code = Column(String(CODE_LEN), primary_key=True)
    ccg_ccg_code = Column(String(9))
    ccg_name = Column(String(NAME_LEN))
    ccg_name_welsh = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('CCG19CD', 'ccg_ons_code'),
                         ('CCG19CDH', 'ccg_ccg_code'),
                         ('CCG19NM', 'ccg_name'),
                         ('CCG19NMW', 'ccg_name_welsh')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class Country(Base):
    """
    Lookup table: countries of the UK, 2012.

    This is not a long table.
    """
    __filename__ = "Country names and codes UK as at 08_12.xlsx"
    __tablename__ = "country_2012"

    country_code = Column(String(CODE_LEN), primary_key=True)
    country_code_old = Column(Integer)  # ?
    country_name = Column(String(NAME_LEN))
    country_name_welsh = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('CTRY12CD', 'country_code'),
                         ('CTRY12CDO', 'country_code_old'),
                         ('CTRY12NM', 'country_name'),
                         ('CTRY12NMW', 'country_name_welsh')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class County2019(Base):
    """
    Lookup table: counties, UK 2019.
    """
    # NOTE(review): the data file is dated 04_19 but the table is named
    # county_england_2010 (and Postcode.oscty's comment refers to it by that
    # name) -- confirm the mismatch is intentional.
    __filename__ = "County names and codes UK as at 04_19.xlsx"
    __tablename__ = "county_england_2010"

    county_code = Column(String(CODE_LEN), primary_key=True)
    county_name = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('CTY19CD', 'county_code'),
                         ('CTY19NM', 'county_name')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class EER(Base):
    """
    Lookup table: European electoral regions (EERs), UK 2010.
    """
    __filename__ = "EER names and codes UK as at 12_10.xlsx"
    __tablename__ = "eer_european_electoral_region_2010"

    eer_code = Column(String(CODE_LEN), primary_key=True)
    eer_code_old = Column(String(2))  # ?
    eer_name = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('EER10CD', 'eer_code'),
                         ('EER10CDO', 'eer_code_old'),
                         ('EER10NM', 'eer_name')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class IMDLookupEN(Base):
    """
    Lookup table: Index of Multiple Deprivation (IMD), England 2015.

    **This is quite an important one to us!** IMD ranks are keyed by LSOA;
    see e.g. :class:`LSOA2011`.
    """
    __filename__ = "IMD lookup EN as at 12_15.xlsx"
    __tablename__ = "imd_index_multiple_deprivation_england_2015"

    lsoa_code = Column(String(CODE_LEN), primary_key=True)
    lsoa_name = Column(String(NAME_LEN))
    imd_rank = Column(Integer)

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('LSOA11CD', 'lsoa_code'),
                         ('LSOA11NM', 'lsoa_name'),
                         ('IMD15', 'imd_rank')):
            rename_key(kwargs, src, dst)
        convert_int(kwargs, 'imd_rank')  # spreadsheet supplies a string
        super().__init__(**kwargs)
class IMDLookupSC(Base):
    """
    Lookup table: Index of Multiple Deprivation (IMD), Scotland 2016.
    """
    __filename__ = "IMD lookup SC as at 12_16.xlsx"
    __tablename__ = "imd_index_multiple_deprivation_scotland_2016"

    dz_code = Column(String(CODE_LEN), primary_key=True)
    dz_name = Column  # removed: no such column in the original; see below
    imd_rank = Column(Integer)

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('DZ11CD', 'dz_code'),
                         ('IMD16', 'imd_rank')):
            rename_key(kwargs, src, dst)
        convert_int(kwargs, 'imd_rank')  # spreadsheet supplies a string
        super().__init__(**kwargs)
class IMDLookupWA(Base):
    """
    Lookup table: Index of Multiple Deprivation (IMD), Wales 2014.
    """
    __filename__ = "IMD lookup WA as at 12_14.xlsx"
    __tablename__ = "imd_index_multiple_deprivation_wales_2014"

    lsoa_code = Column(String(CODE_LEN), primary_key=True)
    lsoa_name = Column(String(NAME_LEN))
    imd_rank = Column(Integer)

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('LSOA11CD', 'lsoa_code'),
                         ('LSOA11NM', 'lsoa_name'),
                         ('IMD14', 'imd_rank')):
            rename_key(kwargs, src, dst)
        convert_int(kwargs, 'imd_rank')  # spreadsheet supplies a string
        super().__init__(**kwargs)
class LAU(Base):
    """
    Lookup table: European Union Local Administrative Units (LAUs), UK 2019.
    """
    __filename__ = "LAU2 names and codes UK as at 12_19 (NUTS).xlsx"
    __tablename__ = "lau_eu_local_administrative_unit_2019"

    lau2_code = Column(String(10), primary_key=True)
    lau2_name = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('LAU219CD', 'lau2_code'),
                         ('LAU219NM', 'lau2_name')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class LAD(Base):
    """
    Lookup table: local authority districts (LADs), UK 2019.
    """
    __filename__ = "LA_UA names and codes UK as at 12_19.xlsx"
    __tablename__ = "lad_local_authority_district_2019"

    lad_code = Column(String(CODE_LEN), primary_key=True)
    lad_name = Column(String(NAME_LEN))
    lad_name_welsh = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('LAD19CD', 'lad_code'),
                         ('LAD19NM', 'lad_name'),
                         ('LAD19NMW', 'lad_name_welsh')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class LEP(Base):
    """
    Lookup table: Local Enterprise Partnerships (LEPs), England 2017.
    """
    __filename__ = "LEP names and codes EN as at 04_17 v2.xlsx"
    __tablename__ = "lep_local_enterprise_partnership_england_2017"
    # __debug_content__ = True

    lep_code = Column(String(CODE_LEN), primary_key=True)
    lep_name = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('LEP17CD', 'lep_code'),
                         ('LEP17NM', 'lep_name')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class LSOA2011(Base):
    """
    Lookup table: lower layer super output areas (LSOAs), UK 2011.

    **This is quite an important one.** LSOAs map to IMDs; see
    :class:`IMDLookupEN`.
    """
    __filename__ = "LSOA (2011) names and codes UK as at 12_12.xlsx"
    __tablename__ = "lsoa_lower_layer_super_output_area_2011"

    lsoa_code = Column(String(CODE_LEN), primary_key=True)
    lsoa_name = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('LSOA11CD', 'lsoa_code'),
                         ('LSOA11NM', 'lsoa_name')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class MSOA2011(Base):
    """
    Lookup table: middle layer super output areas (MSOAs), UK 2011.
    """
    __filename__ = "MSOA (2011) names and codes UK as at 12_12.xlsx"
    __tablename__ = "msoa_middle_layer_super_output_area_2011"

    msoa_code = Column(String(CODE_LEN), primary_key=True)
    msoa_name = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('MSOA11CD', 'msoa_code'),
                         ('MSOA11NM', 'msoa_name')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class NationalPark(Base):
    """
    Lookup table: national parks, Great Britain 2016.
    """
    __filename__ = "National Park names and codes GB as at 08_16.xlsx"
    __tablename__ = "park_national_park_2016"

    park_code = Column(String(CODE_LEN), primary_key=True)
    park_name = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('NPARK16CD', 'park_code'),
                         ('NPARK16NM', 'park_name')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class Parish(Base):
    """
    Lookup table: parishes/non-civil parished areas (NCP), England & Wales
    2018. (The original docstring said 2014; the data file and table name
    are both 12_18/2018.)
    """
    __filename__ = "Parish_NCP names and codes EW as at 12_18.xlsx"
    __tablename__ = "parish_ncp_england_wales_2018"

    parish_code = Column(String(CODE_LEN), primary_key=True)
    parish_name = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('PARNCP18CD', 'parish_code'),
                         ('PARNCP18NM', 'parish_name')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class PCT2019(Base):
    """
    Lookup table: Primary Care Trust (PCT) organizations, UK 2019.

    The forerunner of CCGs (q.v.).
    """
    __filename__ = "PCT names and codes UK as at 04_19.xlsx"
    __tablename__ = "pct_primary_care_trust_2019"

    pct_code = Column(String(CODE_LEN), primary_key=True)
    pct_code_old = Column(String(5))
    pct_name = Column(String(NAME_LEN))
    pct_name_welsh = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('PCTCD', 'pct_code'),
                         ('PCTCDO', 'pct_code_old'),
                         ('PCTNM', 'pct_name'),
                         ('PCTNMW', 'pct_name_welsh')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class PFA(Base):
    """
    Lookup table: police force areas (PFAs), Great Britain 2015.
    """
    __filename__ = "PFA names and codes GB as at 12_15.xlsx"
    __tablename__ = "pfa_police_force_area_2015"

    pfa_code = Column(String(CODE_LEN), primary_key=True)
    pfa_name = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('PFA15CD', 'pfa_code'),
                         ('PFA15NM', 'pfa_name')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class GOR(Base):
    """
    Represents Government Office Regions (GORs), England 2010.
    """
    __filename__ = "Region names and codes EN as at 12_10 (RGN).xlsx"
    __tablename__ = "gor_govt_office_region_england_2010"

    gor_code = Column(String(CODE_LEN), primary_key=True)
    gor_code_old = Column(String(1))
    gor_name = Column(String(NAME_LEN))
    gor_name_welsh = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        rename_key(kwargs, 'GOR10CD', 'gor_code')
        rename_key(kwargs, 'GOR10CDO', 'gor_code_old')
        rename_key(kwargs, 'GOR10NM', 'gor_name')
        # BUG FIX: the target was previously 'gor_name', which clobbered the
        # English name with the Welsh one and left gor_name_welsh unpopulated
        # (compare CCG/Country/PCT2019, which all map *NMW -> *_name_welsh).
        rename_key(kwargs, 'GOR10NMW', 'gor_name_welsh')
        super().__init__(**kwargs)
class SSR(Base):
    """
    Lookup table: Standard Statistical Regions (SSRs), UK.

    (File is dated 12_05; the table name and field codes refer to 1995 --
    SSR95* -- presumably the vintage of the region definitions.)
    """
    __filename__ = "SSR names and codes UK as at 12_05 (STREG).xlsx"
    __tablename__ = "ssr_standard_statistical_region_1995"

    ssr_code = Column(Integer, primary_key=True)
    ssr_name = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('SSR95CD', 'ssr_code'),
                         ('SSR95NM', 'ssr_name')):
            rename_key(kwargs, src, dst)
        convert_int(kwargs, 'ssr_code')  # spreadsheet supplies a string
        super().__init__(**kwargs)
_ = '''
# NOT WORKING 2020-03-03: missing PK somewhere? Also: unimportant.
class Ward2005(Base):
"""
Represents electoral wards, UK 2005.
"""
__filename__ = "Statistical ward names and codes UK as at 2005.xlsx"
__tablename__ = "electoral_ward_2005"
ward_code = Column(String(6), primary_key=True)
ward_name = Column(String(NAME_LEN))
def __init__(self, **kwargs: Any) -> None:
rename_key(kwargs, 'WDSTL05CD', 'ward_code')
rename_key(kwargs, 'WDSTL05NM', 'ward_name')
super().__init__(**kwargs)
'''
class Ward2019(Base):
    """
    Lookup table: electoral wards, UK 2019. (The original docstring said
    2016; the data file and table name are both 12_19/2019.)
    """
    __filename__ = "Ward names and codes UK as at 12_19.xlsx"
    __tablename__ = "electoral_ward_2019"

    ward_code = Column(String(CODE_LEN), primary_key=True)
    ward_name = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('WD19CD', 'ward_code'),
                         ('WD19NM', 'ward_name')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class TTWA(Base):
    """
    Lookup table: travel-to-work areas (TTWAs), UK 2011.
    """
    __filename__ = "TTWA names and codes UK as at 12_11 v5.xlsx"
    __tablename__ = "ttwa_travel_to_work_area_2011"

    ttwa_code = Column(String(CODE_LEN), primary_key=True)
    ttwa_name = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('TTWA11CD', 'ttwa_code'),
                         ('TTWA11NM', 'ttwa_name')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
class WestminsterConstituency(Base):
    """
    Lookup table: Westminster parliamentary constituencies, UK 2014.
    """
    __filename__ = "Westminster Parliamentary Constituency names and codes " \
                   "UK as at 12_14.xlsx"
    __tablename__ = "pcon_westminster_parliamentary_constituency_2014"

    pcon_code = Column(String(CODE_LEN), primary_key=True)
    pcon_name = Column(String(NAME_LEN))

    def __init__(self, **kwargs: Any) -> None:
        """Maps spreadsheet headings onto our column names."""
        for src, dst in (('PCON14CD', 'pcon_code'),
                         ('PCON14NM', 'pcon_name')):
            rename_key(kwargs, src, dst)
        super().__init__(**kwargs)
_ = '''
# =============================================================================
# Models: centroids
# =============================================================================
# http://webarchive.nationalarchives.gov.uk/20160105160709/http://www.ons.gov.uk/ons/guide-method/geography/products/census/spatial/centroids/index.html # noqa
#
# Looking at lower_layer_super_output_areas_(e+w)_2011_population_weighted_centroids_v2.zip : # noqa
# - LSOA_2011_EW_PWC.shp -- probably a Shape file;
# ... yes
# ... https://en.wikipedia.org/wiki/Shapefile
# ... ... describes most of the other files
# - LSOA_2011_EW_PWC_COORD_V2.CSV -- LSOA to centroid coordinates
class PopWeightedCentroidsLsoa2011(Base):
"""
Represents a population-weighted centroid of a lower layer super output
area (LSOA).
That is, the geographical centre of the LSOA, weighted by population. (A
first approximation: imagine every person pulling on the centroid
simultaneously and with equal force from their home. Where will it end up?)
""" # noqa
__filename__ = "LSOA_2011_EW_PWC_COORD_V2.CSV"
__tablename__ = "pop_weighted_centroids_lsoa_2011"
# __debug_content__ = True
lsoa_code = Column(String(CODE_LEN), primary_key=True)
lsoa_name = Column(String(NAME_LEN))
bng_north = Column(Integer, comment="British National Grid, North (m)")
bng_east = Column(Integer, comment="British National Grid, East (m)")
# https://en.wikipedia.org/wiki/Ordnance_Survey_National_Grid#All-numeric_grid_references # noqa
latitude = Column(Numeric(precision=13, scale=10),
comment="Latitude (degrees, 10dp)")
longitude = Column(Numeric(precision=13, scale=10),
comment="Longitude (degrees, 10dp)")
# ... there are some with 10dp, e.g. 0.0000570995
# ... (precision - scale) = number of digits before '.'
# ... which can't be more than 3 for any latitude/longitude
def __init__(self, **kwargs: Any) -> None:
rename_key(kwargs, 'LSOA11CD', 'lsoa_code')
rename_key(kwargs, 'LSOA11NM', 'lsoa_name')
rename_key(kwargs, 'BNGNORTH', 'bng_north')
rename_key(kwargs, 'BNGEAST', 'bng_east')
rename_key(kwargs, 'LONGITUDE', 'longitude')
rename_key(kwargs, 'LATITUDE', 'latitude')
# MySQL doesn't care if you pass a string to a numeric field, but
# SQL server does. So:
convert_int(kwargs, 'bng_north')
convert_int(kwargs, 'bng_east')
convert_float(kwargs, 'longitude')
convert_float(kwargs, 'latitude')
super().__init__(**kwargs)
if not self.lsoa_code:
raise ValueError("Can't have a blank lsoa_code")
'''
# =============================================================================
# Files -> table data
# =============================================================================
def populate_postcode_table(filename: str,
                            session: Session,
                            replace: bool = False,
                            startswith: List[str] = None,
                            reportevery: int = DEFAULT_REPORT_EVERY,
                            commit: bool = True,
                            commitevery: int = DEFAULT_COMMIT_EVERY) -> None:
    """
    Populates the :class:`Postcode` table, which is very big, from Office of
    National Statistics Postcode Database (ONSPD) database that you have
    downloaded.

    Args:
        filename: CSV file to read
        session: SQLAlchemy ORM database session
        replace: replace tables even if they exist? (Otherwise, skip existing
            tables.)
        startswith: if specified, restrict to postcodes that start with one of
            these strings
        reportevery: report to the Python log every *n* rows
        commit: COMMIT the session once we've inserted the data?
        commitevery: if committing: commit every *n* rows inserted
    """
    tablename = Postcode.__tablename__
    # noinspection PyUnresolvedReferences
    table = Postcode.__table__
    if not replace:
        engine = session.bind
        if engine.has_table(tablename):
            log.info(f"Table {tablename} exists; skipping")
            return
    log.info(f"Dropping/recreating table: {tablename}")
    table.drop(checkfirst=True)
    table.create(checkfirst=True)
    # BUG FIX: the f-string previously contained no placeholder, so the log
    # message never said which file was being used.
    log.info(f"Using ONSPD data file: {filename}")
    n = 0
    n_inserted = 0
    extra_fields = []  # type: List[str]
    # str.startswith accepts a tuple of prefixes; precompute it once rather
    # than looping over the prefixes for every row.
    prefixes = tuple(startswith) if startswith else None
    db_fields = sorted(k for k in table.columns.keys() if k != 'pcd_nospace')
    with open(filename) as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            n += 1
            if n % reportevery == 0:
                log.info(f"Processing row {n}: {row['pcds']} "
                         f"({n_inserted} inserted)")
            # log.debug(row)
            if n == 1:
                # First row: compare the file's fields with the table's.
                file_fields = sorted(row.keys())
                missing_fields = sorted(set(db_fields) - set(file_fields))
                extra_fields = sorted(set(file_fields) - set(db_fields))
                if missing_fields:
                    log.warning(
                        f"Fields in database but not file: {missing_fields}")
                if extra_fields:
                    log.warning(
                        f"Fields in file but not database : {extra_fields}")
            # Strip fields the table doesn't have, for every row.
            for k in extra_fields:
                del row[k]
            if prefixes and not row['pcd'].startswith(prefixes):
                continue
            obj = Postcode(**row)
            session.add(obj)
            n_inserted += 1
            if commit and n % commitevery == 0:
                commit_and_announce(session)
    if commit:
        commit_and_announce(session)
# BASETYPE = TypeVar('BASETYPE', bound=Base)
# http://mypy.readthedocs.io/en/latest/kinds_of_types.html
# https://docs.python.org/3/library/typing.html
def populate_generic_lookup_table(
        sa_class: GenericLookupClassType,
        datadir: str,
        session: Session,
        replace: bool = False,
        commit: bool = True,
        commitevery: int = DEFAULT_COMMIT_EVERY) -> None:
    """
    Populates one of many generic lookup tables with ONSPD data.

    We find the data filename from the ``__filename__`` property of the
    specific class, hunting for it within ``datadir`` and its subdirectories.

    The ``.TXT`` files look at first glance like tab-separated values files,
    but in some cases have inconsistent numbers of tabs (e.g. "2011 Census
    Output Area Classification Names and Codes UK.txt"). So we'll use the
    ``.XLSX`` files.

    If the class defines ``__headings__``, those headings are used. Otherwise,
    the first row is used for headings.

    Args:
        sa_class: SQLAlchemy ORM class
        datadir: root directory of ONSPD data
        session: SQLAlchemy ORM database session
        replace: replace tables even if they exist? (Otherwise, skip existing
            tables.)
        commit: COMMIT the session once we've inserted the data?
        commitevery: if committing: commit every *n* rows inserted
    """
    tablename = sa_class.__tablename__
    filename = find_first(sa_class.__filename__, datadir)
    headings = getattr(sa_class, '__headings__', [])
    debug = getattr(sa_class, '__debug_content__', False)
    n = 0
    if not replace:
        engine = session.bind
        if engine.has_table(tablename):
            log.info(f"Table {tablename} exists; skipping")
            return
    log.info(f"Dropping/recreating table: {tablename}")
    sa_class.__table__.drop(checkfirst=True)
    sa_class.__table__.create(checkfirst=True)
    # BUG FIX: the f-string previously contained no placeholder for the file.
    log.info(f'Processing file "{filename}" -> table "{tablename}"')
    ext = os.path.splitext(filename)[1].lower()
    type_xlsx = ext in ['.xlsx']
    type_csv = ext in ['.csv']
    file = None  # type: Optional[TextIO]

    def dict_from_rows(row_iterator: Iterable[List]) \
            -> Generator[Dict, None, None]:
        # First row gives the headings, unless explicit headings were
        # supplied via __headings__; subsequent rows give the data.
        local_headings = headings
        first_row = True
        for row in row_iterator:
            values = values_from_row(row)
            if first_row and not local_headings:
                local_headings = values
            else:
                yield dict(zip(local_headings, values))
            first_row = False

    if type_xlsx:
        workbook = openpyxl.load_workbook(filename)  # read_only=True
        # openpyxl BUG: with read_only=True, cells can have None as their value
        # when they're fine if opened in non-read-only mode.
        # May be related to this:
        # https://bitbucket.org/openpyxl/openpyxl/issues/601/read_only-cell-row-column-attributes-are  # noqa
        sheet = workbook.active
        dict_iterator = dict_from_rows(sheet.iter_rows())
    elif type_csv:
        file = open(filename, 'r')
        csv_reader = csv.DictReader(file)
        dict_iterator = csv_reader
    else:
        raise ValueError("Only XLSX and CSV these days")
    try:
        for datadict in dict_iterator:
            n += 1
            if debug:
                log.critical(f"{n}: {datadict}")
            # filter out blanks:
            datadict = {k: v for k, v in datadict.items() if k}
            # noinspection PyNoneFunctionAssignment
            obj = sa_class(**datadict)
            session.add(obj)
            if commit and n % commitevery == 0:
                commit_and_announce(session)
        if commit:
            commit_and_announce(session)
        log.info(f"... inserted {n} rows")
    finally:
        # BUG FIX: previously the CSV file was only closed on the success
        # path, leaking the handle if an exception occurred mid-insert.
        if file:
            file.close()
# =============================================================================
# Docs
# =============================================================================
def show_docs() -> None:
    """
    Print the column ``doc`` attributes from the :class:`Postcode` class, in
    tabular form, to stdout.
    """
    # noinspection PyUnresolvedReferences
    pt = prettytable.PrettyTable(
        ["postcode field", "Description"],
        border=True,
        hrules=prettytable.ALL,
        vrules=prettytable.NONE,
    )
    pt.align = 'l'
    pt.valign = 't'
    pt.max_width = 80
    # One row per column of the Postcode table, alphabetically.
    for column_name in sorted(Postcode.__table__.columns.keys()):
        description = wordwrap(getattr(Postcode, column_name).doc, width=70)
        pt.add_row([column_name, description])
    print(pt.get_string())
# =============================================================================
# Main
# =============================================================================
def main() -> None:
    """
    Command-line entry point. See command-line help.

    Parses arguments, then (unless asked only for docs/listings) connects to
    the target database and populates the lookup tables and/or the main
    postcode table from an unzipped ONSPD download.
    """
    # noinspection PyTypeChecker
    parser = argparse.ArgumentParser(
        formatter_class=RawDescriptionArgumentDefaultsHelpFormatter,
        description=r"""
- This program reads data from the UK Office of National Statistics Postcode
  Database (ONSPD) and inserts it into a database.
- You will need to download the ONSPD from
  https://geoportal.statistics.gov.uk/geoportal/catalog/content/filelist.page
  e.g. ONSPD_MAY_2016_csv.zip (79 Mb), and unzip it (>1.4 Gb) to a directory.
  Tell this program which directory you used.
- Specify your database as an SQLAlchemy connection URL: see
  http://docs.sqlalchemy.org/en/latest/core/engines.html
  The general format is:
  dialect[+driver]://username:password@host[:port]/database[?key=value...]
- If you get an error like:
  UnicodeEncodeError: 'latin-1' codec can't encode character '\u2019' in
  position 33: ordinal not in range(256)
  then try appending "?charset=utf8" to the connection URL.
- ONS POSTCODE DATABASE LICENSE.
  Output using this program must add the following attribution statements:
  Contains OS data © Crown copyright and database right [year]
  Contains Royal Mail data © Royal Mail copyright and database right [year]
  Contains National Statistics data © Crown copyright and database right [year]
  See http://www.ons.gov.uk/methodology/geography/licences
""")  # noqa: E501
    parser.add_argument(
        "--dir", default=DEFAULT_ONSPD_DIR,
        help="Root directory of unzipped ONSPD download")
    parser.add_argument(
        "--url", help="SQLAlchemy database URL")
    parser.add_argument(
        "--echo", action="store_true", help="Echo SQL")
    parser.add_argument(
        "--reportevery", type=int, default=DEFAULT_REPORT_EVERY,
        help="Report every n rows")
    parser.add_argument(
        "--commitevery", type=int, default=DEFAULT_COMMIT_EVERY,
        help=(
            "Commit every n rows. If you make this too large "
            "(relative e.g. to your MySQL max_allowed_packet setting, you may"
            " get crashes with errors like 'MySQL has gone away'."))
    parser.add_argument(
        "--startswith", nargs="+",
        help="Restrict to postcodes that start with one of these strings")
    parser.add_argument(
        "--replace", action="store_true",
        help="Replace tables even if they exist (default: skip existing "
             "tables)")
    parser.add_argument(
        "--skiplookup", action="store_true",
        help="Skip generation of code lookup tables")
    parser.add_argument(
        "--specific_lookup_tables", nargs="*",
        help="Within the lookup tables, process only specific named tables")
    parser.add_argument(
        "--list_lookup_tables", action="store_true",
        help="List all possible lookup tables, then stop")
    parser.add_argument(
        "--skippostcodes", action="store_true",
        help="Skip generation of main (large) postcode table")
    parser.add_argument(
        "--docsonly", action="store_true",
        help="Show help for postcode table then stop")
    parser.add_argument(
        "-v", "--verbose", action="store_true", help="Verbose")
    args = parser.parse_args()

    # Logging: root logger at DEBUG if --verbose, else INFO.
    rootlogger = logging.getLogger()
    configure_logger_for_colour(
        rootlogger, level=logging.DEBUG if args.verbose else logging.INFO)
    log.debug(f"args = {args!r}")

    # --docsonly: print the Postcode column docs and exit (no DB needed).
    if args.docsonly:
        show_docs()
        sys.exit(0)

    # All ORM lookup-table classes to be populated, one per ONSPD lookup file.
    classlist = [
        # Core lookup tables:
        # In alphabetical order of filename:
        OAClassification,
        BUA,
        BUASD,
        CASWard,
        CCG,
        Country,
        County2019,
        EER,
        IMDLookupEN,
        IMDLookupSC,
        IMDLookupWA,
        LAU,
        LAD,
        LEP,
        LSOA2011,
        MSOA2011,
        NationalPark,
        Parish,
        PCT2019,
        PFA,
        GOR,
        SSR,
        # Ward2005,
        TTWA,
        Ward2019,
        WestminsterConstituency,
        # Centroids:
        # PopWeightedCentroidsLsoa2011,
    ]

    # --list_lookup_tables: print (table, source file) pairs and exit.
    if args.list_lookup_tables:
        tables_files = []  # type: List[Tuple[str, str]]
        for sa_class in classlist:
            tables_files.append((sa_class.__tablename__,
                                 sa_class.__filename__))
        tables_files.sort(key=lambda x: x[0])
        for table, file in tables_files:
            print(f"Table {table} from file {file!r}")
        return

    # Everything below needs a database.
    if not args.url:
        print("Must specify URL")
        return
    engine = create_engine(args.url, echo=args.echo, encoding=CHARSET)
    metadata.bind = engine
    session = sessionmaker(bind=engine)()

    log.info(f"Using directory: {args.dir}")
    # NOTE(review): lookup and postcode data are both expected directly in
    # --dir (the older "Documents"/"Data" subdirectory layout is commented
    # out below).
    # lookupdir = os.path.join(args.dir, "Documents")
    lookupdir = args.dir
    # datadir = os.path.join(args.dir, "Data")
    datadir = args.dir

    # Populate lookup tables (unless --skiplookup), optionally restricted to
    # --specific_lookup_tables.
    if not args.skiplookup:
        for sa_class in classlist:
            if (args.specific_lookup_tables and
                    sa_class.__tablename__ not in args.specific_lookup_tables):
                continue
            # if (sa_class.__tablename__ ==
            #         "ccg_clinical_commissioning_group_uk_2019"):
            #     log.warning("Ignore warning 'Discarded range with reserved "
            #                 "name' below; it works regardless")
            populate_generic_lookup_table(
                sa_class=sa_class,
                datadir=lookupdir,
                session=session,
                replace=args.replace,
                commit=True,
                commitevery=args.commitevery
            )
    # Populate the main (large) postcode table (unless --skippostcodes).
    if not args.skippostcodes:
        populate_postcode_table(
            filename=find_first("ONSPD_*.csv", datadir),
            session=session,
            replace=args.replace,
            startswith=args.startswith,
            reportevery=args.reportevery,
            commit=True,
            commitevery=args.commitevery
        )
if __name__ == '__main__':
    main()
| RudolfCardinal/crate | crate_anon/preprocess/postcodes.py | Python | gpl-3.0 | 52,340 |
from django.contrib.auth.models import User
from ws.messages import security
from ws.models import Participant
class PrefetchGroupsMiddleware:
    """Prefetch the authenticated user's groups for use in the request.

    Much of the codebase makes group-centric decisions; without prefetching,
    each group check can trigger its own query (classic n+1). Re-fetching the
    user once with ``prefetch_related('groups')`` trades that for a single
    extra query per request.

    This is a slight hack - the proper implementation would be a custom
    authentication backend whose ``get_user()`` does the prefetching
    (extending all-auth). For now, this cuts down on query time and execution.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        if request.user.is_authenticated:
            request.user = (
                User.objects.filter(pk=request.user.pk)
                .prefetch_related('groups')
                .get()
            )
        return self.get_response(request)
class ParticipantMiddleware:
    """Attach the requesting user's participant to the request (used in most views)."""

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # TODO: `password_quality` is looked up on every request too; that
        # should be joined in here.
        participant = Participant.from_user(request.user)
        request.participant = participant
        return self.get_response(request)
class CustomMessagesMiddleware:
    """Queue custom messages for rendering on every page load.

    Caution: *must* be installed after both:
    - ParticipantMiddleware (to access participant info for messages)
    - MessagesMiddleware (to render messages)
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # Supply any security-related messages before the view runs.
        security.Messages(request).supply()
        return self.get_response(request)
| DavidCain/mitoc-trips | ws/middleware.py | Python | gpl-3.0 | 1,876 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
#import tagging_autocomplete.models
import datetime
class Migration(migrations.Migration):
    # Runs after the previous 'thedirectory' migration.
    dependencies = [
        ('thedirectory', '0003_auto_20150525_1515'),
    ]
    operations = [
        # Swap Entry.tags from the (commented-out) tagging-autocomplete field
        # to a plain CharField.
        migrations.AlterField(
            model_name='entry',
            name='tags',
            #field=tagging_autocomplete.models.TagAutocompleteField(max_length=255, blank=True),
            # NOTE(review): the default is a *fixed* datetime captured when
            # this migration was auto-generated, and max_length is the bytes
            # literal b'105' rather than the int 105 - both look accidental,
            # but they are frozen into migration history and left unchanged.
            field=models.CharField(default=datetime.datetime(2015, 5, 25, 17, 23, 6, 515140), max_length=b'105'),
        ),
    ]
| owatte/thecaribfos | apps/thedirectory/migrations/0004_auto_20150529_1416.py | Python | gpl-3.0 | 616 |
import unittest
from pyicoteolib.utils import DualSortedReader
from pyicoteolib.core import BED
class TestUtils(unittest.TestCase):
    def test_dual_reader(self):
        """DualSortedReader must yield the two sorted BED files merged in order.

        Every non-empty line produced by the reader is compared against the
        next line of the pre-merged reference file.
        """
        reader = DualSortedReader("test_files/mini_sorted.bed", "test_files/mini_sorted2.bed", BED, False, False)
        # 'with' guarantees the reference file is closed even on assertion
        # failure; next(merged_file) replaces the Python-2-only
        # merged_file.next(), so this works on Python 2.6+ and Python 3.
        with open("test_files/mini_sorted_merged.bed") as merged_file:
            for line in reader:
                if line:
                    self.assertEqual(line, next(merged_file))
def suite():
    """Build and return the test suite containing all TestUtils cases."""
    all_tests = unittest.TestSuite()
    all_tests.addTest(unittest.makeSuite(TestUtils))
    return all_tests
| RegulatoryGenomicsUPF/pyicoteo | utest/testUtils.py | Python | gpl-3.0 | 566 |
import sys
# Convert a whitespace-delimited itemmeasures table (stdin) into lines of
# space-separated words, one line per sentence (stdout). The header row must
# contain 'word' and 'sentid' columns; their positions are located by name.
prev_sentid = 0
seen_header = False
seen_word = False
for raw_line in sys.stdin:
    fields = raw_line.strip().split()
    if not seen_header:
        # Header row: find the columns of interest.
        word_col = fields.index('word')
        sentid_col = fields.index('sentid')
        seen_header = True
        continue
    token = fields[word_col]
    cur_sentid = fields[sentid_col]
    if not seen_word:
        sep = ''        # very first word: no separator
        seen_word = True
    elif cur_sentid == prev_sentid:
        sep = ' '       # same sentence: space-separate
    else:
        sep = '\n'      # sentence boundary: newline
    prev_sentid = cur_sentid
    sys.stdout.write(sep + token)
sys.stdout.write('\n')
| modelblocks/modelblocks-release | resource-general/scripts/itemmeasures2lineitems.py | Python | gpl-3.0 | 588 |
#!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: select the project settings
    # module (unless the environment already provides one), then delegate to
    # Django's command-line machinery (runserver, migrate, ...).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "arsoft.web.ddns.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| aroth-arsoft/arsoft-web-ddns | manage.py | Python | gpl-3.0 | 259 |
import numpy as np
import sys
import os
import time
from ase import Atom, Atoms
from ase.visualize import view
from ase.units import Bohr
from ase.structure import bulk
from gpaw import GPAW
from gpaw.atom.basis import BasisMaker
from gpaw.response.df import DF
from gpaw.mpi import serial_comm, rank, size
from gpaw.utilities import devnull
# Ground state calculation
# NOTE(review): this is a Python 2 script (see the bare "print" statement near
# the end) and requires ASE + an old GPAW with gpaw.response.df.DF.
#
# Primitive fcc Al cell: LDA ground state on a 4x4x4 k-point grid, saved with
# wavefunctions ('all') for the response calculation.
a = 4.043
atoms = bulk('Al', 'fcc', a=a)
atoms.center()
calc = GPAW(gpts=(12,12,12),
            kpts=(4,4,4),
            xc='LDA')

atoms.set_calculator(calc)
atoms.get_potential_energy()
calc.write('Al1.gpw','all')

# Excited state calculation: EELS spectrum at q = (1/4, 0, 0) (units of the
# primitive reciprocal lattice), 0-24 eV, broadening eta=0.2, cutoff 50 eV.
q = np.array([1./4.,0.,0.])
w = np.linspace(0, 24, 241)

df = DF(calc='Al1.gpw', q=q, w=w, eta=0.2, ecut=50)
#df.write('Al.pckl')
df.get_EELS_spectrum(filename='EELS_Al_1')

# Same crystal as an 8-atom conventional-style supercell (2x the primitive
# cell in each lattice direction), with correspondingly halved k-point grid.
atoms = Atoms('Al8',scaled_positions=[(0,0,0),
                                      (0.5,0,0),
                                      (0,0.5,0),
                                      (0,0,0.5),
                                      (0.5,0.5,0),
                                      (0.5,0,0.5),
                                      (0.,0.5,0.5),
                                      (0.5,0.5,0.5)],
              cell=[(0,a,a),(a,0,a),(a,a,0)],
              pbc=True)

calc = GPAW(gpts=(24,24,24),
            kpts=(2,2,2),
            xc='LDA')

atoms.set_calculator(calc)
atoms.get_potential_energy()
calc.write('Al2.gpw','all')

# Excited state calculation: the equivalent q in the supercell's reciprocal
# lattice is (1/2, 0, 0); the two spectra should therefore agree.
q = np.array([1./2.,0.,0.])
w = np.linspace(0, 24, 241)

df = DF(calc='Al2.gpw', q=q, w=w, eta=0.2, ecut=50)
#df.write('Al.pckl')
df.get_EELS_spectrum(filename='EELS_Al_2')

# Regression check: the primitive-cell and supercell spectra must agree to
# within 0.2% pointwise (first frequency point skipped to avoid dividing by
# values at w=0).
d1 = np.loadtxt('EELS_Al_1')
d2 = np.loadtxt('EELS_Al_2')
error1 = (d1[1:,1] - d2[1:,1]) / d1[1:,1] * 100
error2 = (d1[1:,2] - d2[1:,2]) / d1[1:,2] * 100
if error1.max() > 0.2 or error2.max() > 0.2: # percent
    print error1.max(), error2.max()
    raise ValueError('Pls check spectrum !')

#if rank == 0:
#    os.remove('Al1.gpw')
#    os.remove('Al2.gpw')
| ajylee/gpaw-rtxs | gpaw/test/aluminum_testcell.py | Python | gpl-3.0 | 1,942 |
"""Injection control subpackage."""
| lnls-sirius/dev-packages | siriuspy/siriuspy/injctrl/__init__.py | Python | gpl-3.0 | 36 |
import os
CSRF_ENABLED = True
# Deployment environment; anything other than 'dev' is treated as production.
ENV = os.environ.get('ENVIRONMENT', 'dev')
MODEL_HASH = os.environ.get('MODEL_HASH')
# Directory containing this config module.
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))
# Repository root: one level above this module (two names kept for
# compatibility with code using either ROOT_PATH or BASE_DIR).
ROOT_PATH = BASE_DIR = os.path.join(os.path.dirname(__file__), '..')
SECRET_KEY = os.environ.get('SECRET_KEY')
STATIC_FOLDER = os.path.join(ROOT_PATH, 'static')
TEMPLATE_FOLDER = os.path.join(ROOT_PATH, 'templates')
SQLALCHEMY_MIGRATE_REPO = os.path.join(ROOT_PATH, 'db_repository')

if ENV == 'dev':
    # Local development: debug server on port 7070, local Postgres.
    PORT = 7070
    APP_BASE_LINK = 'http://localhost:{}'.format(PORT)
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/dev_db'
else:
    # Production: everything comes from the environment; the DB URI is built
    # from Docker-style linked-container variables (POSTGRES_*).
    APP_BASE_LINK = os.environ.get('APP_BASE_LINK')
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = 'postgresql://{}:{}@{}:{}/{}'.format(os.environ.get('POSTGRES_ENV_POSTGRES_USER'),
                                                                  os.environ.get('POSTGRES_ENV_POSTGRES_PASSWORD'),
                                                                  os.environ.get('POSTGRES_PORT_5432_TCP_ADDR'),
                                                                  os.environ.get('POSTGRES_PORT_5432_TCP_PORT'),
                                                                  os.environ.get('POSTGRES_ENV_POSTGRESQL_DB'))
| opsolutely/flask-nginx-starter | app/config.py | Python | gpl-3.0 | 1,274 |
import numpy as np
import torch
import time
from torch.autograd import Variable
'''
fast beam search
'''
def repackage_hidden(h):
    """Detach hidden state(s) from their computation history.

    Wraps each hidden state in a fresh Variable built from its underlying
    data tensor so that backpropagation does not flow into earlier steps.

    :param h: a Variable/Tensor, or an (arbitrarily nested) tuple of them,
        e.g. the (h, c) pair of an LSTM.
    :return: a detached Variable, or a tuple mirroring the input structure.
    """
    # isinstance() instead of ``type(h) == Variable``: accepts subclasses and
    # keeps working on modern PyTorch, where Variable and Tensor are merged
    # (a plain Tensor no longer has type exactly Variable).
    if isinstance(h, Variable):
        return Variable(h.data)
    # Otherwise assume an iterable of hidden states; repackage recursively.
    return tuple(repackage_hidden(v) for v in h)
def tensor_transformer(seq0, batch_size, beam_size):
    """Tile every beam entry ``beam_size`` times and flatten the beam axes.

    Takes a (batch, beam, feat) tensor and returns a
    (batch, beam*beam, feat) tensor where each original beam row appears
    ``beam_size`` times consecutively.
    """
    tiled = seq0.unsqueeze(2).repeat(1, 1, beam_size, 1)
    return tiled.contiguous().view(batch_size, beam_size * beam_size, tiled.size(3))
'''
First beam search
'''
def fast_beam_search_1(
    model_emb,
    model_s2s,
    src_text_rep,
    vocab2id,
    batch_size,
    beam_size,
    trg_len,
    encoder_hy,
    hidden_,
    h_attn_new,
    p_gen_new,
    past_attn_new,
    pt_idx
):
    """Beam search over the first-pass decoder.

    Runs ``trg_len`` decoding steps, keeping the ``beam_size`` best partial
    hypotheses per batch element, and records the decoder attention vector of
    the surviving beams at every step (consumed later by the second pass).

    Returns ``(beam_seq, beam_prb, beam_h_attn)``:
    - ``beam_seq``: (batch, beam, trg_len+1) token ids, starting with <s>;
    - ``beam_prb``: (batch, beam) accumulated scores from topk of the model
      output (presumably log-probabilities - confirm against model_s2s);
    - ``beam_h_attn``: (trg_len, batch, beam, attn_dim) attention history.

    NOTE(review): tensors are created with ``.cuda()``, so a GPU is required.
    """
    (h0_new, c0_new) = hidden_
    # All beams start with <s>; the rest is <pad> until filled in.
    beam_seq = Variable(torch.LongTensor(
        batch_size, beam_size, trg_len+1).fill_(vocab2id['<pad>'])).cuda()
    beam_seq[:, :, 0] = vocab2id['<s>']
    beam_prb = torch.FloatTensor(batch_size, beam_size).fill_(0.0)
    last_wd = Variable(torch.LongTensor(
        batch_size, beam_size, 1).fill_(vocab2id['<s>'])).cuda()
    beam_h_attn = Variable(torch.FloatTensor(
        trg_len, batch_size, beam_size, h_attn_new.size(1)).fill_(0.0)).cuda()
    for j in range(trg_len):
        # One decoder step for all (batch * beam) hypotheses at once.
        last_emb = model_emb(last_wd.view(-1, 1))
        output_s2s, (h0, c0), h_attn, past_attn = model_s2s.forward_onestep_decoder1(
            j,
            last_emb,
            (h0_new, c0_new),
            h_attn_new,
            encoder_hy,
            p_gen_new,
            past_attn_new,
            pt_idx
        )
        p_gen_new.fill_(0.0)
        # Detach the recurrent state so gradients don't flow across steps.
        (h0, c0) = repackage_hidden((h0, c0))
        prob, wds = output_s2s.data.topk(k=beam_size)
        prob = prob.view(batch_size, beam_size, prob.size(1), prob.size(2))
        wds = wds.view(batch_size, beam_size, wds.size(1), wds.size(2))
        if j == 0:
            # First step: all beams are identical, so seed the beam_size
            # hypotheses from beam 0's top-k continuations only.
            beam_prb = prob[:, 0, 0]
            beam_seq[:, :, 1] = wds[:, 0, 0]
            last_wd = Variable(wds[:, 0, 0].unsqueeze(2).clone()).cuda()
            h0_new = h0
            c0_new = c0
            h_attn_new = h_attn
            past_attn_new = past_attn
            beam_h_attn[j] = h_attn_new.view(batch_size, beam_size, h_attn_new.size(-1))
            continue
        # Expand each hypothesis with its beam_size continuations
        # (beam_size * beam_size candidates per batch element).
        cand_seq = tensor_transformer(beam_seq, batch_size, beam_size)
        cand_seq[:, :, j+1] = wds.squeeze(2).view(batch_size, -1)
        cand_last_wd = wds.squeeze(2).view(batch_size, -1)
        cand_prob = beam_prb.unsqueeze(1).repeat(1, beam_size, 1).transpose(1,2)
        cand_prob += prob[:, :, 0]
        cand_prob = cand_prob.contiguous().view(batch_size, beam_size*beam_size)
        # Reshape states to (batch, beam, dim) and tile them to line up with
        # the candidate ordering.
        h0_new = h0_new.view(batch_size, beam_size, h0_new.size(-1))
        c0_new = c0_new.view(batch_size, beam_size, c0_new.size(-1))
        h_attn_new = h_attn_new.view(batch_size, beam_size, h_attn_new.size(-1))
        past_attn_new = past_attn_new.view(batch_size, beam_size, past_attn_new.size(-1))
        h0 = h0.view(batch_size, beam_size, h0.size(-1))
        h0 = tensor_transformer(h0, batch_size, beam_size)
        c0 = c0.view(batch_size, beam_size, c0.size(-1))
        c0 = tensor_transformer(c0, batch_size, beam_size)
        h_attn = h_attn.view(batch_size, beam_size, h_attn.size(-1))
        h_attn = tensor_transformer(h_attn, batch_size, beam_size)
        past_attn = past_attn.view(batch_size, beam_size, past_attn.size(-1))
        past_attn = tensor_transformer(past_attn, batch_size, beam_size)
        # Keep the overall best beam_size candidates per batch element and
        # gather the matching sequences/states in place.
        tmp_prb, tmp_idx = cand_prob.topk(k=beam_size, dim=1)
        for x in range(batch_size):
            for b in range(beam_size):
                last_wd[x, b] = cand_last_wd[x, tmp_idx[x, b]]
                beam_seq[x, b] = cand_seq[x, tmp_idx[x, b]]
                beam_prb[x, b] = tmp_prb[x, b]
                h0_new[x, b] = h0[x, tmp_idx[x, b]]
                c0_new[x, b] = c0[x, tmp_idx[x, b]]
                h_attn_new[x, b] = h_attn[x, tmp_idx[x, b]]
                past_attn_new[x, b] = past_attn[x, tmp_idx[x, b]]
        beam_h_attn[j] = h_attn_new
        # Flatten states back to (batch*beam, dim) for the next decoder step.
        h0_new = h0_new.view(-1, h0_new.size(-1))
        c0_new = c0_new.view(-1, c0_new.size(-1))
        h_attn_new = h_attn_new.view(-1, h_attn_new.size(-1))
        past_attn_new = past_attn_new.view(-1, past_attn_new.size(-1))
    return beam_seq, beam_prb, beam_h_attn
'''
second beam search
'''
def fast_beam_search_2(
    model_emb,
    model_s2s,
    src_text_rep,
    vocab2id,
    batch_size,
    beam_size,
    trg_len,
    encoder_hy,
    hidden_,
    h_attn21_new,
    h_attn22_new,
    p_gen21_new,
    past_attn21_new,
    past_attn22_new,
    beam_h_attn1,
    pt_idx
):
    """Beam search over the second-pass (deliberation) decoder.

    Mirrors :func:`fast_beam_search_1` but drives
    ``forward_onestep_decoder2``, which additionally attends over
    ``beam_h_attn1`` (the first pass's attention history) and therefore
    maintains two attention states (``*21*`` and ``*22*``) per hypothesis.

    Returns ``(beam_seq, beam_prb)`` shaped as in the first pass.

    NOTE(review): tensors are created with ``.cuda()``, so a GPU is required.
    """
    (h0_new, c0_new) = hidden_
    # All beams start with <s>; the rest is <pad> until filled in.
    beam_seq = Variable(torch.LongTensor(batch_size, beam_size, trg_len+1).fill_(vocab2id['<pad>'])).cuda()
    beam_seq[:, :, 0] = vocab2id['<s>']
    beam_prb = torch.FloatTensor(batch_size, beam_size).fill_(0.0)
    last_wd = Variable(torch.LongTensor(batch_size, beam_size, 1).fill_(vocab2id['<s>'])).cuda()
    for j in range(trg_len):
        # One decoder step for all (batch * beam) hypotheses at once.
        last_emb = model_emb(last_wd.view(-1, 1))
        output_s2s, (h0, c0), h_attn21, h_attn22, past_attn21, past_attn22 = model_s2s.forward_onestep_decoder2(
            j,
            last_emb,
            (h0_new, c0_new),
            h_attn21_new,
            h_attn22_new,
            encoder_hy,
            p_gen21_new,
            past_attn21_new,
            past_attn22_new,
            beam_h_attn1,
            pt_idx
        )
        p_gen21_new.fill_(0.0)
        # Detach the recurrent state so gradients don't flow across steps.
        (h0, c0) = repackage_hidden((h0, c0))
        prob, wds = output_s2s.data.topk(k=beam_size)
        prob = prob.view(batch_size, beam_size, prob.size(1), prob.size(2))
        wds = wds.view(batch_size, beam_size, wds.size(1), wds.size(2))
        if j == 0:
            # First step: seed the beams from beam 0's top-k continuations.
            beam_prb = prob[:, 0, 0]
            beam_seq[:, :, 1] = wds[:, 0, 0]
            last_wd = Variable(wds[:, 0, 0].unsqueeze(2).clone()).cuda()
            h0_new = h0
            c0_new = c0
            h_attn21_new = h_attn21
            h_attn22_new = h_attn22
            past_attn21_new = past_attn21
            past_attn22_new = past_attn22
            continue
        # Expand each hypothesis with its beam_size continuations.
        cand_seq = tensor_transformer(beam_seq, batch_size, beam_size)
        cand_seq[:, :, j+1] = wds.squeeze(2).view(batch_size, -1)
        cand_last_wd = wds.squeeze(2).view(batch_size, -1)
        cand_prob = beam_prb.unsqueeze(1).repeat(1, beam_size, 1).transpose(1,2)
        cand_prob += prob[:, :, 0]
        cand_prob = cand_prob.contiguous().view(batch_size, beam_size*beam_size)
        # Reshape states to (batch, beam, dim) and tile to match candidates.
        h0_new = h0_new.view(batch_size, beam_size, h0_new.size(-1))
        c0_new = c0_new.view(batch_size, beam_size, c0_new.size(-1))
        h_attn21_new = h_attn21_new.view(batch_size, beam_size, h_attn21_new.size(-1))
        h_attn22_new = h_attn22_new.view(batch_size, beam_size, h_attn22_new.size(-1))
        past_attn21_new = past_attn21_new.view(batch_size, beam_size, past_attn21_new.size(-1))
        past_attn22_new = past_attn22_new.view(batch_size, beam_size, past_attn22_new.size(-1))
        h0 = h0.view(batch_size, beam_size, h0.size(-1))
        h0 = tensor_transformer(h0, batch_size, beam_size)
        c0 = c0.view(batch_size, beam_size, c0.size(-1))
        c0 = tensor_transformer(c0, batch_size, beam_size)
        h_attn21 = h_attn21.view(batch_size, beam_size, h_attn21.size(-1))
        h_attn21 = tensor_transformer(h_attn21, batch_size, beam_size)
        h_attn22 = h_attn22.view(batch_size, beam_size, h_attn22.size(-1))
        h_attn22 = tensor_transformer(h_attn22, batch_size, beam_size)
        past_attn21 = past_attn21.view(batch_size, beam_size, past_attn21.size(-1))
        past_attn21 = tensor_transformer(past_attn21, batch_size, beam_size)
        past_attn22 = past_attn22.view(batch_size, beam_size, past_attn22.size(-1))
        past_attn22 = tensor_transformer(past_attn22, batch_size, beam_size)
        # Keep the overall best beam_size candidates per batch element and
        # gather the matching sequences/states in place.
        tmp_prb, tmp_idx = cand_prob.topk(k=beam_size, dim=1)
        for x in range(batch_size):
            for b in range(beam_size):
                last_wd[x, b] = cand_last_wd[x, tmp_idx[x, b]]
                beam_seq[x, b] = cand_seq[x, tmp_idx[x, b]]
                beam_prb[x, b] = tmp_prb[x, b]
                h0_new[x, b] = h0[x, tmp_idx[x, b]]
                c0_new[x, b] = c0[x, tmp_idx[x, b]]
                h_attn21_new[x, b] = h_attn21[x, tmp_idx[x, b]]
                h_attn22_new[x, b] = h_attn22[x, tmp_idx[x, b]]
                past_attn21_new[x, b] = past_attn21[x, tmp_idx[x, b]]
                past_attn22_new[x, b] = past_attn22[x, tmp_idx[x, b]]
        # Flatten states back to (batch*beam, dim) for the next decoder step.
        h0_new = h0_new.view(-1, h0_new.size(-1))
        c0_new = c0_new.view(-1, c0_new.size(-1))
        h_attn21_new = h_attn21_new.view(-1, h_attn21_new.size(-1))
        h_attn22_new = h_attn22_new.view(-1, h_attn22_new.size(-1))
        past_attn21_new = past_attn21_new.view(-1, past_attn21_new.size(-1))
        past_attn22_new = past_attn22_new.view(-1, past_attn22_new.size(-1))
    return beam_seq, beam_prb
| tshi04/machine-learning-codes | deliberation_network/utils.py | Python | gpl-3.0 | 9,232 |
from jabbapylib.distance.dist import lev_dist, ham_dist, similarity
def test_lev_dist():
    # Levenshtein (edit) distance: minimum number of single-character
    # insertions, deletions and substitutions between the two strings.
    assert lev_dist('ag-tcc', 'cgctca') == 3
    assert lev_dist('GUMBO', 'GAMBOL') == 2
    assert lev_dist('Google', 'Yahoo!') == 6
def test_ham_dist():
    # Hamming distance: number of positions at which equal-length strings
    # differ ('toned' vs 'roses' differ at indices 0, 2, 4).
    assert ham_dist('toned', 'roses') == 3
def test_similarity():
    # similarity: number of matching positions (len - hamming distance here).
    assert similarity('toned', 'roses') == 2
| jabbalaci/jabbapylib | tests/distance/test_dist.py | Python | gpl-3.0 | 358 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple command line interface to pyluxafor.
"""
from __future__ import division, print_function, absolute_import
import argparse
import sys
import logging
from pyluxafor import Devices
from pyluxafor import Wave, Pattern, Leds
from pyluxafor import __version__
__author__ = 'Magnus Isaksson'
__copyright__ = 'Magnus Isaksson'
__license__ = 'gpl3'
_logger = logging.getLogger(__name__)
def add_jump2color_parser(subparsers):
    """Register the 'jump2color' subcommand on the given subparser group."""
    sub = subparsers.add_parser('jump2color',
                                description='Switches color on your Luxafor device.',
                                help='Switches color on your Luxafor device.')
    sub.set_defaults(runner=jump2color)
    sub.add_argument('-c',
                     '--color',
                     required=True,
                     help='Color in 3 hex codes (e.g. #00FF00 for green).')
def jump2color(args):
    """Immediately switch all LEDs on the first device to ``args.color``."""
    with Devices().first as device:
        device.jump2color(args.color, leds=Leds.all)
    return 'Jumping to color: {}'.format(args.color)
def add_fade2color_parser(subparsers):
    """Register the 'fade2color' subcommand on the given subparser group."""
    sub = subparsers.add_parser('fade2color',
                                description='Fade to color on your Luxafor device.',
                                help='Fade to color on your Luxafor device.')
    sub.set_defaults(runner=fade2color)
    sub.add_argument('-c',
                     '--color',
                     required=True,
                     help='Color in 3 hex codes (e.g. #00FF00 for green).')
    sub.add_argument('-s',
                     '--speed',
                     required=False,
                     default=100,
                     type=int,
                     help='Fading speed [0-255], low value equals higher speed.')
def fade2color(args):
    """Fade all LEDs on the first device to ``args.color`` at ``args.speed``.

    Returns an error string (without touching the device) when the speed is
    out of range.
    """
    if not 0 <= args.speed <= 255:
        return 'Error: Speed needs to be an integer between 0 and 255.'
    with Devices().first as device:
        device.fade2color(args.color, leds=Leds.all, speed=args.speed)
    return 'Fading to color: {} with speed: {}'.format(args.color, args.speed)
def add_blink_parser(subparsers):
    """Register the 'blink' subcommand on the given subparser group."""
    sub = subparsers.add_parser('blink',
                                description='Blink color on your Luxafor device.',
                                help='Blink color on your Luxafor device.')
    sub.set_defaults(runner=blink)
    sub.add_argument('-c',
                     '--color',
                     required=True,
                     help='Color in 3 hex codes (e.g. #00FF00 for green).')
    sub.add_argument('-s',
                     '--speed',
                     required=False,
                     default=100,
                     type=int,
                     help='Blink speed [0-255], low value equals higher speed.')
    sub.add_argument('-r',
                     '--repeats',
                     required=False,
                     default=2,
                     type=int,
                     help='Repeats [1-255].')
def blink(args):
    """Blink ``args.color`` on all LEDs, ``args.repeats`` times at ``args.speed``.

    Returns an error string (without touching the device) when speed or
    repeats are out of range.
    """
    if args.speed < 0 or args.speed > 255:
        # Fixed message: previously read "needs to both be", a leftover from a
        # combined speed/repeats validation.
        return 'Error: Speed needs to be an integer between 0 and 255.'
    if args.repeats < 1 or args.repeats > 255:
        return 'Error: Repeats needs to be an integer between 1 and 255.'
    with Devices().first as d:
        d.blink(args.color, leds=Leds.all, speed=args.speed, repeats=args.repeats)
    return 'Blinking color: {}, {} times with speed: {}'.format(args.color, args.repeats, args.speed)
def add_pattern_parser(subparsers):
    """Register the 'pattern' subcommand on the given subparser group."""
    sub = subparsers.add_parser('pattern',
                                description='Run pattern on your Luxafor device.',
                                help='Run pattern on your Luxafor device.')
    sub.set_defaults(runner=pattern)
    # Help text is the list of valid pattern names.
    sub.add_argument('-p',
                     '--pattern',
                     required=True,
                     help=', '.join(Pattern._fields))
    sub.add_argument('-r',
                     '--repeats',
                     type=int,
                     default=2,
                     required=False,
                     help='Repeats [1-255].')
def pattern(args):
    """Run the named built-in pattern ``args.repeats`` times.

    The pattern name is matched case-insensitively against ``Pattern._fields``.
    Returns an error string (without touching the device) on bad input.
    """
    args.pattern = args.pattern.lower()
    if args.pattern not in Pattern._fields:
        return 'Error: {} is not a valid pattern.'.format(args.pattern)
    if args.repeats < 1 or args.repeats > 255:
        # Fixed grammar: "a integer" -> "an integer" (matches the other
        # subcommands' messages).
        return 'Error: Repeats needs to be an integer between 1 and 255.'
    with Devices().first as d:
        d.pattern(pattern_type=getattr(Pattern, args.pattern), repeats=args.repeats)
    return 'Running pattern {} {} times.'.format(args.pattern, args.repeats)
def add_wave_parser(subparsers):
    """Register the 'wave' subcommand on the given subparser group."""
    sub = subparsers.add_parser('wave',
                                description='Run wave on your Luxafor device.',
                                help='Run wave on your Luxafor device.')
    sub.set_defaults(runner=wave)
    sub.add_argument('-c',
                     '--color',
                     required=True,
                     help='Color in 3 hex codes (e.g. #00FF00 for green).')
    # Help text is the list of valid wave type names.
    sub.add_argument('-w',
                     '--wave',
                     required=True,
                     help=', '.join(Wave._fields))
    sub.add_argument('-s',
                     '--speed',
                     required=False,
                     default=100,
                     type=int,
                     help='Blink speed [0-255], low value equals higher speed.')
    sub.add_argument('-r',
                     '--repeats',
                     required=False,
                     default=2,
                     type=int,
                     help='Repeats [1-255].')
def wave(args):
    """Run a wave animation of ``args.color`` on the first device.

    Bug fix: the wave type was validated against its lower-cased form
    (``args.pattern``) but then looked up on ``Wave`` via the raw
    ``args.wave``, so any non-lowercase input that passed validation crashed
    with AttributeError. The lookup now uses the validated, lower-cased name.
    Also fixed "a integer" -> "an integer" in the repeats error message.
    """
    args.pattern = args.wave.lower()
    if args.pattern not in Wave._fields:
        return 'Error: {} is not a valid wave type.'.format(args.pattern)
    if args.speed < 0 or args.speed > 255:
        return 'Error: Speed needs to be an integer between 0 and 255.'
    if args.repeats < 1 or args.repeats > 255:
        return 'Error: Repeats needs to be an integer between 1 and 255.'
    with Devices().first as d:
        d.wave(color=args.color, wave_type=getattr(Wave, args.pattern), speed=args.speed, repeats=args.repeats)
    return 'Running a {} wave with color {} and speed {}, {} times.'.format(args.wave, args.color,
                                                                            args.speed, args.repeats)
def add_off_parser(subparsers):
    """Register the 'off' subcommand on the given subparser group."""
    sub = subparsers.add_parser('off',
                                description='Turn all LEDs of on your Luxafor device.',
                                help='Turn all LEDs of on your Luxafor device.')
    sub.set_defaults(runner=off)
def off(args):
    """Switch every LED off on the first device found."""
    with Devices().first as d:
        d.off()
    # Fixed typo in the user-facing message: "Turning of" -> "Turning off".
    return 'Turning off all LEDs on your device.'
def add_list_devices_parser(subparsers):
    """Register the 'devices' subcommand on the given subparser group."""
    sub = subparsers.add_parser('devices',
                                description='List all Luxafor devices found on your system.',
                                help='List all Luxafor devices found on your system.')
    sub.set_defaults(runner=list_devices)
def list_devices(args):
    """Return a human-readable listing of every Luxafor device on the system."""
    found = ['Product: {}, Manufacturer: {}, Serial #: {}'.format(d.conn.product,
                                                                 d.conn.manufacturer,
                                                                 d.conn.serial_number.encode('utf-8')) for d in Devices().list]
    if found:
        body = '\n'.join(found)
    else:
        body = 'Sorry, no Luxafor device found in the system.'
    # NOTE(review): on Python 3, .encode('utf-8') makes the serial render as
    # b'...' - presumably a Python 2 leftover; confirm before changing.
    return '\nFound {} devices connected to your system.\n{}'.format(len(found), body)
def parse_args(args):
    """
    Parse command line parameters

    :param args: command line parameters as list of strings
    :return: command line parameters as :obj:`argparse.Namespace`
    """
    parser = argparse.ArgumentParser(
        description="Simple command line interface using pyluxafor.")
    # Global arguments
    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='pyluxafor {ver}'.format(ver=__version__))
    # Sub parsers: register every subcommand (registration order determines
    # the order shown in --help).
    subparsers = parser.add_subparsers(title='subcommands')
    for register in (add_list_devices_parser,
                     add_jump2color_parser,
                     add_fade2color_parser,
                     add_blink_parser,
                     add_pattern_parser,
                     add_wave_parser,
                     add_off_parser):
        register(subparsers)
    return parser
def main(args):
    """Parse argv-style ``args`` and dispatch to the selected subcommand."""
    parser = parse_args(args)
    parsed = parser.parse_args(args)
    if 'runner' not in parsed:
        # No subcommand selected: show usage instead of crashing.
        parser.print_help()
    else:
        print(parsed.runner(parsed))
def run():
    # Console entry point: log INFO+ to stdout, then run with the real CLI
    # arguments (argv[0], the program name, is stripped).
    logging.basicConfig(level=logging.INFO, stream=sys.stdout)
    main(sys.argv[1:])
if __name__ == "__main__":
    run()
| mais4719/PyLuxafor | pyluxafor/cli.py | Python | gpl-3.0 | 9,126 |
'''
*** Ki Renamer ***
usage: kir <filter> <rule> [-p] [-r]
<filter> Regex - Filter the files to rename
<rule> Renaming rule
Options:
--version Displays the current version of Ki Renamer
-h, --help Show usage and options
-p, --preview Print more informations
-r, --recursive Also rename files in sub-directories recursively
Use a regular expression to select the files to rename in the working directory;
'''
import os
import re
from docopt import docopt
__VERSION__ = "0.1"
# args = docopt(__doc__, version=__VERSION__, options_first=False)
### for debugging purpose
preview = True
working_dir = r".\test"
kir_filter = "^(.*)(?i:File)(.*)$"
kir_rule = "\\1abc\\2"
recursive = False
###
### future
# preview = args["--preview"]
# recursive = args["--recursive"]
# kir_filter = args["<filter>"]
# kir_rule = args["<rule>"]
# working_dir = os.getcwd()
###
def _ren(old_name, new_name):
if preview:
print("{} > {}".format(old_name, new_name))
else:
return os.rename( os.path.join(working_dir, old_name),
os.path.join(working_dir, new_name) )
if __name__ == '__main__':
regex = re.compile( kir_filter )
# build the 'to be renamed' list
to_rename = []
files_count = 0
for root, dirs, files in os.walk(working_dir, onerror=None, followlinks=False):
for name in files:
files_count += 1
new_name = regex.sub(kir_rule, name)
if name != new_name :
to_rename.append( (name, new_name ) )
for name in dirs:
pass
if not recursive:
break
# control the result
print("{}/{} Files to rename".format( len(to_rename), files_count))
# apply rename (or preview operation)
for filename, newname in to_rename:
_ren(filename, newname)
print("> done")
| cro-ki/ki-renamer | kir.py | Python | gpl-3.0 | 1,937 |
# -*- coding:utf-8 -*-
'''Created on 2014-8-7 @author: Administrator '''
from sys import path as sys_path
if not '..' in sys_path:sys_path.append("..") #用于import上级目录的模块
import web
#早起的把一个文件分成多个文件,再把class导入
from login.login import (index,login,loginCheck,In,reset,register,find_password)
from blog.blog import (write_blog,upload,blog_content_manage,Get,Del,blog_single_self,blog_single_other)
from admin.admin import (adminAdd,adminGet,adminDel,adminEdit)
#后期应用web.py 的子应用
from wiki.view import wiki_app
from download.download import download_app
from meeting.meeting import meeting_app
from bbs.bbs import bbs_app
# URL routing table: alternating (pattern, handler-class-name) pairs.
urls=(
    '/','index',
    '/login','login',
    '/loginCheck','loginCheck',
    '/(admin|user_blog)','In',
    '/reset/(.*)','reset',
    '/register','register',
    '/find_password','find_password',
    '/write_blog','write_blog',
    '/upload','upload',
    '/blog_content_manage','blog_content_manage',
    '/Get/classification','Get',
    '/Del/blog_content','Del',
    '/blog_single_self','blog_single_self',
    '/blog_single_other','blog_single_other',
    '/admin/add','adminAdd',
    '/admin/get','adminGet',
    '/admin/del','adminDel',
    '/admin/edit','adminEdit',
    '/wiki',wiki_app,
    '/download',download_app,
    '/meeting',meeting_app,
    '/bbs',bbs_app,
)
app = web.application(urls ,locals())
# Sessions normally only work with web.config.debug = False; the guard below
# is the usual workaround so they also survive module reloading in debug
# mode. In production, web.config.debug is generally set to False.
web.config.debug = True
if web.config.get('_session') is None:
    session = web.session.Session(app,web.session.DiskStore('sessions'))
    web.config._session=session
else:
    session=web.config._session
# Expose the session on web.ctx for every request, so other modules can
# access it without importing this one (solves cross-module session sharing).
def session_hook():web.ctx.session=session
app.add_processor(web.loadhook(session_hook))
if __name__=='__main__':
    app.run()
| lqe/EconomyCompensation | code.py | Python | gpl-3.0 | 2,064 |
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import glob
import os
import re
import shlex
import subprocess
import stat
import sickbeard
from sickbeard import db
from sickbeard import classes
from sickbeard import common
from sickbeard import exceptions
from sickbeard import helpers
from sickbeard import history
from sickbeard import logger
from sickbeard import notifiers
from sickbeard import show_name_helpers
from sickbeard import scene_exceptions
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
from sickbeard.name_parser.parser import NameParser, InvalidNameException
from lib.tvdb_api import tvdb_api, tvdb_exceptions
class PostProcessor(object):
"""
A class which will process a media file according to the post processing settings in the config.
"""
EXISTS_LARGER = 1
EXISTS_SAME = 2
EXISTS_SMALLER = 3
DOESNT_EXIST = 4
IGNORED_FILESTRINGS = [ "/.AppleDouble/", ".DS_Store" ]
NZB_NAME = 1
FOLDER_NAME = 2
FILE_NAME = 3
def __init__(self, file_path, nzb_name = None):
"""
Creates a new post processor with the given file path and optionally an NZB name.
file_path: The path to the file to be processed
nzb_name: The name of the NZB which resulted in this file being downloaded (optional)
"""
# absolute path to the folder that is being processed
self.folder_path = ek.ek(os.path.dirname, ek.ek(os.path.abspath, file_path))
# full path to file
self.file_path = file_path
# file name only
self.file_name = ek.ek(os.path.basename, file_path)
# the name of the folder only
self.folder_name = ek.ek(os.path.basename, self.folder_path)
# name of the NZB that resulted in this folder
self.nzb_name = nzb_name
self.in_history = False
self.release_group = None
self.is_proper = False
self.good_results = {self.NZB_NAME: False,
self.FOLDER_NAME: False,
self.FILE_NAME: False}
self.log = ''
def _log(self, message, level=logger.MESSAGE):
    """
    Log *message* at *level* and also append it to this processor's
    internal log string so the whole run can be shown to the user later.

    message: The string to log (unicode)
    level: The log level to use (optional)
    """
    logger.log(message, level)
    self.log = self.log + message + '\n'
def _checkForExistingFile(self, existing_file):
    """
    Checks if a file exists already and if it does whether it's bigger or smaller than
    the file we are post processing.

    existing_file: The file to compare to

    Returns:
        DOESNT_EXIST if the file doesn't exist
        EXISTS_LARGER if the file exists and is larger than the file we are post processing
        EXISTS_SMALLER if the file exists and is smaller than the file we are post processing
        EXISTS_SAME if the file exists and is the same size as the file we are post processing
    """
    # no path at all: nothing to replace
    if not existing_file:
        self._log(u"There is no existing file so there's no worries about replacing it", logger.DEBUG)
        return PostProcessor.DOESNT_EXIST
    # path given but nothing on disk: also nothing to replace
    if not ek.ek(os.path.isfile, existing_file):
        self._log(u"File "+existing_file+" doesn't exist so there's no worries about replacing it", logger.DEBUG)
        return PostProcessor.DOESNT_EXIST
    # the file exists -- classify it by comparing sizes
    if ek.ek(os.path.getsize, existing_file) > ek.ek(os.path.getsize, self.file_path):
        self._log(u"File "+existing_file+" is larger than "+self.file_path, logger.DEBUG)
        return PostProcessor.EXISTS_LARGER
    if ek.ek(os.path.getsize, existing_file) == ek.ek(os.path.getsize, self.file_path):
        self._log(u"File "+existing_file+" is the same size as "+self.file_path, logger.DEBUG)
        return PostProcessor.EXISTS_SAME
    self._log(u"File "+existing_file+" is smaller than "+self.file_path, logger.DEBUG)
    return PostProcessor.EXISTS_SMALLER
def _list_associated_files(self, file_path, subtitles_only=False):
    """
    For a given file path searches for files with the same name but different extension and returns their absolute paths

    file_path: The file to check for associated files
    subtitles_only: if True, only return associated subtitle files

    Returns: A list containing all files which are associated to the given file
    """
    if not file_path:
        return []
    file_path_list = []
    # everything that shares the path up to (and including) the final dot
    base_name = file_path.rpartition('.')[0]+'.'
    # don't strip it all and use cwd by accident
    if not base_name:
        return []
    # don't confuse glob with chars we didn't mean to use
    base_name = re.sub(r'[\[\]\*\?]', r'[\g<0>]', base_name)
    for associated_file_path in ek.ek(glob.glob, base_name+'*'):
        # only add associated to list
        if associated_file_path == file_path:
            continue
        # only list it if the only non-shared part is the extension or if it is a subtitle
        # NOTE(review): the subtitle check assumes every entry in
        # common.subtitleExtensions is exactly 3 characters long -- confirm
        if '.' in associated_file_path[len(base_name):] and not associated_file_path[len(associated_file_path)-3:] in common.subtitleExtensions:
            continue
        if subtitles_only and not associated_file_path[len(associated_file_path)-3:] in common.subtitleExtensions:
            continue
        file_path_list.append(associated_file_path)
    return file_path_list
def _delete(self, file_path, associated_files=False):
    """
    Deletes the file and optionally all associated files.

    file_path: The file to delete
    associated_files: True to delete all files which differ only by extension, False to leave them
    """
    if not file_path:
        return
    # figure out which files we want to delete
    file_list = [file_path]
    if associated_files:
        file_list = file_list + self._list_associated_files(file_path)
    if not file_list:
        self._log(u"There were no files associated with " + file_path + ", not deleting anything", logger.DEBUG)
        return
    # delete the file and any other files which we want to delete
    for cur_file in file_list:
        self._log(u"Deleting file " + cur_file, logger.DEBUG)
        if ek.ek(os.path.isfile, cur_file):
            # check the read-only attribute first: os.remove fails on
            # read-only files on Windows
            file_attribute = ek.ek(os.stat, cur_file)[0]
            if (not file_attribute & stat.S_IWRITE):
                # File is read-only, so make it writeable
                self._log('Read only mode on file ' + cur_file + ' Will try to make it writeable', logger.DEBUG)
                try:
                    ek.ek(os.chmod,cur_file,stat.S_IWRITE)
                except OSError:
                    # was a bare except, which would also swallow
                    # KeyboardInterrupt/SystemExit; os.chmod raises OSError
                    self._log(u'Cannot change permissions of ' + cur_file, logger.WARNING)
            ek.ek(os.remove, cur_file)
            # do the library update for synoindex
            notifiers.synoindex_notifier.deleteFile(cur_file)
def _combined_file_operation (self, file_path, new_path, new_base_name, associated_files=False, action=None, subtitles=False):
    """
    Performs a generic operation (move or copy) on a file. Can rename the file as well as change its location,
    and optionally move associated files too.

    file_path: The full path of the media file to act on
    new_path: Destination path where we want to move/copy the file to
    new_base_name: The base filename (no extension) to use during the copy. Use None to keep the same name.
    associated_files: Boolean, whether we should copy similarly-named files too
    action: function that takes an old path and new path and does an operation with them (move/copy)
    subtitles: Boolean, whether associated subtitle files should be included as well
    """
    if not action:
        self._log(u"Must provide an action for the combined file operation", logger.ERROR)
        return
    file_list = [file_path]
    if associated_files:
        file_list = file_list + self._list_associated_files(file_path)
    elif subtitles:
        file_list = file_list + self._list_associated_files(file_path, True)
    if not file_list:
        self._log(u"There were no files associated with " + file_path + ", not moving anything", logger.DEBUG)
        return
    # deal with all files
    for cur_file_path in file_list:
        cur_file_name = ek.ek(os.path.basename, cur_file_path)
        # get the extension
        cur_extension = ek.ek(os.path.splitext, cur_file_path)[1][1:]
        # check if file have subtitles language
        # (e.g. "show.eng.srt" keeps "eng.srt" as the effective extension)
        if cur_extension in common.subtitleExtensions:
            cur_lang = ek.ek(os.path.splitext, ek.ek(os.path.splitext, cur_file_path)[0])[1][1:]
            if cur_lang in sickbeard.SUBTITLES_LANGUAGES:
                cur_extension = cur_lang + '.' + cur_extension
        # replace .nfo with .nfo-orig to avoid conflicts
        if cur_extension == 'nfo':
            cur_extension = 'nfo-orig'
        # If new base name then convert name
        if new_base_name:
            new_file_name = new_base_name + '.' + cur_extension
        # if we're not renaming we still want to change extensions sometimes
        else:
            new_file_name = helpers.replaceExtension(cur_file_name, cur_extension)
        # subtitles optionally go into their own configured subfolder
        if sickbeard.SUBTITLES_DIR and cur_extension in common.subtitleExtensions:
            subs_new_path = ek.ek(os.path.join, new_path, sickbeard.SUBTITLES_DIR)
            dir_exists = helpers.makeDir(subs_new_path)
            if not dir_exists:
                logger.log(u"Unable to create subtitles folder "+subs_new_path, logger.ERROR)
            else:
                helpers.chmodAsParent(subs_new_path)
            new_file_path = ek.ek(os.path.join, subs_new_path, new_file_name)
        else:
            new_file_path = ek.ek(os.path.join, new_path, new_file_name)
        # perform the actual move/copy
        action(cur_file_path, new_file_path)
def _move(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
"""
file_path: The full path of the media file to move
new_path: Destination path where we want to move the file to
new_base_name: The base filename (no extension) to use during the move. Use None to keep the same name.
associated_files: Boolean, whether we should move similarly-named files too
"""
def _int_move(cur_file_path, new_file_path):
self._log(u"Moving file from "+cur_file_path+" to "+new_file_path, logger.DEBUG)
try:
helpers.moveFile(cur_file_path, new_file_path)
helpers.chmodAsParent(new_file_path)
except (IOError, OSError), e:
self._log("Unable to move file "+cur_file_path+" to "+new_file_path+": "+ex(e), logger.ERROR)
raise e
self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_move, subtitles=subtitles)
def _copy(self, file_path, new_path, new_base_name, associated_files=False, subtitles=False):
"""
file_path: The full path of the media file to copy
new_path: Destination path where we want to copy the file to
new_base_name: The base filename (no extension) to use during the copy. Use None to keep the same name.
associated_files: Boolean, whether we should copy similarly-named files too
"""
def _int_copy (cur_file_path, new_file_path):
self._log(u"Copying file from "+cur_file_path+" to "+new_file_path, logger.DEBUG)
try:
helpers.copyFile(cur_file_path, new_file_path)
helpers.chmodAsParent(new_file_path)
except (IOError, OSError), e:
logger.log("Unable to copy file "+cur_file_path+" to "+new_file_path+": "+ex(e), logger.ERROR)
raise e
self._combined_file_operation(file_path, new_path, new_base_name, associated_files, action=_int_copy, subtitles=subtitles)
def _history_lookup(self):
    """
    Look up the NZB name in the history and see if it contains a record for self.nzb_name

    Returns a (tvdb_id, season, []) tuple. The first two may be None if none were found.
    """
    no_result = (None, None, [])
    # without an NZB name or a folder name there is nothing to search on
    if not self.nzb_name and not self.folder_name:
        self.in_history = False
        return no_result
    # candidate names to try against the history table
    names = []
    if self.nzb_name:
        names.append(self.nzb_name)
        if '.' in self.nzb_name:
            names.append(self.nzb_name.rpartition(".")[0])
    if self.folder_name:
        names.append(self.folder_name)
    myDB = db.DBConnection()
    for curName in names:
        # the history table stores resources with separators normalised to _
        sql_results = myDB.select("SELECT * FROM history WHERE resource LIKE ?", [re.sub("[\.\-\ ]", "_", curName)])
        if len(sql_results) == 0:
            continue
        # found a match -- record it and stop searching
        self.in_history = True
        to_return = (int(sql_results[0]["showid"]), int(sql_results[0]["season"]), [])
        self._log("Found result in history: "+str(to_return), logger.DEBUG)
        # remember which of the candidate names produced the good result
        if curName == self.nzb_name:
            self.good_results[self.NZB_NAME] = True
        elif curName == self.folder_name:
            self.good_results[self.FOLDER_NAME] = True
        elif curName == self.file_name:
            self.good_results[self.FILE_NAME] = True
        return to_return
    self.in_history = False
    return no_result
def _analyze_name(self, name, file=True):
    """
    Takes a name and tries to figure out a show, season, and episode from it.

    name: A string which we want to analyze to determine show info from (unicode)
    file: Boolean, whether the name is a file name (passed through to NameParser)

    Returns a (tvdb_id, season, [episodes]) tuple. The first two may be None and episodes may be []
    if none were found.
    """
    logger.log(u"Analyzing name "+repr(name))
    to_return = (None, None, [])
    if not name:
        return to_return
    # parse the name to break it into show name, season, and episode
    np = NameParser(file)
    parse_result = np.parse(name)
    self._log("Parsed "+name+" into "+str(parse_result).decode('utf-8'), logger.DEBUG)
    if parse_result.air_by_date:
        # air-by-date shows use season -1 and the air date as the "episode";
        # _find_info converts this into a real season/episode later
        season = -1
        episodes = [parse_result.air_date]
    else:
        season = parse_result.season_number
        episodes = parse_result.episode_numbers
    to_return = (None, season, episodes)
    # do a scene reverse-lookup to get a list of all possible names
    name_list = show_name_helpers.sceneToNormalShowNames(parse_result.series_name)
    if not name_list:
        return (None, season, episodes)
    def _finalize(parse_result):
        # record release group / proper status and remember which of the
        # candidate names produced a complete, trustworthy parse
        self.release_group = parse_result.release_group
        # remember whether it's a proper
        if parse_result.extra_info:
            self.is_proper = re.search('(^|[\. _-])(proper|repack)([\. _-]|$)', parse_result.extra_info, re.I) != None
        # if the result is complete then remember that for later
        if parse_result.series_name and parse_result.season_number != None and parse_result.episode_numbers and parse_result.release_group:
            test_name = os.path.basename(name)
            if test_name == self.nzb_name:
                self.good_results[self.NZB_NAME] = True
            elif test_name == self.folder_name:
                self.good_results[self.FOLDER_NAME] = True
            elif test_name == self.file_name:
                self.good_results[self.FILE_NAME] = True
            else:
                logger.log(u"Nothing was good, found "+repr(test_name)+" and wanted either "+repr(self.nzb_name)+", "+repr(self.folder_name)+", or "+repr(self.file_name))
        else:
            logger.log("Parse result not suficent(all folowing have to be set). will not save release name", logger.DEBUG)
            logger.log("Parse result(series_name): " + str(parse_result.series_name), logger.DEBUG)
            logger.log("Parse result(season_number): " + str(parse_result.season_number), logger.DEBUG)
            logger.log("Parse result(episode_numbers): " + str(parse_result.episode_numbers), logger.DEBUG)
            logger.log("Parse result(release_group): " + str(parse_result.release_group), logger.DEBUG)
    # for each possible interpretation of that scene name
    for cur_name in name_list:
        self._log(u"Checking scene exceptions for a match on "+cur_name, logger.DEBUG)
        scene_id = scene_exceptions.get_scene_exception_by_name(cur_name)
        if scene_id:
            self._log(u"Scene exception lookup got tvdb id "+str(scene_id)+u", using that", logger.DEBUG)
            _finalize(parse_result)
            return (scene_id, season, episodes)
    # see if we can find the name directly in the DB, if so use it
    for cur_name in name_list:
        self._log(u"Looking up "+cur_name+u" in the DB", logger.DEBUG)
        db_result = helpers.searchDBForShow(cur_name)
        if db_result:
            self._log(u"Lookup successful, using tvdb id "+str(db_result[0]), logger.DEBUG)
            _finalize(parse_result)
            return (int(db_result[0]), season, episodes)
    # see if we can find the name with a TVDB lookup
    for cur_name in name_list:
        try:
            t = tvdb_api.Tvdb(custom_ui=classes.ShowListUI, **sickbeard.TVDB_API_PARMS)
            self._log(u"Looking up name "+cur_name+u" on TVDB", logger.DEBUG)
            showObj = t[cur_name]
        except (tvdb_exceptions.tvdb_exception):
            # if none found, search on all languages
            try:
                # There's gotta be a better way of doing this but we don't wanna
                # change the language value elsewhere
                ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
                ltvdb_api_parms['search_all_languages'] = True
                t = tvdb_api.Tvdb(custom_ui=classes.ShowListUI, **ltvdb_api_parms)
                self._log(u"Looking up name "+cur_name+u" in all languages on TVDB", logger.DEBUG)
                showObj = t[cur_name]
            except (tvdb_exceptions.tvdb_exception, IOError):
                pass
            # NOTE(review): this continue is unconditional, so even when the
            # all-languages retry above succeeded the name is skipped -- confirm
            continue
        except (IOError):
            continue
        self._log(u"Lookup successful, using tvdb id "+str(showObj["id"]), logger.DEBUG)
        _finalize(parse_result)
        return (int(showObj["id"]), season, episodes)
    # nothing matched: still record parse metadata, return partial info
    _finalize(parse_result)
    return to_return
def _find_info(self):
    """
    For a given file try to find the showid, season, and episode.

    Returns a (tvdb_id, season, episodes) tuple; tvdb_id/season may be
    None and episodes may be [] if nothing could be determined.
    """
    tvdb_id = season = None
    episodes = []
    # try to look up the nzb in history
    attempt_list = [self._history_lookup,
                    # try to analyze the nzb name
                    lambda: self._analyze_name(self.nzb_name),
                    # try to analyze the file name
                    lambda: self._analyze_name(self.file_name),
                    # try to analyze the dir name
                    lambda: self._analyze_name(self.folder_name),
                    # try to analyze the file+dir names together
                    lambda: self._analyze_name(self.file_path),
                    # try to analyze the dir + file name together as one name
                    lambda: self._analyze_name(self.folder_name + u' ' + self.file_name)
                    ]
    # attempt every possible method to get our info
    for cur_attempt in attempt_list:
        try:
            (cur_tvdb_id, cur_season, cur_episodes) = cur_attempt()
        except InvalidNameException, e:
            logger.log(u"Unable to parse, skipping: "+ex(e), logger.DEBUG)
            continue
        # if we already did a successful history lookup then keep that tvdb_id value
        if cur_tvdb_id and not (self.in_history and tvdb_id):
            tvdb_id = cur_tvdb_id
        if cur_season != None:
            season = cur_season
        if cur_episodes:
            episodes = cur_episodes
        # for air-by-date shows we need to look up the season/episode from tvdb
        # (_analyze_name signals air-by-date with season == -1)
        if season == -1 and tvdb_id and episodes:
            self._log(u"Looks like this is an air-by-date show, attempting to convert the date to season/episode", logger.DEBUG)
            # try to get language set for this show
            tvdb_lang = None
            try:
                showObj = helpers.findCertainShow(sickbeard.showList, tvdb_id)
                if(showObj != None):
                    tvdb_lang = showObj.lang
            except exceptions.MultipleShowObjectsException:
                raise #TODO: later I'll just log this, for now I want to know about it ASAP
            try:
                # There's gotta be a better way of doing this but we don't wanna
                # change the language value elsewhere
                ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
                if tvdb_lang and not tvdb_lang == 'en':
                    ltvdb_api_parms['language'] = tvdb_lang
                t = tvdb_api.Tvdb(**ltvdb_api_parms)
                epObj = t[tvdb_id].airedOn(episodes[0])[0]
                season = int(epObj["seasonnumber"])
                episodes = [int(epObj["episodenumber"])]
                self._log(u"Got season " + str(season) + " episodes " + str(episodes), logger.DEBUG)
            except tvdb_exceptions.tvdb_episodenotfound, e:
                self._log(u"Unable to find episode with date " + str(episodes[0]) + u" for show " + str(tvdb_id) + u", skipping", logger.DEBUG)
                # we don't want to leave dates in the episode list if we couldn't convert them to real episode numbers
                episodes = []
                continue
            except tvdb_exceptions.tvdb_error, e:
                logger.log(u"Unable to contact TVDB: " + ex(e), logger.WARNING)
                episodes = []
                continue
        # if there's no season then we can hopefully just use 1 automatically
        elif season == None and tvdb_id:
            myDB = db.DBConnection()
            numseasonsSQlResult = myDB.select("SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? and season != 0", [tvdb_id])
            if int(numseasonsSQlResult[0][0]) == 1 and season == None:
                self._log(u"Don't have a season number, but this show appears to only have 1 season, setting seasonnumber to 1...", logger.DEBUG)
                season = 1
        # once show, season and episodes are all known, stop trying further sources
        if tvdb_id and season != None and episodes:
            return (tvdb_id, season, episodes)
    # fell through every attempt: return whatever partial info was gathered
    return (tvdb_id, season, episodes)
def _get_ep_obj(self, tvdb_id, season, episodes):
    """
    Retrieve the TVEpisode object requested.

    tvdb_id: The TVDBID of the show (int)
    season: The season of the episode (int)
    episodes: A list of episodes to find (list of ints)

    If the episode(s) can be found then a TVEpisode object with the correct related eps will
    be instantiated and returned. If the episode can't be found then None will be returned.
    Raises exceptions.PostProcessingFailed if the show is not in the user's list
    or an episode cannot be created.
    """
    show_obj = None
    self._log(u"Loading show object for tvdb_id "+str(tvdb_id), logger.DEBUG)
    # find the show in the showlist
    try:
        show_obj = helpers.findCertainShow(sickbeard.showList, tvdb_id)
    except exceptions.MultipleShowObjectsException:
        raise #TODO: later I'll just log this, for now I want to know about it ASAP
    # if we can't find the show then there's nothing we can really do
    if not show_obj:
        self._log(u"This show isn't in your list, you need to add it to SB before post-processing an episode", logger.ERROR)
        raise exceptions.PostProcessingFailed()
    root_ep = None
    for cur_episode in episodes:
        episode = int(cur_episode)
        self._log(u"Retrieving episode object for " + str(season) + "x" + str(episode), logger.DEBUG)
        # now that we've figured out which episode this file is just load it manually
        try:
            curEp = show_obj.getEpisode(season, episode)
        except exceptions.EpisodeNotFoundException, e:
            self._log(u"Unable to create episode: "+ex(e), logger.DEBUG)
            raise exceptions.PostProcessingFailed()
        # associate all the episodes together under a single root episode
        # (the first episode becomes the root; the rest go in relatedEps)
        if root_ep == None:
            root_ep = curEp
            root_ep.relatedEps = []
        elif curEp not in root_ep.relatedEps:
            root_ep.relatedEps.append(curEp)
    return root_ep
def _get_quality(self, ep_obj):
    """
    Determines the quality of the file that is being post processed, first by checking if it is directly
    available in the TVEpisode's status or otherwise by parsing through the data available.

    ep_obj: The TVEpisode object related to the file we are post processing

    Returns: A quality value found in common.Quality
    """
    ep_quality = common.Quality.UNKNOWN
    # a snatched status already carries the quality we grabbed, so prefer that
    if ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER:
        oldStatus, ep_quality = common.Quality.splitCompositeStatus(ep_obj.status) #@UnusedVariable
        if ep_quality != common.Quality.UNKNOWN:
            self._log(u"The old status had a quality in it, using that: "+common.Quality.qualityStrings[ep_quality], logger.DEBUG)
            return ep_quality
    # otherwise try the names in decreasing order of reliability:
    # nzb name first, then folder name, lastly file name
    for cur_name in (self.nzb_name, self.folder_name, self.file_name):
        # some stuff might be None at this point still
        if not cur_name:
            continue
        ep_quality = common.Quality.nameQuality(cur_name)
        self._log(u"Looking up quality for name "+cur_name+u", got "+common.Quality.qualityStrings[ep_quality], logger.DEBUG)
        # if we find a good one then use it
        if ep_quality != common.Quality.UNKNOWN:
            logger.log(cur_name+u" looks like it has quality "+common.Quality.qualityStrings[ep_quality]+", using that", logger.DEBUG)
            return ep_quality
    # none of the names yielded a quality: fall back to guessing from the file name
    ep_quality = common.Quality.assumeQuality(self.file_name)
    self._log(u"Guessing quality for name "+self.file_name+u", got "+common.Quality.qualityStrings[ep_quality], logger.DEBUG)
    if ep_quality != common.Quality.UNKNOWN:
        logger.log(self.file_name+u" looks like it has quality "+common.Quality.qualityStrings[ep_quality]+", using that", logger.DEBUG)
    return ep_quality
def _run_extra_scripts(self, ep_obj):
"""
Executes any extra scripts defined in the config.
ep_obj: The object to use when calling the extra script
"""
for curScriptName in sickbeard.EXTRA_SCRIPTS:
# generate a safe command line string to execute the script and provide all the parameters
script_cmd = shlex.split(curScriptName) + [ep_obj.location, self.file_path, str(ep_obj.show.tvdbid), str(ep_obj.season), str(ep_obj.episode), str(ep_obj.airdate)]
# use subprocess to run the command and capture output
self._log(u"Executing command "+str(script_cmd))
self._log(u"Absolute path to script: "+ek.ek(os.path.abspath, script_cmd[0]), logger.DEBUG)
try:
p = subprocess.Popen(script_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=sickbeard.PROG_DIR)
out, err = p.communicate() #@UnusedVariable
self._log(u"Script result: "+str(out), logger.DEBUG)
except OSError, e:
self._log(u"Unable to run extra_script: "+ex(e))
def _is_priority(self, ep_obj, new_ep_quality):
    """
    Determines if the episode is a priority download or not (if it is expected). Episodes which are expected
    (snatched) or larger than the existing episode are priority, others are not.

    ep_obj: The TVEpisode object in question
    new_ep_quality: The quality of the episode that is being processed

    Returns: True if the episode is priority, False otherwise.
    """
    # if SB downloaded this on purpose then this is a priority download
    if self.in_history or ep_obj.status in common.Quality.SNATCHED + common.Quality.SNATCHED_PROPER:
        self._log(u"SB snatched this episode so I'm marking it as priority", logger.DEBUG)
        return True
    # quality of whatever we already have for this episode
    old_ep_status, old_ep_quality = common.Quality.splitCompositeStatus(ep_obj.status) #@UnusedVariable
    # if the user downloaded it manually and it's higher quality than the existing episode then it's priority
    # (bug fix: the old code compared new_ep_quality against the TVEpisode
    # object itself instead of the existing episode's quality)
    if new_ep_quality > old_ep_quality and new_ep_quality != common.Quality.UNKNOWN:
        self._log(u"This was manually downloaded but it appears to be better quality than what we have so I'm marking it as priority", logger.DEBUG)
        return True
    # if the user downloaded it manually and it appears to be a PROPER/REPACK then it's priority
    if self.is_proper and new_ep_quality >= old_ep_quality:
        self._log(u"This was manually downloaded but it appears to be a proper so I'm marking it as priority", logger.DEBUG)
        return True
    return False
def process(self):
    """
    Post-process a given file.

    Returns True on success, False when the file was skipped; raises
    exceptions.PostProcessingFailed on unrecoverable errors.
    """
    self._log(u"Processing " + self.file_path + " (" + str(self.nzb_name) + ")")
    # directories and ignored paths are never processed
    if ek.ek(os.path.isdir, self.file_path):
        self._log(u"File " + self.file_path + " seems to be a directory")
        return False
    for ignore_file in self.IGNORED_FILESTRINGS:
        if ignore_file in self.file_path:
            self._log(u"File " + self.file_path + " is ignored type, skipping")
            return False
    # reset per-file stuff
    self.in_history = False
    # try to find the file info
    (tvdb_id, season, episodes) = self._find_info()
    # if we don't have it then give up
    if not tvdb_id or season == None or not episodes:
        return False
    # retrieve/create the corresponding TVEpisode objects
    ep_obj = self._get_ep_obj(tvdb_id, season, episodes)
    # get the quality of the episode we're processing
    new_ep_quality = self._get_quality(ep_obj)
    logger.log(u"Quality of the episode we're processing: " + str(new_ep_quality), logger.DEBUG)
    # see if this is a priority download (is it snatched, in history, or PROPER)
    priority_download = self._is_priority(ep_obj, new_ep_quality)
    self._log(u"Is ep a priority download: " + str(priority_download), logger.DEBUG)
    # set the status of the episodes
    for curEp in [ep_obj] + ep_obj.relatedEps:
        curEp.status = common.Quality.compositeStatus(common.SNATCHED, new_ep_quality)
    # check for an existing file
    existing_file_status = self._checkForExistingFile(ep_obj.location)
    # if it's not priority then we don't want to replace smaller files in case it was a mistake
    if not priority_download:
        # if there's an existing file that we don't want to replace stop here
        if existing_file_status in (PostProcessor.EXISTS_LARGER, PostProcessor.EXISTS_SAME):
            self._log(u"File exists and we are not going to replace it because it's not smaller, quitting post-processing", logger.DEBUG)
            return False
        elif existing_file_status == PostProcessor.EXISTS_SMALLER:
            self._log(u"File exists and is smaller than the new file so I'm going to replace it", logger.DEBUG)
        elif existing_file_status != PostProcessor.DOESNT_EXIST:
            self._log(u"Unknown existing file status. This should never happen, please log this as a bug.", logger.ERROR)
            return False
    # if the file is priority then we're going to replace it even if it exists
    else:
        self._log(u"This download is marked a priority download so I'm going to replace an existing file if I find one", logger.DEBUG)
        # delete the existing file (and company)
        for cur_ep in [ep_obj] + ep_obj.relatedEps:
            try:
                self._delete(cur_ep.location, associated_files=True)
                # clean up any left over folders
                if cur_ep.location:
                    helpers.delete_empty_folders(ek.ek(os.path.dirname, cur_ep.location), keep_dir=ep_obj.show._location)
            except (OSError, IOError):
                raise exceptions.PostProcessingFailed("Unable to delete the existing files")
    # if the show directory doesn't exist then make it if allowed
    if not ek.ek(os.path.isdir, ep_obj.show._location) and sickbeard.CREATE_MISSING_SHOW_DIRS:
        self._log(u"Show directory doesn't exist, creating it", logger.DEBUG)
        try:
            ek.ek(os.mkdir, ep_obj.show._location)
            # do the library update for synoindex
            notifiers.synoindex_notifier.addFolder(ep_obj.show._location)
        except (OSError, IOError):
            raise exceptions.PostProcessingFailed("Unable to create the show directory: " + ep_obj.show._location)
    # get metadata for the show (but not episode because it hasn't been fully processed)
    ep_obj.show.writeMetadata(True)
    # update the ep info before we rename so the quality & release name go into the name properly
    for cur_ep in [ep_obj] + ep_obj.relatedEps:
        with cur_ep.lock:
            cur_release_name = None
            # use the best possible representation of the release name
            if self.good_results[self.NZB_NAME]:
                cur_release_name = self.nzb_name
                if cur_release_name.lower().endswith('.nzb'):
                    cur_release_name = cur_release_name.rpartition('.')[0]
            elif self.good_results[self.FOLDER_NAME]:
                cur_release_name = self.folder_name
            elif self.good_results[self.FILE_NAME]:
                cur_release_name = self.file_name
                # take the extension off the filename, it's not needed
                if '.' in self.file_name:
                    cur_release_name = self.file_name.rpartition('.')[0]
            if cur_release_name:
                self._log("Found release name " + cur_release_name, logger.DEBUG)
                cur_ep.release_name = cur_release_name
            else:
                logger.log("good results: " + repr(self.good_results), logger.DEBUG)
            cur_ep.status = common.Quality.compositeStatus(common.DOWNLOADED, new_ep_quality)
            cur_ep.subtitles = []
            cur_ep.subtitles_searchcount = 0
            cur_ep.subtitles_lastsearch = '0001-01-01 00:00:00'
            cur_ep.is_proper = self.is_proper
            cur_ep.saveToDB()
    # find the destination folder
    try:
        proper_path = ep_obj.proper_path()
        proper_absolute_path = ek.ek(os.path.join, ep_obj.show.location, proper_path)
        dest_path = ek.ek(os.path.dirname, proper_absolute_path)
    except exceptions.ShowDirNotFoundException:
        raise exceptions.PostProcessingFailed(u"Unable to post-process an episode if the show dir doesn't exist, quitting")
    self._log(u"Destination folder for this episode: " + dest_path, logger.DEBUG)
    # create any folders we need
    helpers.make_dirs(dest_path)
    # download subtitles
    if sickbeard.USE_SUBTITLES and ep_obj.show.subtitles:
        # NOTE(review): cur_ep here is the loop variable leaked from the
        # release-name loop above (i.e. the LAST related episode), not
        # ep_obj -- confirm this is intentional
        cur_ep.location = self.file_path
        cur_ep.downloadSubtitles(force=True)
    # figure out the base name of the resulting episode file
    if sickbeard.RENAME_EPISODES:
        orig_extension = self.file_name.rpartition('.')[-1]
        new_base_name = ek.ek(os.path.basename, proper_path)
        new_file_name = new_base_name + '.' + orig_extension
    else:
        # if we're not renaming then there's no new base name, we'll just use the existing name
        new_base_name = None
        new_file_name = self.file_name
    try:
        # move the episode and associated files to the show dir
        if sickbeard.KEEP_PROCESSED_DIR:
            self._copy(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES, sickbeard.USE_SUBTITLES and ep_obj.show.subtitles)
        else:
            self._move(self.file_path, dest_path, new_base_name, sickbeard.MOVE_ASSOCIATED_FILES, sickbeard.USE_SUBTITLES and ep_obj.show.subtitles)
    except (OSError, IOError):
        raise exceptions.PostProcessingFailed("Unable to move the files to their new home")
    # put the new location in the database
    for cur_ep in [ep_obj] + ep_obj.relatedEps:
        with cur_ep.lock:
            cur_ep.location = ek.ek(os.path.join, dest_path, new_file_name)
            cur_ep.saveToDB()
    # log it to history
    history.logDownload(ep_obj, self.file_path, new_ep_quality, self.release_group)
    # send notifications
    notifiers.notify_download(ep_obj._format_pattern('%SN - %Sx%0E - %EN - %QN'))
    # generate nfo/tbn
    ep_obj.createMetaFiles()
    ep_obj.saveToDB()
    # do the library update for XBMC
    notifiers.xbmc_notifier.update_library(ep_obj.show.name)
    # do the library update for Plex
    notifiers.plex_notifier.update_library()
    # do the library update for NMJ
    # nmj_notifier kicks off its library update when the notify_download is issued (inside notifiers)
    # do the library update for Synology Indexer
    notifiers.synoindex_notifier.addFile(ep_obj.location)
    # do the library update for pyTivo
    notifiers.pytivo_notifier.update_library(ep_obj)
    # do the library update for Trakt
    notifiers.trakt_notifier.update_library(ep_obj)
    self._run_extra_scripts(ep_obj)
    return True
| fernandog/Sick-Beard | sickbeard/postProcessor.py | Python | gpl-3.0 | 41,517 |
# coding=utf-8
from elections.tests import VotaInteligenteTestCase as TestCase
from elections.models import Election
from django.core.urlresolvers import reverse
from candideitorg.models import Candidate
from django.core.management import call_command
class PhotoLoaderCase(TestCase):
    """Tests for the photo_loader management command that fills in candidate photos."""
    def setUp(self):
        super(PhotoLoaderCase, self).setUp()

    def test_it_loads_the_photo_for_an_existing_candidate(self):
        # run the command against a CSV that carries full photo URLs
        call_command('photo_loader', 'elections/tests/fixtures/candidate_photo_url.csv', verbosity=0)
        guillier = Candidate.objects.get(name=u"Alejandro Guillier")
        rojas = Candidate.objects.get(name=u"Manuel Rojas")
        self.assertEquals(guillier.photo, 'http://upload.wikimedia.org/wikipedia/commons/7/76/Alejandro_Guillier.jpg')
        self.assertEquals(rojas.photo, 'http://www.2eso.info/sinonimos/wp-content/uploads/2013/02/feo1.jpg')

    def test_if_the_candidate_does_not_exist_it_does_it_for_the_rest(self):
        # unknown candidates in the CSV are skipped; the rest still get photos
        call_command('photo_loader', 'elections/tests/fixtures/candidate_photo_url.csv', verbosity=0)
        guillier = Candidate.objects.get(name=u"Alejandro Guillier")
        rojas = Candidate.objects.get(name=u"Manuel Rojas")
        self.assertEquals(guillier.photo, 'http://upload.wikimedia.org/wikipedia/commons/7/76/Alejandro_Guillier.jpg')
        self.assertEquals(rojas.photo, 'http://www.2eso.info/sinonimos/wp-content/uploads/2013/02/feo1.jpg')

    def test_it_prepends_url_when_provided(self):
        # a second argument is prefixed onto the bare file names from the CSV
        call_command('photo_loader', 'elections/tests/fixtures/candidate_photo.csv', 'some.site/static/', verbosity=0)
        guillier = Candidate.objects.get(name=u"Alejandro Guillier")
        rojas = Candidate.objects.get(name=u"Manuel Rojas")
        self.assertEquals(guillier.photo, 'some.site/static/Alejandro_Guillier.jpg')
        self.assertEquals(rojas.photo, 'some.site/static/feo1.jpg')