code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011, De Verkeersonderneming <rits@verkeersonderneming.nl>
#
# This file is part of PyRITS - A tool for processing and analyzing transport
# management system data.
#
# PyRITS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyRITS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This is the executable for PyRITS. To run this script, run it from the
command line. The command line interface for PyRITS gives usage information.
Run this script with the -h option to display help information.
PyRITS has four submodules: preprocess, drivetimes, delays and report. The usage
information for each submodule can be viewed by running the command,
./pyrits.py <module> -h
"""
import sys
import os
import logging
import argparse
import psycopg2
import pyrits.config
import pyrits.erniesoft.std
import pyrits.erniesoft.report
# Module metadata.
__author__ = "Serrano Pereira"
__copyright__ = "Copyright 2011, De Verkeersonderneming"
__credits__ = ["Serrano Pereira <serrano.pereira@gmail.com>"]
__license__ = "GPL3"
__version__ = "0.1.2"  # also reported by the --version command line flag
__maintainer__ = "Serrano Pereira"
__email__ = "serrano.pereira@gmail.com"
__status__ = "Production"
__date__ = "2011/11/24"
def get_connection(db):
    """Return an open PostgreSQL connection for database `db`.

    The connection string is looked up in the PyRITS configuration.
    On failure the script exits with a descriptive message instead of
    raising, because no task can run without the database.
    """
    conn_string = pyrits.config.cfg.get('conn_string', database=db)
    try:
        return psycopg2.connect(conn_string)
    except Exception as e:
        # Narrowed from a bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt); the sys.exc_info() boilerplate
        # is replaced by binding the exception directly.
        sys.exit("Database connection failed!\n %s" % (e,))
def main():
    """Parse the command line and run the requested PyRITS task.

    Each sub-command registers its handler function via ``set_defaults``,
    so dispatching no longer depends on the raw contents of ``sys.argv``
    (the old ``sys.argv[1]`` check broke as soon as anything preceded
    the sub-command name).
    """
    # Set logging level.
    logging.basicConfig(level=logging.INFO, format='%(levelname)s %(message)s')
    # Read configurations from the config file.
    pyrits.config.cfg.read_configuration()
    # Create main argument parser.
    parser = argparse.ArgumentParser(description='Please specify a task.')
    parser.add_argument('--version',
        action='version',
        help="Print version information.",
        version="PyRITS "+__version__)
    # Create a sub parser for sub-commands.
    subparsers = parser.add_subparsers(help='Specify which task to start.')
    help_preprocess = "Perform preprocesses on the database. This must be run once after new data was added to the database."
    help_drivetimes = "Calculate driving times."
    help_delays = "Calculate driving delays."
    help_report = "Generate a report."
    # BUGFIX (all sub-commands below): short and long option names must be
    # passed as separate strings. The original single string
    # '-d, --database' made argparse register ONE option literally named
    # "-d, --database", which no user could reasonably type.
    # Create an argument parser for sub-command 'preprocess'.
    parser_preprocess = subparsers.add_parser('preprocess',
        help=help_preprocess,
        description=help_preprocess)
    parser_preprocess.set_defaults(func=preprocess)
    parser_preprocess.add_argument('-d', '--database',
        action='store',
        type=str,
        choices=['erniesoft','tans'],
        required=True,
        help="Specify database name. Possible values: erniesoft, tans.",
        metavar="DB",
        dest='database')
    # Create an argument parser for sub-command 'drivetimes'.
    parser_drivetimes = subparsers.add_parser('drivetimes',
        help=help_drivetimes,
        description=help_drivetimes)
    parser_drivetimes.set_defaults(func=drivetimes)
    parser_drivetimes.add_argument('-d', '--database',
        action='store',
        type=str,
        choices=['erniesoft','tans'],
        required=True,
        help="Specify database name. Possible values: erniesoft, tans.",
        metavar="DB",
        dest='database')
    parser_drivetimes.add_argument('-s', '--date-start',
        action='store',
        type=str,
        required=False,
        help="Begin date of the records to analyze.",
        metavar="YYYY-MM-DD",
        dest='date_start')
    parser_drivetimes.add_argument('-e', '--date-end',
        action='store',
        type=str,
        required=False,
        help="End date of the records to analyze.",
        metavar="YYYY-MM-DD",
        dest='date_end')
    parser_drivetimes.add_argument('-o',
        action='store',
        type=str,
        required=False,
        help="Specify output folder. If specified, results will be saved to this folder.",
        metavar="PATH",
        dest='output_folder')
    parser_drivetimes.add_argument('-v', '--vehicle',
        action='store',
        type=str,
        required=False,
        help="Specify the vehicle to be analyzed. If not specified, all vehicles are analyzed.",
        metavar="CODE",
        dest='vehicle_code')
    parser_drivetimes.add_argument('-r', '--ride',
        action='store',
        type=int,
        required=False,
        help="Specify ride number. If specified, only this ride is analyzed.",
        metavar="N",
        dest='ride_number')
    # Create an argument parser for sub-command 'delays'.
    parser_delays = subparsers.add_parser('delays',
        help=help_delays,
        description=help_delays)
    parser_delays.set_defaults(func=delays)
    parser_delays.add_argument('-d', '--database',
        action='store',
        type=str,
        choices=['erniesoft','tans'],
        required=True,
        help="Specify database name. Possible values: erniesoft, tans.",
        metavar="DB",
        dest='database')
    # Create an argument parser for sub-command 'report'.
    parser_report = subparsers.add_parser('report',
        help=help_report,
        description=help_report)
    parser_report.set_defaults(func=report)
    parser_report.add_argument('-d', '--database',
        action='store',
        type=str,
        choices=['erniesoft','tans'],
        required=True,
        help="Specify database name. Possible values: erniesoft, tans.",
        metavar="DB",
        dest='database')
    parser_report.add_argument('-s', '--date-start',
        action='store',
        type=str,
        required=True,
        help="Specify start date.",
        metavar="YYYY-MM-DD",
        dest='date_start')
    parser_report.add_argument('-e', '--date-end',
        action='store',
        type=str,
        required=True,
        help="Specify end date.",
        metavar="YYYY-MM-DD",
        dest='date_end')
    parser_report.add_argument('-o',
        action='store',
        type=str,
        required=True,
        help="Specify output file.",
        metavar="FILE",
        dest='output_file')
    parser_report.add_argument('-t', '--type',
        action='store',
        default='xml',
        type=str,
        choices=['xml','csv-tasks','csv-stats','routes'],
        required=False,
        help="Specify output format for the report. Possibe values: xml, csv-tasks, csv-stats, routes. Default is xml.",
        metavar="TYPE",
        dest='output_format')
    parser_report.add_argument('--zip-depth',
        action='store',
        type=int,
        default=10,
        choices=[4,5,6,7,8,9,10],
        required=False,
        help="Zip code depth for grouping routes in reports. Default is 10.",
        metavar="N",
        dest='zip_depth')
    parser_report.add_argument('--top-max',
        action='store',
        type=int,
        default=5,
        required=False,
        help="The maximum number of items in a top list. Default is 5.",
        metavar="N",
        dest='top_max')
    parser_report.add_argument('--filter-countries',
        action='store',
        type=str,
        default=None,
        required=False,
        help="Used for the 'routes' report. Filter routes for specific countries. Multiple countries must be separated by comma's (e.g. nl,de).",
        metavar="CODES",
        dest='filter_countries')
    parser_report.add_argument('--routes-filter-actions',
        action='store',
        type=str,
        default=None,
        required=False,
        help="Used for the 'routes' report. Filter routes for specific action codes. Multiple action codes must be separated by comma's (e.g. laden,lossen).",
        metavar="ACTIONS",
        dest='routes_filter_actions')
    parser_report.add_argument('--routes-method',
        action='store',
        type=int,
        default=1,
        choices=[1,2],
        required=False,
        help="Used for the 'routes' report. Set the method for defining routes. Possible values: 1 for location combinations (default), 2 for location permutations.",
        metavar="N",
        dest='routes_method')
    parser_report.add_argument('--routes-no-replacement',
        action='store_const',
        const=True,
        required=False,
        help="Used for the 'routes' report. Skip routes with the same start and end location.",
        dest='routes_no_replacement')
    # Parse the arguments and dispatch to the selected task's handler.
    args = parser.parse_args()
    args.func(args)
    # Terminate the application.
    sys.exit()
def drivetimes(args):
    """Calculate the realized driving times.

    Depending on the arguments this runs for a single ride, a single
    vehicle, or every vehicle found in the date range. In the
    all-vehicles case results are saved to the database; for all cases
    HTML output is optionally written to ``args.output_folder``.
    """
    # Do some extra checks.
    if args.output_folder and not os.path.exists(args.output_folder):
        sys.exit("Path '%s' does not exists." % args.output_folder)
    if args.database == "erniesoft":
        # Make a connection with the database.
        connection = get_connection(args.database)
        # Create instance of driving times calculator.
        driving_times = pyrits.erniesoft.std.DrivingTimes(connection)
        # Calculate for ride number.
        if args.ride_number:
            exit_status = driving_times.calculate_ride(args.ride_number)
            # Save results to database.
            #if exit_status == 0: driving_times.save_to_database()
            # Write results to output file and display results in browser.
            if exit_status == 0 and args.output_folder:
                output_file = "drivetimes_ride_%s.html" % args.ride_number
                output_file = os.path.join(args.output_folder, output_file)
                # open_file=1: presumably opens the result in a browser --
                # confirm against DrivingTimes.output_html.
                driving_times.output_html(output_file, open_file=1)
        # Calculate for a single vehicle.
        elif args.vehicle_code:
            exit_status = driving_times.calculate_vehicle(args.vehicle_code, args.date_start, args.date_end)
            # Save results to database.
            #if exit_status == 0: driving_times.save_to_database()
            # Write results to output file and display results in browser.
            if exit_status == 0 and args.output_folder:
                if not args.date_start:
                    output_file = "drivetimes_%s.html" % (args.vehicle_code)
                else:
                    output_file = "drivetimes_%s_%s_%s.html" % (args.date_start, args.date_end, args.vehicle_code)
                output_file = os.path.join(args.output_folder, output_file)
                driving_times.output_html(output_file, open_file=1)
        # Calculate for all vehicles.
        else:
            # Obtain all vehicle codes that belong to this date range.
            logging.info("Obtaining vehicle codes...")
            vehicle_codes = driving_times.get_vehicles_from_date_range(args.date_start, args.date_end)
            vehicle_codes.sort()
            if len(vehicle_codes) == 0:
                # NOTE(review): this logs but does not return, so the purge
                # below still clears the table even when there is nothing
                # to process -- confirm that is intended.
                logging.info("No vehicles found. Nothing to do.")
            # Remove existing driving times from the database.
            logging.info("Purging driving times table...")
            driving_times.purge()
            # Process data.
            for vehicle_code in vehicle_codes:
                exit_status = driving_times.calculate_vehicle(vehicle_code, args.date_start, args.date_end)
                # Save results to database.
                if exit_status == 0: driving_times.save_to_database()
                # Write results to output file and display results in browser.
                if exit_status == 0 and args.output_folder:
                    if not args.date_start:
                        output_file = "drivetimes_%s.html" % (vehicle_code)
                    else:
                        output_file = "drivetimes_%s_%s_%s.html" % (args.date_start, args.date_end, vehicle_code)
                    output_file = os.path.join(args.output_folder, output_file)
                    driving_times.output_html(output_file, open_file=0)
        # Close database connection.
        connection.close()
        logging.info("Done")
    elif args.database == "tans":
        sys.exit("Not yet implemented.")
def delays(args):
    """Calculate the planned and realized driving delays.

    Uses Modelit data; see :meth:`delays_` for the superseded approach
    kept for reference.
    """
    if args.database == "erniesoft":
        # Make a connection with the database.
        connection = get_connection(args.database)
        # Create instance of driving delays calculator.
        driving_delays = pyrits.erniesoft.std.DrivingDelays(connection)
        # Calculate delays (this first sets the clean driving times
        # and the planned delays, then the realized delays).
        driving_delays.calculate_delays_using_modelit()
        # Close database connection.
        connection.close()
        logging.info("Done")
    elif args.database == "tans":
        # Consistency fix: every other sub-command reports the TANS
        # backend as unimplemented; this one used to silently do nothing.
        sys.exit("Not yet implemented.")
def delays_(args):
    """Calculate the planned and realized driving delays.

    .. note::
        This function shows the old method of calculating planned and realized
        delays. This function has been replaced by :meth:`delays` which uses
        Modelit data. This function is no longer in use and is kept here for
        reference.
    """
    if args.database == "erniesoft":
        # Make a connection with the database.
        connection = get_connection(args.database)
        # Create instance of driving times calculator.
        driving_delays = pyrits.erniesoft.std.DrivingDelays(connection)
        # Calculate delays for a single vehicle.
        if args.vehicle_code:
            # Calculate realized delays.
            driving_delays.calculate_realized_delays(args.vehicle_code)
            # Planned delays must be calculated after the realized delays.
            driving_delays.calculate_planned_delays()
        # Calculate delays for all vehicles.
        else:
            # Obtain all vehicle codes.
            logging.info("Obtaining vehicle codes...")
            # NOTE(review): called without a date range here, unlike the
            # drivetimes variant -- confirm the method has defaults.
            vehicle_codes = driving_delays.get_vehicles_from_date_range()
            vehicle_codes.sort()
            if len(vehicle_codes) == 0:
                # No early return; the loop below simply does nothing.
                logging.info("No vehicles found. Nothing to do.")
            # Calculate the realized delays for each vehicle.
            for vehicle_code in vehicle_codes:
                driving_delays.calculate_realized_delays(vehicle_code)
            # Planned delays must be calculated after the realized delays.
            driving_delays.calculate_planned_delays()
        # Close database connection.
        connection.close()
        logging.info("Done")
    elif args.database == "tans":
        sys.exit("Not yet implemented.")
def report(args):
    """Generate a report of the driving times and driving delays.

    The report format is chosen with ``--type`` and written to the file
    given by ``-o``.
    """
    # Perform extra checks.
    # BUGFIX: the original test `not os.path.dirname(args.output_file)`
    # rejected plain file names (empty dirname means "current directory",
    # which is fine) while accepting non-existing directories. Validate
    # that the target directory, when one is given, actually exists.
    output_dir = os.path.dirname(args.output_file)
    if output_dir and not os.path.isdir(output_dir):
        sys.exit("Path '%s' does not exists." % output_dir)
    if args.zip_depth:
        pyrits.config.cfg.set('zip-code-depth', args.zip_depth)
    if args.top_max:
        pyrits.config.cfg.set('report-top-list-max', args.top_max)
    # Comma separated filter options become lists.
    if args.filter_countries:
        args.filter_countries = args.filter_countries.split(',')
    if args.routes_filter_actions:
        args.routes_filter_actions = args.routes_filter_actions.split(',')
    # Routes with identical start and end location are included unless
    # --routes-no-replacement was given.
    routes_replacement = not args.routes_no_replacement
    # Start calculations.
    if args.database == "erniesoft":
        # Make a connection with the database.
        connection = get_connection(args.database)
        # Export the report.
        if args.output_format == 'xml':
            report = pyrits.erniesoft.report.XMLReport(connection)
            report.make(args.date_start, args.date_end)
            report.export(args.output_file)
        elif args.output_format == 'csv-tasks':
            report = pyrits.erniesoft.report.CSVReport(connection)
            report.export_driving_times(args.output_file, args.date_start, args.date_end)
        elif args.output_format == 'csv-stats':
            report = pyrits.erniesoft.report.CSVReport(connection)
            report.export_statistics(args.output_file, args.date_start, args.date_end)
        elif args.output_format == 'routes':
            report = pyrits.erniesoft.report.CSVReport(connection)
            report.set_route_frequencies(args.routes_method, args.filter_countries, args.routes_filter_actions, routes_replacement)
            report.export_route_frequencies(args.output_file)
        # Close database connection.
        connection.close()
    elif args.database == "tans":
        sys.exit("Not yet implemented.")
def preprocess(args):
    """Prepare a database so the other tasks can run on it.

    Preprocessing must happen once after new data has been loaded.

    - For the Erniesoft database it derives the vehicle code and the
      route for each task.
    """
    if args.database == "erniesoft":
        # Open the database, run the preprocessor, then clean up.
        connection = get_connection(args.database)
        preprocessor = pyrits.erniesoft.std.Preprocess(connection)
        preprocessor.start()
        connection.close()
    elif args.database == "tans":
        sys.exit("Not yet implemented.")
# Entry point: run the command line interface when executed directly.
if __name__ == "__main__":
    main()
| figure002/pyrits | pyrits.py | Python | gpl-3.0 | 17,967 |
# -*- coding: utf-8 -*-
'''
Sphinx setting.
'''
import os.path
import sys

# Make the package importable: conf.py lives one level inside the repo.
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))

extensions = [
    'mamchecker.inl',
    'sphinx.ext.mathjax',
    'sphinxcontrib.tikz',
    'sphinxcontrib.texfigure',
]

# i.e. same as conf.py and with page.html containing only {{body}}
templates_path = ['.']

source_suffix = '.rst'        # The suffix of source filenames.
source_encoding = 'utf-8'     # The encoding of source files.
default_role = 'math'
pygments_style = 'sphinx'     # Pygments (syntax highlighting) style.

tikz_proc_suite = 'ImageMagick'
tikz_tikzlibraries = ','.join([
    'arrows', 'snakes', 'backgrounds', 'patterns', 'matrix',
    'shapes', 'fit', 'calc', 'shadows', 'plotmarks',
])

# LaTeX preamble: required packages plus the TikZ libraries above.
# (Paper and font size keep Sphinx defaults: 'a4paper'/'letterpaper',
# '10pt'/'11pt'/'12pt'.)
_PREAMBLE_PACKAGES = ('amsfonts', 'amssymb', 'amsmath', 'siunitx', 'tikz')
latex_elements = {
    'preamble': ''.join('\\usepackage{%s}' % p for p in _PREAMBLE_PACKAGES)
    + '\n\\usetikzlibrary{' + tikz_tikzlibraries + '}'
}

# latex
# sphinx-build[2] -b latex -c . -D master_doc=<rst-file> -D project=<rst-file> <src-dir> <build-dir>
# sphinx-build2 -b latex -c . -D master_doc=vector -D project=vector r/b _build
# html
# sphinx-build[2] -b html -c . -D master_doc=<rst-file> -D project=<rst-file> <src-dir> <build-dir>
# sphinx-build2 -c . -D master_doc=vector -D project=vector r/b _build
| mamchecker/mamchecker | mamchecker/conf.py | Python | gpl-3.0 | 1,452 |
"""Command line manager for the application.

Use "python manage.py" for a list of available commands.
Use "python manage.py runserver" to start the development web server on localhost:5000.
Use "python manage.py runserver --help" for additional runserver options.
"""
from flask_migrate import MigrateCommand
from flask_script import Manager, commands

from app import create_app
from app.commands import InitDbCommand

# Flask-Script exposes these commands on the command line.
manager = Manager(create_app)
for _name, _command in (
        ('db', MigrateCommand),
        ('init_db', InitDbCommand),
        ('runserver', commands.Server(host="0.0.0.0", port=None, threaded=True))):
    manager.add_command(_name, _command)

if __name__ == "__main__":
    # python manage.py                 # shows available commands
    # python manage.py runserver --help  # shows available runserver options
    manager.run()
| jennywoites/MUSSA | MUSSA_Flask/manage.py | Python | gpl-3.0 | 879 |
#!/usr/bin/env python
import os
import sys

## A name of directory containing 'path:...' file
## You can download them using 'make-wget_pathway.sh' script
dir_name = sys.argv[1]

# Output listings derived from the KEGG pathway flat files.
f_summary = open('%s.summary'%dir_name,'w')
f_genes = open('%s.genes'%dir_name,'w')
f_compounds = open('%s.compounds'%dir_name,'w')

# NOTE(review): gene_total is never used below -- confirm it can go.
gene_total = []

for filename in os.listdir( dir_name ):
    # Only KEGG pathway files ('path:...') are processed.
    if( not filename.startswith('path:') ):
        continue
    #sys.stderr.write('Read %s ... '%filename)
    path_id = ''
    path_name = ''
    gene_list = []
    comp_list = []
    prev_tag = ''
    f = open(os.path.join(dir_name,filename),'r')
    for line in f:
        # KEGG flat files keep the record tag in the first columns;
        # continuation lines leave those columns blank.
        tmp_tag = line[:11].strip()
        if( tmp_tag == 'ENTRY' ):
            path_id = line.strip().split()[1]
        if( tmp_tag == 'NAME' ):
            # Drop the organism suffix after ' - '.
            path_name = line[11:].split(' - ')[0].strip()
        if( tmp_tag == 'COMPOUND' ):
            comp_list.append( line[11:].strip().split()[0] )
            f_compounds.write('path:%s\t%s\n'%(path_id,line[11:].strip()))
        elif( tmp_tag == '' and prev_tag == 'COMPOUND' ):
            # Continuation line of the COMPOUND section.
            comp_list.append( line[11:].strip().split()[0] )
            f_compounds.write('path:%s\t%s\n'%(path_id,line[11:].strip()))
        elif( tmp_tag == 'GENE' ):
            gene_list.append( line[11:].strip().split()[0] )
            f_genes.write('path:%s\t%s\n'%(path_id,line[11:].strip()))
            #print line[11:].strip()
        elif( tmp_tag == '' and prev_tag == 'GENE' ):
            # Continuation line of the GENE section.
            gene_list.append( line[11:].strip().split()[0] )
            f_genes.write('path:%s\t%s\n'%(path_id,line[11:].strip()))
            #print line[11:].strip()
        if( tmp_tag != '' ):
            prev_tag = tmp_tag
    f.close()
    # Pathways without genes are only reported on stderr, not summarized.
    if( len(gene_list) == 0 ):
        sys.stderr.write('//SKIP// %s(%d) %s\n'%(path_id, len(gene_list), path_name))
        continue
    f_summary.write('path:%s\t%s\t%d\t%d\n'%(path_id, path_name, len(gene_list), len(comp_list)))

f_summary.close()
f_genes.close()
f_compounds.close()
| taejoonlab/taejoonlab-toolbox | KEGG/make-pathway2list.py | Python | gpl-3.0 | 2,013 |
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QHBoxLayout
from PyQt4.QtGui import QLabel
from PyQt4.QtGui import QLineEdit
from PyQt4.QtGui import QMessageBox
from PyQt4.QtGui import QPixmap
from PyQt4.QtGui import QPushButton
from PyQt4.QtGui import QVBoxLayout
from PyQt4.QtGui import QWidget
import qtUtils
from utils import constants
from utils import errors
from utils import utils
class QNickInputWidget(QWidget):
    """Widget asking the user for a nickname before connecting.

    Shows an image, a nickname field, and a Connect button. A valid
    nickname is forwarded to `connectClickedSlot`; invalid input pops
    up a warning dialog.
    """
    def __init__(self, image, imageWidth, connectClickedSlot, nick='', parent=None):
        """Build the nick-entry UI.

        :param image: file name of the image shown beside the input.
        :param imageWidth: width (pixels) the image is scaled to.
        :param connectClickedSlot: callable invoked with the valid nick.
        :param nick: initial text of the nickname field.
        :param parent: optional parent widget.
        """
        QWidget.__init__(self, parent)
        self.connectClickedSlot = connectClickedSlot
        # Image
        self.image = QLabel(self)
        self.image.setPixmap(QPixmap(qtUtils.getAbsoluteImagePath(image)).scaledToWidth(imageWidth, Qt.SmoothTransformation))
        # Nick field
        self.nickLabel = QLabel("Nickname:", self)
        self.nickEdit = QLineEdit(nick, self)
        self.nickEdit.setMaxLength(constants.NICK_MAX_LEN)
        # Pressing enter in the field acts like clicking Connect.
        self.nickEdit.returnPressed.connect(self.__connectClicked)
        # Connect button
        self.connectButton = QPushButton("Connect", self)
        self.connectButton.resize(self.connectButton.sizeHint())
        self.connectButton.setAutoDefault(False)
        self.connectButton.clicked.connect(self.__connectClicked)
        # Horizontally centered label/edit pair.
        hbox = QHBoxLayout()
        hbox.addStretch(1)
        hbox.addWidget(self.nickLabel)
        hbox.addWidget(self.nickEdit)
        hbox.addStretch(1)
        # Vertically centered column: input row above the button.
        vbox = QVBoxLayout()
        vbox.addStretch(1)
        vbox.addLayout(hbox)
        vbox.addWidget(self.connectButton)
        vbox.addStretch(1)
        # Final row: image on the left, input column on the right.
        hbox = QHBoxLayout()
        hbox.addStretch(1)
        hbox.addWidget(self.image)
        hbox.addSpacing(10)
        hbox.addLayout(vbox)
        hbox.addStretch(1)
        self.setLayout(hbox)
    def __connectClicked(self):
        """Validate the entered nick; forward it or show a warning."""
        nick = str(self.nickEdit.text()).lower()
        # Validate the given nick
        nickStatus = utils.isValidNick(nick)
        if nickStatus == errors.VALID_NICK:
            self.connectClickedSlot(nick)
        elif nickStatus == errors.INVALID_NICK_CONTENT:
            QMessageBox.warning(self, errors.TITLE_INVALID_NICK, errors.INVALID_NICK_CONTENT)
        elif nickStatus == errors.INVALID_NICK_LENGTH:
            QMessageBox.warning(self, errors.TITLE_INVALID_NICK, errors.INVALID_NICK_LENGTH)
        elif nickStatus == errors.INVALID_EMPTY_NICK:
            QMessageBox.warning(self, errors.TITLE_EMPTY_NICK, errors.EMPTY_NICK)
| kostyll/Cryptully | cryptully/qt/qNickInputWidget.py | Python | gpl-3.0 | 2,500 |
"""Run the interactive pulse program.
Keys:
- Escape - Exit the program
- Space - Update program image
- C - Calibrate the image again
"""
import time
import cv2
from pulse_programming import PulseField
from camera_calibration import Calibration

# Main display window plus two auxiliary debug windows.
window = "Camera Pulse Programming"
cv2.namedWindow("Threshold", cv2.WINDOW_AUTOSIZE)
cv2.namedWindow("Capture", cv2.WINDOW_AUTOSIZE)
calibration = Calibration((1024, 768), window_name=window)

def calibrate():
    """Record calibration points and let the user position the window."""
    calibration.record_points(20)
    calibration.show_area_in_camera()
    print("Please move the window to fill the screen and press any key.")
    calibration.wait_for_key_press()

calibrate()

def update_pulse_program_from_camera():
    """Capture the projected area and load it as the pulse program.

    Uses the module-level `pulse_field` (assigned below, before the
    first call). The blue_threshold value selects which pixels count
    as program -- tuned empirically, presumably for the projector used.
    """
    calibration.fill_white()
    cv2.waitKey(1)
    image = calibration.warp_camera_in_projection()
    cv2.imshow("Capture", image)
    pulse_field.set_program_image(image, blue_threshold=0.57)

pulse_field = PulseField()
#pulse_field.DELATION_ITERATIONS = 4
#pulse_field.EROSION_ITERATIONS = 3
update_pulse_program_from_camera()

# Main loop: advance the pulse simulation and react to key presses
# (Escape quits, Space re-captures the program, C re-calibrates).
while True:
    key = cv2.waitKey(1)
    if key == 27: # Escape
        exit(0)
    elif key == 32: # Space
        update_pulse_program_from_camera()
    elif key == ord("c"): # Calibrate
        calibrate()
    t = time.time()
    pulse_field.pulse()
    print("duration:", time.time() - t)
    cv2.imshow(window, pulse_field.get_pulse_gray())
| niccokunzmann/pulse-programming | camera_pulse.py | Python | gpl-3.0 | 1,372 |
#
# Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Honeybee.
#
# Copyright (c) 2013-2015, Mostapha Sadeghipour Roudsari <Sadeghipour@gmail.com>
# Honeybee is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Honeybee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Honeybee; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Radiance Mirror Material By Color
Read more here to understand Radiance materials: http://www.artifice.com/radiance/rad_materials.html
-
Provided by Honeybee 0.0.57
Args:
_materialName: Unique name for this material
_color: color of the glass
Returns:
avrgTrans: Average transmittance of this glass
RADMaterial: Radiance Material string
"""
# Grasshopper component metadata (name, version, palette placement).
ghenv.Component.Name = "Honeybee_Radiance Mirror Material By Color"
ghenv.Component.NickName = 'radMirrorMaterialByColor'
ghenv.Component.Message = 'VER 0.0.57\nJUL_06_2015'
ghenv.Component.Category = "Honeybee"
ghenv.Component.SubCategory = "01 | Daylight | Material"
#compatibleHBVersion = VER 0.0.56\nFEB_01_2015
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
# Older Grasshopper builds may lack this property, hence the broad except.
try: ghenv.Component.AdditionalHelpFromDocStrings = "0"
except: pass
import math
import scriptcontext as sc
import Grasshopper.Kernel as gh
# read here to understand RAD materials
# http://www.artifice.com/radiance/rad_materials.html
def getTransmissivity(transmittance):
    """Convert a whole-pane transmittance into a Radiance transmissivity.

    Standard Radiance polynomial inversion used for glass materials.
    """
    t_squared = transmittance ** 2
    root = math.sqrt(0.8402528435 + 0.0072522239 * t_squared)
    return (root - 0.9166530661) / 0.0036261119 / transmittance
def createRadMaterial(modifier, name, *args):
    """Build a Radiance material definition string.

    :param modifier: Radiance material type (e.g. "mirror").
    :param name: name of the material.
    :param args: numeric channel values, each formatted to 3 decimals.
    :return: the material definition, terminated by a newline.
    """
    # I should check the inputs here
    # Py3-compatibility fix: the argument count was stringified with the
    # Python-2-only backtick repr (`int(len(args))`); str() is equivalent.
    radMaterial = "void " + modifier + " " + name + "\n" + \
                  "0\n" + \
                  "0\n" + \
                  str(len(args))
    for arg in args:
        radMaterial = radMaterial + (" " + "%.3f" % arg)
    return radMaterial + "\n"
def main():
modifier = "mirror"
if sc.sticky.has_key('honeybee_release'):
try:
if not sc.sticky['honeybee_release'].isCompatible(ghenv.Component): return -1
except:
warning = "You need a newer version of Honeybee to use this compoent." + \
"Use updateHoneybee component to update userObjects.\n" + \
"If you have already updated userObjects drag Honeybee_Honeybee component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
if _materialName!=None and _color != None:
RTransmittance = _color.R/255
GTransmittance = _color.G/255
BTransmittance = _color.B/255
if 0 <= RTransmittance <= 1 and 0 <= GTransmittance <= 1 and 0 <= BTransmittance <= 1:
avrgTrans = (0.265 * RTransmittance + 0.670 * GTransmittance + 0.065 * BTransmittance)
materialName = _materialName.Replace(" ", "_")
RADMaterial = createRadMaterial(modifier, materialName, RTransmittance, GTransmittance, BTransmittance)
return RADMaterial
else:
msg = "Transmittance values should be between 0 and 1"
e = gh.GH_RuntimeMessageLevel.Error
ghenv.Component.AddRuntimeMessage(e, msg)
else:
print "You should first let Honeybee to fly..."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "You should first let Honeybee to fly...")
RADMaterial = main()
| samuto/Honeybee | src/Honeybee_Radiance Mirror Material By Color.py | Python | gpl-3.0 | 4,158 |
from fnmatch import fnmatch
import os
from sys import exit
import html2text
import re
import urllib
import shutil
import logging
import sqlite3
from sys import stdout
logging.basicConfig(filename='error_log.log', filemode='a')
class BackupEvernote(object):
def __init__(self, evernote_dir, db_dir='', output_dir=''):
self.forbidden = ["?", "#", "/", "\\", "*", '"', "<", ">", "|", "%", " "]
self.fb_w_trail = self.forbidden
del self.fb_w_trail[2]
self.evernote_dir = evernote_dir
self.db_dir = db_dir
self.output_dir = output_dir
def _counter(self, ind, msg):
stdout.write('\r' + '\t%s: %s' % (msg, ind))
stdout.flush()
    def _exception(self, msg, file_path, e):
        """Log an error, then interactively ask whether to skip and go on.

        Exits the whole program unless the user answers 'y'.

        :param msg: short description of the action that failed.
        :param file_path: file the action was performed on.
        :param e: the caught exception (logged verbatim).
        """
        logging.error(e)
        while True:
            # NOTE(review): under Python 2, input() evaluates the typed
            # text (so answering y raises NameError); raw_input() looks
            # intended here -- confirm the targeted Python version.
            inp = input('Cannot %s: %s\n'
                        'Error: %s\n'
                        'Skip & continue? y/n: ' % (msg, file_path, e))
            if inp == 'y':
                break
            else:
                exit(0)
        return
def _multi_asterisk_fix(self, matchobj):
return matchobj.group(0)[1:]
def _get_pt(self, string):
path = string.split('[')[1].split(']')[0]
title = string.split('(')[1].split(')')[0]
return path, title
    def _image_url_fix(self, matchobj):
        """re.sub callback: rewrite a markdown link/image into Zim syntax.

        Handles three match shapes: a plain link (no '!['), an image with
        an embedded url, and a bare image. Image paths are rewritten to
        point into <output_dir>/uncategorized with forbidden characters
        sanitized and a '?800' width suffix appended.
        """
        url = ''
        string = matchobj.group(0)
        # remove escape chars
        if '\n' in string:
            string = string.replace('\n', ' ')
        # this is a url
        if '![' not in string:
            # NOTE(review): _get_pt returns (bracket, paren) contents; the
            # bracket text is used as the url here -- confirm this matches
            # the markdown html2text emits for plain links.
            url, _ = self._get_pt(string)
            return '%s' % url
        # image contains url
        url_pat = re.compile(r'.(\[.*\])\(.*\)\(.*\)$', re.MULTILINE)
        if re.match(url_pat, string):
            url = string.rpartition('(')[-1].strip(')')
        # image with or without url
        title, path = self._get_pt(string)
        path = '%s/%s/%s' % (self.output_dir, 'uncategorized', path)
        # todo: image path (remove random dash? -_) i.e t seal img
        path = self._remove_chars(path, self.fb_w_trail, trail=True)
        path += '?800'
        if not url:
            return '{{%s|%s}}' % (path, title)
        else:
            return '[[%s|{{%s|%s}}]]' % (url, path, title)
def _remove_asterisks(self, matchobj):
return re.sub(r'\**', '', matchobj.group(0))
    def _fix_spacing(self, matchobj):
        """re.sub callback: map markdown bullet indentation to Zim levels.

        The match is leading whitespace followed by '*'; the amount of
        whitespace (s_len) selects the Zim nesting depth. Unrecognized
        widths are returned unchanged.
        """
        # todo: add wider bullet conversions i.e imac note
        string = matchobj.group(0)
        s_len = len(string) - 1
        # NOTE(review): the returned literals encode the target indent
        # width for each depth; verify the exact space counts against a
        # converted note.
        if s_len <= 1:
            return string
        elif s_len == 2:
            return '*'
        elif s_len == 6:
            return ' *'
        elif s_len == 7:
            return ' *'
        elif s_len == 11:
            return ' *'
        else:
            return string
def to_zim_syntax(self, content):
""" Consider editing this func to fit the syntax of your chosen note taking software"""
# headers
# todo: remove heading chars / do not add heading if proceeded by image or url
# todo: only ensure 1 h1 header (first), replace other h1's with h2
new_c = content.replace('####', '=').('### ', '== ').replace('## ', '==== ').replace('# ', '====== ')
# line separation?
# todo: remake regex r'[#*_-]{3,}' not proceeded by words (\W) i.e jsand
line_pat = re.compile(r'^\*[^\S\n]*\*[^\S\n]*\*\n', re.MULTILINE)
new_c = re.sub(line_pat, ('-' * 80), new_c)
# todo: regex to replace 3+ line breaks with 2
# todo: regex for bold text, 2 * followed by words then 2 *
# todo: replace \- at start of the line with bullet
# fix bullet lists
new_c = re.sub(r'\*[^\S\n]+?\*', self._multi_asterisk_fix, new_c) # multiple asterisks on same line
spaces = re.compile(r'^[^\S\n]*\*', re.MULTILINE)
new_c = re.sub(spaces, self._fix_spacing, new_c)
# fix urls and images
new_c = re.sub(r'\*{2}(\[)|\)\*{2}', self._remove_asterisks, new_c)
# new_c = re.sub(r'!*\[[^\]]*\]\([^\)]*\)', self._image_url_fix, new_c)
new_c = re.sub(r'!*[\\\[]*\[[^\]]*[\\\]]*\([^\)]*[\]\)]*(\([^\)]*\))*', self._image_url_fix, new_c)
return new_c
    def edit_file(self, full_path, filename, to_zim=False):
        """Convert one exported note from HTML to markdown (optionally Zim).

        Reads `full_path`, converts the HTML body to markdown, and writes
        the result to a sanitized `.txt` file derived from `filename`.
        Conversion/save errors are routed through self._exception, which
        lets the user skip the note or abort the run.

        :param full_path: path of the .html note file to read.
        :param filename: the note's file name (used for the new name).
        :param to_zim: also convert the markdown to Zim wiki syntax.
        """
        text_maker = html2text.HTML2Text()
        with open(full_path, 'r') as f:
            html = f.read()
        content = ''
        if html:
            try:
                # Python 2 path: decode ignoring bad bytes, convert, then
                # force ascii and strip null and markdown escape chars.
                content = text_maker.handle(unicode(html, errors='ignore'))
                content = content.encode('ascii', 'ignore')
                content = content.split('\00')[0] # remove null chars
                content = content.replace('\.', '.') # remove escape chars
            except Exception as e:
                self._exception('convert content of note to markdown', full_path, e)
        else:
            content = ''
        if to_zim:
            content = self.to_zim_syntax(content)
        fn_path = self._rename_file(full_path, filename)
        with open(fn_path, 'w') as f:
            try:
                f.write(content.encode('ascii', 'ignore'))
            except Exception as e:
                self._exception('save note', fn_path, e)
        return
    def _remove_chars(self, stack_or_nb, folder_chars, trail=False):
        """Sanitize a stack/notebook name for use as a path component.

        '/' becomes '&' (unless `trail` is set) and every character from
        `folder_chars` becomes '_'.

        :param stack_or_nb: name to sanitize (may be None).
        :param folder_chars: characters to replace with underscores.
        :param trail: when True, leave '/' untouched (used for full paths).
        """
        try:
            if not trail:
                stack_or_nb = stack_or_nb.replace('/', '&')
            for char in folder_chars:
                if char in stack_or_nb:
                    stack_or_nb = stack_or_nb.replace(char, '_')
        except Exception:
            raise
        finally:
            # NOTE(review): returning from `finally` cancels the re-raise
            # above, so e.g. a None input is returned unchanged instead of
            # raising -- confirm this silent behavior is intended.
            return stack_or_nb
def _rename_file(self, full_path, filename, trail=False):
filename = self._remove_chars(filename, self.forbidden, trail)
renamed = filename.replace('.html', '.txt')
old_filename = full_path.rpartition('/')[-1]
return full_path.replace(old_filename, renamed)
def nbooks_to_dirs(self):
""" creates notebook & notebook stack folder structure containing all respective notes"""
print "\nOrganizing notes by directory (based on notebooks & stacks)..."
copied = []
con = sqlite3.connect(self.db_dir)
notebooks = con.execute("SELECT * FROM notebook_attr;").fetchall()
folder_chars = self.forbidden
del folder_chars[2]
for ind, i in enumerate(notebooks):
nb_id, notebook, stack = i[0], i[1], i[2]
stack = self._remove_chars(stack, folder_chars)
notebook = self._remove_chars(notebook, folder_chars)
nb_notes = con.execute('SELECT * FROM note_attr WHERE note_attr.notebook_uid = %s;' % nb_id)
notes_set = {i[1] for i in nb_notes}
s_dir = ''
if notebook and not stack:
notebook_dir = self.output_dir + '/' + notebook
if not os.path.isdir(notebook_dir):
os.mkdir(notebook_dir)
s_dir = notebook_dir
else:
if stack:
stack_path = self.output_dir + '/' + stack
if not os.path.isdir(stack_path):
os.mkdir(stack_path)
s_dir = stack_path
if notebook:
nb_in_stack = self.output_dir + '/%s/%s' % (stack, notebook)
if not os.path.isdir(nb_in_stack):
os.mkdir(nb_in_stack)
s_dir = nb_in_stack
for p, d, files in os.walk(self.evernote_dir):
for f in files:
fl = urllib.unquote(f)
fl_name = fl.rpartition('.')[0]
f_path = os.path.join(p, f)
if fl_name in notes_set:
copied.append(fl)
out_path = os.path.join(s_dir, f)
shutil.copy(f_path, out_path)
os.rename(out_path, os.path.join(s_dir, fl))
self._counter(ind, 'notebooks/stacks exported')
self.transfer_uncategorized(copied)
return
    def transfer_uncategorized(self, copied):
        """Copy everything not claimed by a notebook into 'uncategorized',
        then sanitize all directory and file names in the output tree.

        copied -- list of (URL-unquoted) note basenames already copied by
                  nbooks_to_dirs().
        NOTE(review): os.listdir() yields the URL-quoted names while *copied*
        holds unquoted ones, so notes with quoted characters (e.g. %20) look
        uncopied here and are duplicated into 'uncategorized' -- confirm.
        """
        print "\nTransfering the rest of the files that do not belong to a notebook..."
        uncategorized = os.path.join(self.output_dir, 'uncategorized')
        os.mkdir(uncategorized)
        ind = 0
        for fl in os.listdir(self.evernote_dir):
            if fl not in copied:
                f_path = os.path.join(self.evernote_dir, fl)
                out_path = os.path.join(uncategorized, fl)
                try:
                    # Plain files first; directories raise IOError and are
                    # copied recursively instead.
                    shutil.copy(f_path, out_path)
                except IOError:
                    shutil.copytree(f_path, out_path)
                finally:
                    ind += 1
                    self._counter(ind, 'copied files/dirs')
        # rename all files and folders within output folder
        # NOTE(review): renaming directories during a top-down os.walk means
        # renamed subtrees are re-walked from the stale listing; appears to
        # rely on the first pass touching only directories -- confirm.
        for p, dirs, files in os.walk(self.output_dir):
            for d in dirs:
                new_d = self._remove_chars(d, self.forbidden)
                new_d = new_d.replace('.html', '.txt')
                os.rename(os.path.join(p, d), os.path.join(p, new_d))
        for p, dirs, files in os.walk(self.output_dir):
            for f in files:
                new_f = self._remove_chars(f, self.forbidden)
                new_f = new_f.replace('.html', '.txt')
                os.rename(os.path.join(p, f), os.path.join(p, new_f))
        return
def to_markdown(self, zim_sintax=False):
print "\nConverting note syntax..."
c_dir = self.output_dir or self.evernote_dir
ind = 0
for p, d, files in os.walk(c_dir):
for f in files:
fl_path = os.path.join(p, f)
if fnmatch(f, '*.txt'):
self.edit_file(fl_path, f, zim_sintax)
ind += 1
self._counter(ind, 'edited files')
    def backup(self, notebooks_to_dirs=True, to_markdown=False, zim_sintax=False):
        """Run a backup pass.

        notebooks_to_dirs -- organize notes into notebook/stack directories
        to_markdown       -- convert note syntax to markdown
        zim_sintax        -- convert to Zim wiki syntax (implies conversion)
        """
        if notebooks_to_dirs:
            self.nbooks_to_dirs()
        if to_markdown or zim_sintax:
            self.to_markdown(zim_sintax)
        return
if __name__ == '__main__':
    # Hard-coded paths from the original author's machine; adjust before use.
    notes_dir = '/media/truecrypt2'
    db_dir = '/home/unknown/evernote_backup/Databases/shawnx22.exb'
    output_dir = '/home/unknown/tmp_notes'
    # output_dir = '/home/evernote_backup/Notes'
    ev = BackupEvernote(notes_dir, db_dir, output_dir)
    ev.backup()
ev.to_markdown(zim_sintax=True) | shawndaniel/evernote-exporter | evernote_exporter.py | Python | gpl-3.0 | 10,506 |
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
CURRENT_DIR = os.path.dirname(__file__)

# Read the long description up front so the file handle is closed promptly
# (the original used a bare open().read(), leaking the handle until GC).
with open(os.path.join(CURRENT_DIR, 'README.rst')) as _readme:
    LONG_DESCRIPTION = _readme.read()

setup(name='datapot',
      description='Library for automatic feature extraction from JSON-datasets',
      long_description=LONG_DESCRIPTION,
      version='0.1.3',
      url='https://github.com/bashalex/datapot',
      author='Alex Bash, Yuriy Mokriy, Nikita Saveyev, Michal Rozenwald, Peter Romov',
      author_email='avbashlykov@gmail.com, yurymokriy@gmail.com, n.a.savelyev@gmail.com, michal.rozenwald@gmail.com, romovpa@gmail.com',
      license='GNU v3.0',
      maintainer='Nikita Savelyev',
      maintainer_email='n.a.savelyev@gmail.com',
      install_requires=[
          'numpy >= 1.6.1',
          'scipy >= 0.17.0',
          'pandas >= 0.17.1',
          'scikit-learn >= 0.17.1',
          'iso-639 >= 0.4.5',
          'langdetect >= 1.0.7',
          'gensim >= 2.1.0',
          'nltk >= 3.2.4',
          'tsfresh >= 0.7.1',
          'python-dateutil >= 2.6.0',
          'fastnumbers >= 2.0.1',
          'pystemmer >= 1.3.0',
      ],
      classifiers=[
          'Development Status :: 3 - Alpha',
          'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 2',
          'Intended Audience :: Science/Research',
          'Intended Audience :: Developers',
          'Topic :: Scientific/Engineering',
          'Topic :: Software Development',
      ],
      packages=find_packages())
| bashalex/datapot | setup.py | Python | gpl-3.0 | 1,582 |
# -*- coding: utf-8 -*-
###############################################################################
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
###############################################################################
import re
from random import random
from urllib import unquote
from urlparse import urlparse
from pycurl import FOLLOWLOCATION, LOW_SPEED_TIME
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, PluginParseError, replace_patterns
from module.plugins.internal.CaptchaService import ReCaptcha, SolveMedia
from module.utils import html_unescape
from module.network.RequestFactory import getURL
class XFileSharingPro(SimpleHoster):
    """
    Common base for XFileSharingPro hosters like EasybytezCom, CramitIn, FiledinoCom...
    Some hosters may work straight away when added to __pattern__
    However, most of them will NOT work because they are either down or running a customized version
    """
    __name__ = "XFileSharingPro"
    __type__ = "hoster"
    __pattern__ = r'^unmatchable$'
    __version__ = "0.31"
    __description__ = """XFileSharingPro base hoster plugin"""
    __author_name__ = ("zoidberg", "stickell")
    __author_mail__ = ("zoidberg@mujmail.cz", "l.stickell@yahoo.it")

    # Regexes describing the common XFileSharingPro page layout; subclasses
    # override individual patterns for customized hoster skins.
    FILE_INFO_PATTERN = r'<tr><td align=right><b>Filename:</b></td><td nowrap>(?P<N>[^<]+)</td></tr>\s*.*?<small>\((?P<S>[^<]+)\)</small>'
    FILE_NAME_PATTERN = r'<input type="hidden" name="fname" value="(?P<N>[^"]+)"'
    FILE_SIZE_PATTERN = r'You have requested .*\((?P<S>[\d\.\,]+) ?(?P<U>\w+)?\)</font>'
    OFFLINE_PATTERN = r'>\w+ (Not Found|file (was|has been) removed)'
    WAIT_PATTERN = r'<span id="countdown_str">.*?>(\d+)</span>'
    OVR_LINK_PATTERN = r'<h2>Download Link</h2>\s*<textarea[^>]*>([^<]+)'
    CAPTCHA_URL_PATTERN = r'(http://[^"\']+?/captchas?/[^"\']+)'
    RECAPTCHA_URL_PATTERN = r'http://[^"\']+?recaptcha[^"\']+?\?k=([^"\']+)"'
    CAPTCHA_DIV_PATTERN = r'>Enter code.*?<div.*?>(.*?)</div>'
    SOLVEMEDIA_PATTERN = r'http:\/\/api\.solvemedia\.com\/papi\/challenge\.script\?k=(.*?)"'
    ERROR_PATTERN = r'class=["\']err["\'][^>]*>(.*?)</'

    def setup(self):
        """Configure download capabilities depending on plugin/premium status."""
        if self.__name__ == "XFileSharingPro":
            self.__pattern__ = self.core.pluginManager.hosterPlugins[self.__name__]['pattern']
            self.multiDL = True
        else:
            self.resumeDownload = self.multiDL = self.premium
        self.chunkLimit = 1

    def process(self, pyfile):
        """Main entry point: resolve the download link and start the transfer."""
        self.prepare()
        pyfile.url = replace_patterns(pyfile.url, self.FILE_URL_REPLACEMENTS)
        if not re.match(self.__pattern__, pyfile.url):
            if self.premium:
                self.handleOverriden()
            else:
                self.fail("Only premium users can download from other hosters with %s" % self.HOSTER_NAME)
        else:
            try:
                # Due to a 0.4.9 core bug self.load would use cookies even if
                # cookies=False. Workaround using getURL to avoid cookies.
                # Can be reverted in 0.5 as the cookies bug has been fixed.
                self.html = getURL(pyfile.url, decode=True)
                self.file_info = self.getFileInfo()
            except PluginParseError:
                self.file_info = None
            self.location = self.getDirectDownloadLink()
            if not self.file_info:
                pyfile.name = html_unescape(unquote(urlparse(
                    self.location if self.location else pyfile.url).path.split("/")[-1]))
            if self.location:
                self.startDownload(self.location)
            elif self.premium:
                self.handlePremium()
            else:
                self.handleFree()

    def prepare(self):
        """ Initialize important variables """
        if not hasattr(self, "HOSTER_NAME"):
            self.HOSTER_NAME = re.match(self.__pattern__, self.pyfile.url).group(1)
        if not hasattr(self, "LINK_PATTERN"):
            self.LINK_PATTERN = r'(http://([^/]*?%s|\d+\.\d+\.\d+\.\d+)(:\d+)?(/d/|(?:/files)?/\d+/\w+/)[^"\'<]+)' % self.HOSTER_NAME
        self.captcha = self.errmsg = None
        self.passwords = self.getPassword().splitlines()

    def getDirectDownloadLink(self):
        """ Get download link for premium users with direct download enabled """
        self.req.http.lastURL = self.pyfile.url
        self.req.http.c.setopt(FOLLOWLOCATION, 0)
        self.html = self.load(self.pyfile.url, cookies=True, decode=True)
        self.header = self.req.http.header
        self.req.http.c.setopt(FOLLOWLOCATION, 1)
        location = None
        m = re.search(r"Location\s*:\s*(.*)", self.header, re.I)
        if m and re.match(self.LINK_PATTERN, m.group(1)):
            location = m.group(1).strip()
        return location

    def handleFree(self):
        """Download path for free (non-premium) users."""
        url = self.getDownloadLink()
        self.logDebug("Download URL: %s" % url)
        self.startDownload(url)

    def getDownloadLink(self):
        """Submit the download form (up to 5 attempts) and extract the link
        from either the Location header or the page body."""
        for i in xrange(5):
            self.logDebug("Getting download link: #%d" % i)
            data = self.getPostParameters()
            self.req.http.c.setopt(FOLLOWLOCATION, 0)
            self.html = self.load(self.pyfile.url, post=data, ref=True, decode=True)
            self.header = self.req.http.header
            self.req.http.c.setopt(FOLLOWLOCATION, 1)
            m = re.search(r"Location\s*:\s*(.*)", self.header, re.I)
            if m:
                break
            m = re.search(self.LINK_PATTERN, self.html, re.S)
            if m:
                break
        else:
            if self.errmsg and 'captcha' in self.errmsg:
                self.fail("No valid captcha code entered")
            else:
                self.fail("Download link not found")
        return m.group(1)

    def handlePremium(self):
        """Download path for premium users (direct link in the page)."""
        self.html = self.load(self.pyfile.url, post=self.getPostParameters())
        m = re.search(self.LINK_PATTERN, self.html)
        if m is None:
            self.parseError('DIRECT LINK')
        self.startDownload(m.group(1))

    def handleOverriden(self):
        """Leech a foreign hoster's link by uploading it (by URL) to this
        hoster with the premium account, then downloading the result."""
        #only tested with easybytez.com
        self.html = self.load("http://www.%s/" % self.HOSTER_NAME)
        action, inputs = self.parseHtmlForm('')
        upload_id = "%012d" % int(random() * 10 ** 12)
        action += upload_id + "&js_on=1&utype=prem&upload_type=url"
        inputs['tos'] = '1'
        inputs['url_mass'] = self.pyfile.url
        # sic: 'up1oad_type' (with a digit one) is the field name the remote
        # form actually expects.
        inputs['up1oad_type'] = 'url'
        self.logDebug(self.HOSTER_NAME, action, inputs)
        #wait for file to upload to easybytez.com
        self.req.http.c.setopt(LOW_SPEED_TIME, 600)
        self.html = self.load(action, post=inputs)
        action, inputs = self.parseHtmlForm('F1')
        if not inputs:
            self.parseError('TEXTAREA')
        self.logDebug(self.HOSTER_NAME, inputs)
        if inputs['st'] == 'OK':
            self.html = self.load(action, post=inputs)
        elif inputs['st'] == 'Can not leech file':
            self.retry(max_tries=20, wait_time=3 * 60, reason=inputs['st'])
        else:
            self.fail(inputs['st'])
        #get easybytez.com link for uploaded file
        m = re.search(self.OVR_LINK_PATTERN, self.html)
        if m is None:
            self.parseError('DIRECT LINK (OVR)')
        self.pyfile.url = m.group(1)
        header = self.load(self.pyfile.url, just_header=True)
        if 'location' in header: # Direct link
            self.startDownload(self.pyfile.url)
        else:
            self.retry()

    def startDownload(self, link):
        """Confirm any solved captcha and start the actual transfer."""
        link = link.strip()
        if self.captcha:
            self.correctCaptcha()
        self.logDebug('DIRECT LINK: %s' % link)
        self.download(link, disposition=True)

    def checkErrors(self):
        """Scan self.html for hoster error boxes and react (wait, retry,
        fail, ...); stores the message in self.errmsg and returns it."""
        m = re.search(self.ERROR_PATTERN, self.html)
        if m:
            self.errmsg = m.group(1)
            self.logWarning(re.sub(r"<.*?>", " ", self.errmsg))
            if 'wait' in self.errmsg:
                # NOTE(review): a bare number without a unit would yield u=''
                # and raise KeyError here -- confirm hosters always name units.
                wait_time = sum([int(v) * {"hour": 3600, "minute": 60, "second": 1}[u] for v, u in
                                 re.findall(r'(\d+)\s*(hour|minute|second)?', self.errmsg)])
                self.wait(wait_time, True)
            elif 'captcha' in self.errmsg:
                self.invalidCaptcha()
            elif 'premium' in self.errmsg and 'require' in self.errmsg:
                self.fail("File can be downloaded by premium users only")
            elif 'limit' in self.errmsg:
                self.wait(1 * 60 * 60, True)
                self.retry(25)
            elif 'countdown' in self.errmsg or 'Expired' in self.errmsg:
                self.retry()
            elif 'maintenance' in self.errmsg:
                self.tempOffline()
            elif 'download files up to' in self.errmsg:
                self.fail("File too large for free download")
            else:
                self.fail(self.errmsg)
        else:
            self.errmsg = None
        return self.errmsg

    def getPostParameters(self):
        """Assemble the POST data for the download form, handling wait time,
        file password and captcha; walks through intermediate forms for up
        to 3 rounds."""
        for _ in xrange(3):
            if not self.errmsg:
                self.checkErrors()
            if hasattr(self, "FORM_PATTERN"):
                action, inputs = self.parseHtmlForm(self.FORM_PATTERN)
            else:
                action, inputs = self.parseHtmlForm(input_names={"op": re.compile("^download")})
            if not inputs:
                action, inputs = self.parseHtmlForm('F1')
                if not inputs:
                    if self.errmsg:
                        self.retry()
                    else:
                        self.parseError("Form not found")
            self.logDebug(self.HOSTER_NAME, inputs)
            if 'op' in inputs and inputs['op'] in ("download2", "download3"):
                if "password" in inputs:
                    if self.passwords:
                        inputs['password'] = self.passwords.pop(0)
                    else:
                        # Fixed error message (was "No or invalid passport").
                        self.fail("No or invalid password")
                if not self.premium:
                    m = re.search(self.WAIT_PATTERN, self.html)
                    if m:
                        wait_time = int(m.group(1)) + 1
                        self.setWait(wait_time, False)
                    else:
                        wait_time = 0
                    self.captcha = self.handleCaptcha(inputs)
                    if wait_time:
                        self.wait()
                self.errmsg = None
                return inputs
            else:
                inputs['referer'] = self.pyfile.url
                if self.premium:
                    inputs['method_premium'] = "Premium Download"
                    if 'method_free' in inputs:
                        del inputs['method_free']
                else:
                    inputs['method_free'] = "Free Download"
                    if 'method_premium' in inputs:
                        del inputs['method_premium']
                self.html = self.load(self.pyfile.url, post=inputs, ref=True)
                self.errmsg = None
        else:
            self.parseError('FORM: %s' % (inputs['op'] if 'op' in inputs else 'UNKNOWN'))

    def handleCaptcha(self, inputs):
        """Detect and solve whichever captcha type the page uses.

        Writes the solution into *inputs* and returns an int identifying the
        captcha type handled: 0 = none, 1 = ReCaptcha, 2 = captcha image,
        3 = positioned digits, 4 = SolveMedia.
        """
        m = re.search(self.RECAPTCHA_URL_PATTERN, self.html)
        if m:
            recaptcha_key = unquote(m.group(1))
            self.logDebug("RECAPTCHA KEY: %s" % recaptcha_key)
            recaptcha = ReCaptcha(self)
            inputs['recaptcha_challenge_field'], inputs['recaptcha_response_field'] = recaptcha.challenge(recaptcha_key)
            return 1
        else:
            m = re.search(self.CAPTCHA_URL_PATTERN, self.html)
            if m:
                captcha_url = m.group(1)
                inputs['code'] = self.decryptCaptcha(captcha_url)
                return 2
            else:
                m = re.search(self.CAPTCHA_DIV_PATTERN, self.html, re.DOTALL)
                if m:
                    captcha_div = m.group(1)
                    self.logDebug(captcha_div)
                    # Digits are absolutely positioned; sort by their CSS
                    # padding-left offset to reconstruct the code order.
                    numerals = re.findall(r'<span.*?padding-left\s*:\s*(\d+).*?>(\d)</span>', html_unescape(captcha_div))
                    inputs['code'] = "".join([a[1] for a in sorted(numerals, key=lambda num: int(num[0]))])
                    self.logDebug("CAPTCHA", inputs['code'], numerals)
                    return 3
                else:
                    m = re.search(self.SOLVEMEDIA_PATTERN, self.html)
                    if m:
                        captcha_key = m.group(1)
                        captcha = SolveMedia(self)
                        inputs['adcopy_challenge'], inputs['adcopy_response'] = captcha.challenge(captcha_key)
                        return 4
        return 0
# Factory-generated module-level getInfo() used by pyLoad to probe links.
getInfo = create_getInfo(XFileSharingPro)
| estaban/pyload | module/plugins/hoster/XFileSharingPro.py | Python | gpl-3.0 | 13,352 |
#!/usr/bin/python
# vim: ai:ts=4:sw=4:sts=4:et:fileencoding=utf-8
#
# Thermal monitor
#
# Copyright 2013 Michal Belica <devel@beli.sk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
import serial
import sys
import signal
import select
import re
import time
import subprocess
from optparse import OptionParser
class ThermalMonitor(object):
def zabbix_sender(self):
proc = subprocess.Popen(['zabbix_sender', '-z', self.options.zabbix, '-p',
self.options.port, '-s', self.options.host, '-i', '-'], stdin=subprocess.PIPE)
for addr,temp in self.data.items():
proc.communicate('- %s[%s] %g\n' % (self.options.key, addr, temp))
proc.stdin.close()
proc.wait()
def parse_options(self):
parser = OptionParser()
parser.add_option("-d", "--device", dest="device",
help="read from serial port DEVICE (required)", metavar="DEVICE")
parser.add_option("-s", "--speed", dest="speed", type="int", default=9600,
help="serial port baud rate (default: 9600)", metavar="BAUD")
parser.add_option("-i", "--interval", dest="interval", type="int", default=10,
help="sampling interval (default: 10)", metavar="SECONDS")
parser.add_option("-z", "--zabbix", dest="zabbix",
help="Zabbix server (required)", metavar="ADDR")
parser.add_option("-p", "--port", dest="port", default="10051",
help="listening port of Zabbix server (default: 10051)", metavar="PORT")
parser.add_option("-n", "--host", dest="host",
help="name of host in Zabbix (required)", metavar="NAME")
parser.add_option("-k", "--key", dest="key", default="thermal_monitor",
help="item key base name; device address will be added as an argument, "
+"e.g. thermal_monitor[addr] (default: thermal_monitor)", metavar="key")
(self.options, self.args) = parser.parse_args()
# check for required options
for opt in ['device', 'zabbix', 'host']:
if opt not in self.options.__dict__ or self.options.__dict__[opt] is None:
parser.error("parameter --%s is required" % opt)
def sighandler_terminate(self, signum, frame):
self.running = False
def register_signals(self, ignore=[],
terminate=[signal.SIGINT, signal.SIGTERM, signal.SIGHUP]):
for sig in ignore:
signal.signal(sig, signal.SIG_IGN)
for sig in terminate:
signal.signal(sig, self.sighandler_terminate)
def open_serial(self):
self.ser = serial.Serial(self.options.device, self.options.speed)
self.ser.readline() # ignore first (incomplete) line
def __init__(self):
self.running = False
self.data = dict()
self.register_signals()
self.parse_options()
self.open_serial()
self.cre = re.compile(r"R=(?P<addr>\w+)\s+T=(?P<temp>[.0-9]+)\r?$")
def start(self):
self.running = True
next = time.time()
sent = False
while self.running:
try:
line = self.ser.readline()
except select.error as e:
if e[0] == 4: # interrupted system call
continue
else:
raise
if time.time() > next:
next += self.options.interval
# clears the list to send all addresses again
for k,v in self.data.items():
self.data[k] = None
sent = False
elif sent:
# data already sent in this cycle
continue
m = self.cre.search(line)
if m:
# line matched pattern
addr = m.group('addr')
temp = float(m.group('temp'))
if addr not in self.data or self.data[addr] is None:
# address not yet collected in this cycle
self.data[addr] = temp
else:
# repeating address reached - send out data
print "sending", addr, temp
self.zabbix_sender()
sent = True
else:
print "invalid line received"
self.cleanup()
def cleanup(self):
self.ser.close()
if __name__ == "__main__":
    # Run until a termination signal is received (handled in start()).
    thermalmonitor = ThermalMonitor()
    thermalmonitor.start()
| beli-sk/thermal_monitor | thermal_monitor.py | Python | gpl-3.0 | 5,081 |
""" aulario.py """
BACK_BUTTON_TEXT = "Indietro ❌"
DAY_SELECTION = "Seleziona la data della lezione che ti interessa."
AULARIO_WARNING = "⚠️ Aulario non ancora pronto, riprova fra qualche minuto ⚠️"
LESSON_SELECTION = "Quale lezione devi seguire?"
NO_LESSON_WARNING = "Nessuna lezione programmata per questo giorno"
""" drive_contribute.py """
NO_USERNAME_WARNING = "Nessuno username"
USE_TEXT = "USO: /drive_contribute [e-mail] [motivazione]\n\nESEMPIO: /drive_contribute mario.rossi@gmail.com Vorrei caricare i miei appunti di Fondamenti di Informatica"
CONFIRM_ACCESS = "Hai ottenuto l'accesso in scrittura alla cartella Drive! \n\nPresto ti arriverà un'email di conferma per gli accessi in scrittura e potrai aggiungere appunti nella cartella mediante questo link https://cutt.ly/unict-dmi-drive"
VALIDATION_ERROR = "Si é verificato un errore durante la validazione dell'email, riprova più tardi o verifica se hai già gli accessi in scrittura alla cartella mediante questo link https://cutt.ly/unict-dmi-drive"
""" esami.py """
PRIVATE_USE_WARNING = "Questo comando è utilizzabile solo in privato"
GROUP_WARNING = "Dal comando /esami che hai eseguito in un gruppo"
""" gdrive.py """
NO_GROUP_WARNING = "La funzione /drive non è ammessa nei gruppi"
ERROR_DEVS = "Si è verificato un errore, ci scusiamo per il disagio. Contatta i devs. /help"
""" help.py """
DIPARTIMENTO_CDL = "🏢 Dipartimento e CdL"
REGOLAMENTO_DIDATTICO = "🪧 Regolamento Didattico"
SEGRETERIA_CONTATTI = "🕐 Segreteria orari e contatti"
ERSU_ORARI = "🍽 ERSU orari e contatti"
APPUNTI_CLOUD = "☁️ Appunti & Cloud"
PROGETTI_RICONOSCIMENTI = "🏅 Progetti e Riconoscimenti"
ALL_COMMANDS = "Tutti i comandi"
CLOSE = "❌ Chiudi"
BACK_TO_MENU = "🔙 Torna al menu"
""" lezioni.py """
LE_USE_WARNING = "Questo comando è utilizzabile solo in privato"
LE_GROUP_WARNING = "Dal comando lezioni che hai eseguito in un gruppo"
""" regolmento_didattico """
GRAD_SELECT = "Scegliere uno dei corsi di laurea:"
YEAR_SELECT = "Scegliere il regolamento in base al proprio anno di immatricolazione:"
INF_COURSE = "Scegliere uno dei seguenti corsi (Informatica):"
MAT_COURSE = "Scegliere uno dei seguenti corsi (Matematica):"
RET_FILE = "Ecco il file richiesto:"
""" report.py """
REP_WARNING = "Errore. Inserisci la tua segnalazione dopo /report (Ad esempio /report Invasione ingegneri in corso.)"
""" stats.py """
EASTER_EGG = ("leiCheNePensaSignorina", "smonta_portoni", "santino", "bladrim", "prof_sticker")
| UNICT-DMI/Telegram-DMI-Bot | module/data/vars.py | Python | gpl-3.0 | 2,524 |
#!/usr/bin/env python
################################################################################
# Copyright 2015 Brecht Baeten
# This file is part of mpcpy.
#
# mpcpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mpcpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mpcpy. If not, see <http://www.gnu.org/licenses/>.
################################################################################
import numpy as np
import matplotlib.pyplot as plt
import pyomo.environ as pyomo
import mpcpy
# Disturbances
# 24 hours of hourly synthetic boundary conditions: ambient temperature,
# solar gains, electricity price and comfort/equipment limits (in Kelvin
# where applicable).
time = np.arange(0.,24.01*3600.,3600.)
dst = {
    'time': time,
    'T_am': 5 + 2*np.sin(2*np.pi*time/24./3600.)+273.15,
    'Q_flow_so': 500 + 500*np.sin(2*np.pi*time/24./3600.),
    'p_el': 0.2 + 0.05*np.sin(2*np.pi*time/24./3600.),
    'Q_flow_hp_max': 5000*np.ones_like(time),
    'T_in_min': 20*np.ones_like(time)+273.15,
    'T_em_max': 30*np.ones_like(time)+273.15
}
disturbances = mpcpy.Disturbances(dst, periodic=False)
# test
print(disturbances(1800))
print(disturbances(24.5 * 3600)) # extrapolation
# Emulator
class Emulator(mpcpy.Emulator):
    """
    A custom system emulator

    Two thermal states integrated with explicit Euler at 60 s steps: an
    emission system (T_em) heated by the heat pump, coupled to the indoor
    zone (T_in), which receives solar gains and loses heat to the ambient.
    """
    def simulate(self, starttime, stoptime, input):
        """Simulate from *starttime* to *stoptime*, interpolating the inputs
        from the *input* dictionary; returns a results dictionary."""
        dt = 60
        # np.float was deprecated and removed in NumPy >= 1.24; the builtin
        # float is exactly what it aliased.
        time = np.arange(starttime, stoptime+dt, dt, dtype=float)
        # initialize from the last stored state
        T_em = np.ones_like(time)*self.res['T_em'][-1]
        T_in = np.ones_like(time)*self.res['T_in'][-1]
        # interpolate inputs
        Q_flow_hp = np.interp(time, input['time'], input['Q_flow_hp'])
        Q_flow_so = np.interp(time, input['time'], input['Q_flow_so'])
        T_am = np.interp(time, input['time'], input['T_am'])
        for i,t in enumerate(time[:-1]):
            # C_em dT_em/dt = Q_flow_hp - UA_em_in*(T_em-T_in)
            T_em[i+1] = T_em[i] + (Q_flow_hp[i] - self.parameters['UA_em_in']*(T_em[i]-T_in[i]))*dt/self.parameters['C_em']
            # C_in dT_in/dt = Q_flow_so - UA_em_in*(T_in-T_em) - UA_in_am*(T_in-T_am)
            # Bug fix: this step must divide by C_in (the original divided by
            # C_em, contradicting the balance equation stated above).
            T_in[i+1] = T_in[i] + (Q_flow_so[i] - self.parameters['UA_em_in']*(T_in[i]-T_em[i]) - self.parameters['UA_in_am']*(T_in[i]-T_am[i]))*dt/self.parameters['C_in']
        # create and return a results dict
        res = {
            'time': time,
            'Q_flow_hp':Q_flow_hp,
            'Q_flow_so':Q_flow_so,
            'T_em':T_em,
            'T_in':T_in,
            'T_am':T_am,
        }
        return res
# Emulator parameters and initial conditions:
# Thermal capacities in J/K, UA coupling coefficients in W/K,
# temperatures in Kelvin.
emulator_parameters = {
    'C_em': 10e6,
    'C_in': 5e6,
    'UA_in_am': 200,
    'UA_em_in': 1600
}
emulator_initial_conditions = {
    'T_em': 22+273.15,
    'T_in': 21+273.15
}
emulator = Emulator(['T_am','Q_flow_so','Q_flow_hp'],parameters=emulator_parameters,initial_conditions=emulator_initial_conditions)
emulator.initialize()
# test
# Smoke test: 2 h simulation with piecewise-linear inputs.
inp = {
    'time': [0., 3600., 7200.],
    'T_am': [273.15, 274.15, 275.15],
    'Q_flow_so': [500., 400., 300.],
    'Q_flow_hp': [4000., 4000., 4000.]
}
emulator(np.arange(0., 7201., 1200.), inp)
print(emulator.res['time'])
print(emulator.res['T_em'])
# State estimation
class StateestimationPerfect(mpcpy.Stateestimation):
    """
    Perfect (noise-free) state estimation: the states are read straight from
    the emulator's stored results.
    """
    def stateestimation(self, time):
        """Return T_in and T_em interpolated from the emulator results at *time*."""
        results = self.emulator.res
        return {
            key: np.interp(time, results['time'], results[key])
            for key in ('T_in', 'T_em')
        }
stateestimation = StateestimationPerfect(emulator)
# test
print(stateestimation(0))
# Prediction
# Predictions are perfect here: they simply interpolate the disturbances.
prediction = mpcpy.Prediction(disturbances)
# test
print(prediction([0., 1800., 3600.]))
# Control
class LinearProgram(mpcpy.Control):
    """Optimal control problem: minimize heat pump energy subject to the
    two-state thermal model, with soft comfort constraints on T_in."""
    def formulation(self):
        """
        formulates the abstract optimal control problem
        """
        model = pyomo.AbstractModel()
        # sets: i = intervals, ip = time points (one more than intervals)
        model.i = pyomo.Set() # initialize=range(len(time)-1)
        model.ip = pyomo.Set() # initialize=range(len(time))
        # parameters (defaults are overridden per instance in solution())
        model.time = pyomo.Param(model.ip)
        model.UA_em_in = pyomo.Param(initialize=800.)
        model.UA_in_am = pyomo.Param(initialize=200.)
        model.C_in = pyomo.Param(initialize=5.0e6)
        model.C_em = pyomo.Param(initialize=20.0e6)
        model.T_in_ini = pyomo.Param(initialize=21.+273.15)
        model.T_em_ini = pyomo.Param(initialize=22.+273.15)
        model.T_in_min = pyomo.Param(initialize=20.+273.15)
        model.T_in_max = pyomo.Param(initialize=24.+273.15)
        model.T_am = pyomo.Param(model.i, initialize=0.+273.15)
        model.Q_flow_so = pyomo.Param(model.i, initialize=0.)
        # variables
        model.T_in = pyomo.Var(model.ip,domain=pyomo.Reals, initialize=20.+273.15)
        model.T_em = pyomo.Var(model.ip,domain=pyomo.Reals,initialize=20.+273.15)
        model.T_in_min_slack = pyomo.Var(model.ip,domain=pyomo.NonNegativeReals, initialize=0)
        model.T_in_max_slack = pyomo.Var(model.ip,domain=pyomo.NonNegativeReals, initialize=0)
        model.Q_flow_hp = pyomo.Var(model.i,domain=pyomo.NonNegativeReals,bounds=(0.,10000.),initialize=0.)
        # constraints: discretized state equations + initial conditions
        model.state_T_em = pyomo.Constraint(
            model.i,
            rule=lambda model,i: model.C_em*(model.T_em[i+1]-model.T_em[i])/(model.time[i+1]-model.time[i]) == \
                model.Q_flow_hp[i] \
                - model.UA_em_in*(model.T_em[i]-model.T_in[i])
        )
        model.ini_T_em = pyomo.Constraint(rule=lambda model: model.T_em[0] == model.T_em_ini)
        model.state_T_in = pyomo.Constraint(
            model.i,
            rule=lambda model,i: model.C_in*(model.T_in[i+1]-model.T_in[i])/(model.time[i+1]-model.time[i]) == \
                model.Q_flow_so[i] \
                - model.UA_em_in*(model.T_in[i]-model.T_em[i]) \
                - model.UA_in_am*(model.T_in[i]-model.T_am[i])
        )
        model.ini_T_in = pyomo.Constraint(rule=lambda model: model.T_in[0] == model.T_in_ini)
        # soft constraints: slack variables measure comfort violations
        model.constraint_T_in_min_slack = pyomo.Constraint(
            model.ip,
            rule=lambda model,i: model.T_in_min_slack[i] >= model.T_in_min-model.T_in[i]
        )
        model.constraint_T_in_max_slack = pyomo.Constraint(
            model.ip,
            rule=lambda model,i: model.T_in_max_slack[i] >= model.T_in[i]-model.T_in_max
        )
        # a large number (penalty weight for comfort violations)
        L = 1e6
        # objective: heat pump energy (kWh) + heavily penalized slack
        model.objective = pyomo.Objective(
            rule=lambda model: sum(model.Q_flow_hp[i]*(model.time[i+1]-model.time[i])/3600/1000 for i in model.i) \
                +sum(model.T_in_min_slack[i]*(model.time[i+1]-model.time[i])/3600 for i in model.i)*L\
                +sum(model.T_in_max_slack[i]*(model.time[i+1]-model.time[i])/3600 for i in model.i)*L\
        )
        self.model = model
    def solution(self, sta, pre):
        """
        Instantiate the optimal control problem, solve it and return a solution dictionary
        """
        ip = np.arange(len(pre['time']))
        data = {
            None: {
                'i': {None: ip[:-1]},
                'ip': {None: ip},
                'time': {(i,): v for i, v in enumerate(pre['time'])},
                'T_am': {(i,): pre['T_am'][i] for i in ip[:-1]},
                'Q_flow_so': {(i,): pre['Q_flow_so'][i] for i in ip[:-1]},
                'T_em_ini': {None: sta['T_em']},
                'T_in_ini': {None: sta['T_in']},
                'C_em': {None: self.parameters['C_em']},
                'C_in': {None: self.parameters['C_in']},
                'UA_em_in': {None: self.parameters['UA_em_in']},
                'UA_in_am': {None: self.parameters['UA_in_am']},
            }
        }
        instance = self.model.create_instance(data)
        optimizer = pyomo.SolverFactory('ipopt')
        results = optimizer.solve(instance)
        # return the control inputs
        sol = {
            'time': np.array([pyomo.value(instance.time[i]) for i in instance.ip]),
            'T_em': np.array([pyomo.value(instance.T_em[i]) for i in instance.ip]),
            'T_in': np.array([pyomo.value(instance.T_in[i]) for i in instance.ip]),
            'Q_flow_hp': np.array([pyomo.value(instance.Q_flow_hp[i]) for i in instance.i]),
        }
        return sol
# Control parameters
# The controller is given the exact emulator parameters (a perfect model).
control_parameters = {
    'C_in': emulator_parameters['C_in'],
    'C_em': emulator_parameters['C_em'],
    'UA_in_am': emulator_parameters['UA_in_am'],
    'UA_em_in': emulator_parameters['UA_em_in'],
}
# create an instance
control = LinearProgram(stateestimation, prediction, parameters=control_parameters, horizon=24*3600., timestep=3600.)
# test
print(control(0))
# MPC
# Closed loop over one day: re-optimize hourly, store results each minute.
mpc = mpcpy.MPC(emulator, control, disturbances, emulationtime=1*24*3600., resulttimestep=60)
res = mpc(verbose=1)
# Plot results
# Kelvin -> Celsius conversion fixed to 273.15 (was 273.17, a typo; 273.15 is
# used everywhere else in this file).
# NOTE(review): 'fix' looks like a typo for 'fig'; harmless since the figure
# handle is never used.
fix, ax = plt.subplots(2, 1)
ax[0].plot(res['time']/3600, res['Q_flow_hp'], 'k', label='hp')
ax[0].plot(res['time']/3600, res['Q_flow_so'], 'g', label='sol')
ax[0].set_ylabel('Heat flow rate (W)')
ax[0].legend(loc='lower right')
ax[1].plot(res['time']/3600, res['T_in']-273.15, 'k', label='in')
ax[1].plot(res['time']/3600, res['T_em']-273.15, 'b', label='em')
ax[1].plot(res['time']/3600, res['T_am']-273.15, 'g', label='amb')
ax[1].set_ylabel('Temperature ($^\circ$C)')
ax[1].set_xlabel('Time (h)')
ax[1].legend(loc='lower right')
# Using the default emulator
# The default emulator simply reuses the control solution. The results are a bit different due to model mismatch.
def_emulator = mpcpy.Emulator(['T_am', 'Q_flow_so', 'Q_flow_hp'],initial_conditions=emulator_initial_conditions)
# Bug fix: initialize the newly created default emulator (the original called
# emulator.initialize() on the first emulator again).
def_emulator.initialize()
def_stateestimation = StateestimationPerfect(def_emulator)
def_control = LinearProgram(def_stateestimation, prediction,
        parameters=control_parameters, horizon=24*3600., timestep=3600.)
def_mpc = mpcpy.MPC(def_emulator, def_control, disturbances, emulationtime=1*24*3600., resulttimestep=60)
def_res = def_mpc(verbose=1)
# Compare against the custom-emulator run (dashed lines); Kelvin -> Celsius
# conversion fixed to 273.15 (was 273.17, a typo).
fix, ax = plt.subplots(2, 1)
ax[0].plot(def_res['time']/3600, def_res['Q_flow_hp'], 'k', label='hp')
ax[0].plot(res['time']/3600, res['Q_flow_hp'], 'k--')
ax[0].plot(def_res['time']/3600, def_res['Q_flow_so'], 'g', label='sol')
ax[0].set_ylabel('Heat flow rate (W)')
ax[0].legend(loc='lower right')
ax[1].plot(def_res['time']/3600, def_res['T_in']-273.15, 'k', label='in')
ax[1].plot(def_res['time']/3600, def_res['T_em']-273.15, 'b', label='em')
ax[1].plot(def_res['time']/3600, def_res['T_am']-273.15, 'g', label='amb')
ax[1].plot(res['time']/3600, res['T_in']-273.15, 'k--')
ax[1].plot(res['time']/3600, res['T_em']-273.15, 'b--')
ax[1].set_ylabel('Temperature ($^\circ$C)')
ax[1].set_xlabel('Time (h)')
ax[1].legend(loc='lower right')
if __name__ == '__main__':
    plt.show()
| BrechtBa/mpcpy | examples/simple_space_heating_mpc.py | Python | gpl-3.0 | 11,279 |
from dolfin import error, info
class Heart:
    """Base description of a heart problem for the beat solvers.

    Subclasses must supply a mesh and conductivities; every other hook has a
    permissive default and may be overridden as needed.
    """
    def __init__(self, cell_model):
        # The ionic cell model, exposed through cell_model().
        self._cell_model = cell_model
    # Mandatory stuff
    def mesh(self):
        """Return the computational mesh (must be provided by subclass)."""
        error("Need to prescribe domain")
    def conductivities(self):
        """Return the conductivity tensors (must be provided by subclass)."""
        # Fixed misspelled message (was "conducitivites").
        error("Need to prescribe conductivities")
    # Optional stuff
    def applied_current(self):
        """Applied stimulus current; None means no stimulus."""
        return None
    def end_time(self):
        """Final simulation time; defaults to 1.0."""
        info("Using default end time (T = 1.0)")
        return 1.0
    def essential_boundaries(self):
        return None
    def essential_boundary_values(self):
        return None
    def initial_conditions(self):
        return None
    def neumann_boundaries(self):
        return None
    def boundary_current(self):
        return None
    # Peculiar stuff (for now)
    def is_dynamic(self):
        """Whether the problem is time dependent."""
        return True
    # Helper functions
    def cell_model(self):
        """Return the cell model passed at construction."""
        return self._cell_model
| Juanlu001/CBC.Solve | cbc/beat/heart.py | Python | gpl-3.0 | 897 |
# * *************************************************************
# *
# * Soft Active Mater on Surfaces (SAMoS)
# *
# * Author: Rastko Sknepnek
# *
# * Division of Physics
# * School of Engineering, Physics and Mathematics
# * University of Dundee
# *
# * (c) 2013, 2014
# *
# * School of Science and Engineering
# * School of Life Sciences
# * University of Dundee
# *
# * (c) 2015
# *
# * Author: Silke Henkes
# *
# * Department of Physics
# * Institute for Complex Systems and Mathematical Biology
# * University of Aberdeen
# *
# * (c) 2014, 2015
# *
# * This program cannot be used, copied, or modified without
# * explicit written permission of the authors.
# *
# * ***************************************************************
# Utility code for building regular rods initial configuration on xy plane
from datetime import *
from random import uniform, randint
from math import *
import argparse
from particle import *
class Plane:
    """Regular lattice of rods on the xy plane.

    Particles are laid out column by column on an lx-by-ly grid centred
    on the origin; each rod points along +x or -x at random.
    """
    def __init__(self, Lx, Ly, N, lx, ly, sigma, l):
        """Store box size and lattice parameters, then build the particles."""
        self.L = (Lx, Ly)
        self.N = N
        self.lx = lx
        self.ly = ly
        self.sigma = sigma
        self.l = l
        self.__generate()
    def __generate(self):
        """Populate self.particles on the rectangular lattice."""
        self.particles = []
        idx = 0
        col = 0
        while True:
            x = -0.5*self.L[0] + (col + 0.5)*self.lx
            # Stop once the next column centre would fall outside the box
            # (small tolerance absorbs floating-point round-off).
            if x > 0.5*(self.L[0] - self.lx) + 1e-3:
                break
            row = 0
            while True:
                y = -0.5*self.L[1] + (row + 0.5)*self.ly
                if y > 0.5*(self.L[1] - self.ly) + 1e-3:
                    break
                part = Particle(idx)
                part.r = [x, y, 0.0]
                # Director points along +x or -x with equal probability.
                part.n = [2.0*(randint(0, 1) - 0.5), 0.0, 0.0]
                part.v = [0.0, 0.0, 0.0]
                part.R = 0.5*self.sigma
                part.l = self.l
                part.omega = 0.0
                self.particles.append(part)
                idx += 1
                row += 1
            col += 1
    def write(self, outfile):
        """Write all particles to outfile in the SAMoS column format."""
        stamp = datetime.now()
        with open(outfile, 'w') as out:
            out.write('# Total of %d particles\n' % len(self.particles))
            out.write('# Generated on : %s\n' % str(stamp))
            out.write('# id type radius x y z vx vy vz nx ny nz omega l\n')
            for part in self.particles:
                px, py, pz = part.r
                vx, vy, vz = part.v
                nx, ny, nz = part.n
                out.write('%d %d %f %f %f %f %f %f %f %f %f %f %f %f\n'
                          % (part.idx, part.tp, part.R, px, py, pz,
                             vx, vy, vz, nx, ny, nz, part.omega, part.l))
# Command-line interface for the rod-configuration builder.
parser = argparse.ArgumentParser()
parser.add_argument("-x", "--lx", type=float, default=10.0, help="box length in x direction")
parser.add_argument("-y", "--ly", type=float, default=10.0, help="box length in y direction")
parser.add_argument("-f", "--phi", type=float, default=0.5, help="packing fraction")
parser.add_argument("-o", "--output", type=str, default='out.dat', help="output file")
# NOTE(review): --vavr is parsed but never used below — confirm intent.
parser.add_argument("-v", "--vavr", type=float, default=1.0, help="average velocity")
parser.add_argument("-a", "--radius", type=float, default=0.5, help="rod radius")
parser.add_argument("-l", "--length", type=float, default=2.0, help="rod length")
args = parser.parse_args()
# Derived quantities: box area, rod diameter, and the area covered by one
# spherocylinder (rectangle plus two half-disc caps).
area = args.lx*args.ly
sigma = 2.0*args.radius
Arod = sigma*(args.length+0.25*pi*sigma)
N = int(round(area*args.phi/Arod))
# p is the rod's aspect ratio; lattice cell sides lx/ly are chosen so that
# one rod per cell yields the requested packing fraction.
p = (args.length+sigma)/sigma
lx = sqrt(Arod*p/args.phi)
ly = sqrt(Arod/(p*args.phi))
print p, lx, ly
print
print "\tActive Particles on Curved Spaces (APCS)"
print "\tBuilding of a random flat configuration (xy plane)"
print
print "\tRastko Sknepnek"
print "\tUniversity of Dundee"
print "\t(c) 2013, 2014, 2015"
print "\t----------------------------------------------"
print
print "\tLx : ", args.lx
print "\tLy : ", args.ly
print "\tPacking fraction : ", args.phi
print "\tNumber of particles : ", N
print "\tOutput file : ", args.output
print "\tRod radius : ", args.radius
print "\tRod length : ", args.length
print
start = datetime.now()
# NOTE(review): random_orinet is assigned but never read (leftover from the
# commented-out branch below).
random_orinet = True
#if args.l1 != 2.0 or args.l2 != 1.0:
#random_orinet = True
# Rebinds p from the aspect ratio above to the Plane instance.
p = Plane(args.lx, args.ly, N, lx, ly, sigma, args.length)
p.write(args.output)
print "Actual packing fraction for this box : ", len(p.particles)*Arod/area
end = datetime.now()
total = end - start
print
print " *** Completed in ", total.total_seconds(), " seconds *** "
print
| sknepneklab/SAMoS | configurations/MakeConfigurations/regular_rods.py | Python | gpl-3.0 | 4,354 |
#!/usr/bin/python
# Given an array of n integers where n > 1, nums, return an array output such that output[i] is equal to the product of all the elements of nums except nums[i].
class Solution(object):
    """LeetCode 238: product of array except self."""
    # @param {integer[]} nums
    # @return {integer[]}
    def productExceptSelf(self, nums):
        """Return a list whose i-th entry is the product of every element
        of nums except nums[i], in O(n) time without division."""
        size = len(nums)
        result = [1] * size
        # First sweep: result[i] holds the product of all elements left of i.
        running = 1
        for idx in range(size):
            result[idx] = running
            running *= nums[idx]
        # Second sweep: multiply in the product of all elements right of i.
        running = 1
        for idx in reversed(range(size)):
            result[idx] *= running
            running *= nums[idx]
        return result
# Quick demo run: prints [60, 40, 30, 24].
demo_input = [2, 3, 4, 5]
solver = Solution()
print(solver.productExceptSelf(demo_input))
| ravyg/algorithms | python/238_productArrayExceptSelf.py | Python | gpl-3.0 | 721 |
from django.contrib.auth.models import AbstractUser
from django.db import models
# NOTE(review): this module-level list is shadowed by UnoUser.STATUT below
# and appears to be unused — confirm before relying on it.
STATUT = ['docteur', 'secrétaire', 'interne', 'remplaçant']
class UnoUser(AbstractUser):
    """
    Base user class for unolog.

    Extends Django's AbstractUser with a `statut` field describing the
    user's role in the practice.
    """
    # Role constants (stored values of the `statut` field).
    MEDECIN = "medecin"
    SECRETAIRE = "secretaire"
    INTERNE = "interne"
    REMPLACANT = "remplacant"
    # Django choices: (stored value, human-readable label).
    STATUT = (
        (MEDECIN, 'Médecin'),
        (SECRETAIRE, 'Secrétaire'),
        (INTERNE, "Interne"),
        (REMPLACANT, "Remplaçant"),
    )
    statut = models.CharField(max_length=20, choices=STATUT)
    """
    RPPS
    ADELI
    https://github.com/codingforentrepreneurs/srvup-rest-framework/blob/master/src/accounts/models.py
    """
| jgirardet/unolog | unolog/unousers/models.py | Python | gpl-3.0 | 686 |
import numpy as np
import matplotlib.pyplot as plt
# Load the two distance samples. The original fed map() objects straight to
# plt.hist (Python-2-only behaviour) and never closed the files; list
# comprehensions inside `with` blocks fix both.
with open('data/dist_distances_enfermos_same_chr') as fh:
    b = [int(line) for line in fh]
with open('data/dist_distances_sanos_same_chr') as fh:
    a = [int(line) for line in fh]
# 'normed=0' dropped: it is the default (False) and the keyword was removed
# in matplotlib >= 3.1.
plt.hist(b, bins=100, alpha=0.5, label='enfermos')
plt.hist(a, bins=100, alpha=0.5, label='sanos')
plt.legend(loc='upper right')
plt.show()
| CSB-IG/circos_rnaseq_tcga_brca | RNAseq_TCGA_BRCA/plot.py | Python | gpl-3.0 | 353 |
'''
9.5 First, you have to resolve assignment9_3. This is slightly different.
This program records the domain name (instead of the address) where the message
was sent from instead of who the mail came from (i.e., the whole email address).
At the end of the program, print out the contents of your dictionary.
Sample:
python assignment9_5_dictionary.py
{'media.berkeley.edu': 4, 'uct.ac.za': 6, 'umich.edu': 7,
'gmail.com': 1, 'caret.cam.ac.uk': 1, 'iupui.edu': 8}
'''
# Count how many 'From ' lines come from each email domain.
dDomain = dict()
try:
    flHand = open("mbox-short.txt")
except IOError:
    # Fix: was a bare `except:` which would also swallow unrelated errors
    # (KeyboardInterrupt, NameError, ...); only the open() failure matters.
    print('There is no "mbox-short.txt" file in the same folder as this script.')
else:
    for sLine in flHand:
        # Only 'From ' header lines carry the sender address.
        if not sLine.startswith('From '):
            continue
        lWords = sLine.split()
        # lWords[1] is the full address; the part after '@' is the domain.
        lEmailDomain = lWords[1].split('@')
        dDomain[lEmailDomain[1]] = dDomain.get(lEmailDomain[1], 0) + 1
    flHand.close()  # fix: release the file handle once counting is done
    print(dDomain)
| hosseinoliabak/learningpy | 09_5_dictionary.py | Python | gpl-3.0 | 877 |
from threading import Thread
import time
from scapy.all import *
class AttackProcess(Thread):
    """Background thread that periodically (re)sends the packets of all
    currently selected hosts while an attack is active."""
    def __init__(self, main):
        Thread.__init__(self)
        self.main = main            # owning application object (exposes HostMgr)
        self.selected_hosts = []
        self.is_attacking = False   # flipped externally to start/stop the attack
    def run(self):
        while True:
            # Active phase: send the selected hosts' packets once a second.
            while self.is_attacking:
                batch = [host.packet
                         for host in self.main.HostMgr.hosts
                         if host.is_selected]
                time.sleep(1)
                send(batch)
            # Idle phase: poll the attack flag every five seconds.
            time.sleep(5)
'''
primepalCodeEval.py - Solution to Problem Prime Palindrome (Category - Easy)
Copyright (C) 2013, Shubham Verma
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
'''
Description:
Write a program to determine the biggest prime palindrome under 1000.
Input sample:
None
Output sample:
Your program should print the largest palindrome on stdout. i.e.
929
'''
from math import sqrt
def isPrime(num):
    """Return True if num is prime, False otherwise.

    Fixes three defects in the original: it returned False for 2,
    True for 1 (and all num < 2), and True for odd perfect squares
    such as 9 and 25 because the trial-division range excluded
    sqrt(num) itself.
    """
    if num < 2:
        return False
    if num == 2:
        return True
    if num % 2 == 0:
        return False
    # Trial division by odd candidates up to and including sqrt(num).
    for i in range(3, int(sqrt(num)) + 1, 2):
        if num % i == 0:
            return False
    return True
if __name__ == '__main__':
    # Scan downwards from 999; the first palindromic prime found is the
    # largest one below 1000 (expected output: 929).
    for num in reversed(xrange(1000)):
        if str(num) == str(num)[::-1] and isPrime(num):
            print num
            break
import lxml.html
from .bills import NHBillScraper
from .legislators import NHLegislatorScraper
from .committees import NHCommitteeScraper
# Billy scraper metadata for New Hampshire: chamber layout, legislative
# terms, per-session bill-status dump URLs, and feature flags.
metadata = {
    'abbreviation': 'nh',
    'name': 'New Hampshire',
    'capitol_timezone': 'America/New_York',
    'legislature_name': 'New Hampshire General Court',
    'legislature_url': 'http://www.gencourt.state.nh.us/',
    'chambers': {
        'upper': {'name': 'Senate', 'title': 'Senator'},
        'lower': {'name': 'House', 'title': 'Representative'},
    },
    'terms': [
        {'name': '2011-2012', 'sessions': ['2011', '2012'],
         'start_year': 2011, 'end_year': 2012},
        {'name': '2013-2014', 'sessions': ['2013', '2014'],
         'start_year': 2013, 'end_year': 2014},
        {'name': '2015-2016', 'sessions': ['2015', '2016'],
         'start_year': 2015, 'end_year': 2016},
        {'name': '2017-2018', 'sessions': ['2017'],
         'start_year': 2017, 'end_year': 2018}
    ],
    # '_scraped_name' must match what session_list() extracts from the
    # downloads page so billy can reconcile sessions.
    'session_details': {
        '2011': {'display_name': '2011 Regular Session',
                 'zip_url': 'http://gencourt.state.nh.us/downloads/2011%20Session%20Bill%20Status%20Tables.zip',
                 '_scraped_name': '2011 Session',
                },
        '2012': {'display_name': '2012 Regular Session',
                 'zip_url': 'http://gencourt.state.nh.us/downloads/2012%20Session%20Bill%20Status%20Tables.zip',
                 '_scraped_name': '2012 Session',
                },
        '2013': {'display_name': '2013 Regular Session',
                 'zip_url': 'http://gencourt.state.nh.us/downloads/2013%20Session%20Bill%20Status%20Tables.zip',
                 # Their dump filename changed, probably just a hiccup.
                 '_scraped_name': '2013',
                 # '_scraped_name': '2013 Session',
                },
        '2014': {'display_name': '2014 Regular Session',
                 'zip_url': 'http://gencourt.state.nh.us/downloads/2014%20Session%20Bill%20Status%20Tables.zip',
                 '_scraped_name': '2014 Session',
                },
        '2015': {'display_name': '2015 Regular Session',
                 'zip_url': 'http://gencourt.state.nh.us/downloads/2015%20Session%20Bill%20Status%20Tables.zip',
                 '_scraped_name': '2015 Session',
                },
        '2016': {'display_name': '2016 Regular Session',
                 'zip_url': 'http://gencourt.state.nh.us/downloads/2016%20Session%20Bill%20Status%20Tables.zip',
                 '_scraped_name': '2016 Session',
                },
        '2017': {'display_name': '2017 Regular Session',
                 '_scraped_name': '2017 Session',
                },
    },
    'feature_flags': ['subjects', 'influenceexplorer'],
    '_ignored_scraped_sessions': ['2013 Session','2017 Session Bill Status Tables Link.txt'],
}
def session_list():
    """Scrape the session names from the state downloads page.

    Returns the archive link texts with the ' Bill Status Tables.zip'
    suffix stripped (e.g. '2016 Session').
    """
    from billy.scrape.utils import url_xpath
    zips = url_xpath('http://gencourt.state.nh.us/downloads/',
        '//a[contains(@href, "Bill%20Status%20Tables")]/text()')
    # Fix: local variable previously shadowed the builtin zip().
    return [name.replace(' Bill Status Tables.zip', '') for name in zips]
def extract_text(doc, data):
    """Return the plain text content of a scraped bill document.

    NOTE(review): the incoming `doc` argument is ignored here — the HTML
    tree is re-parsed from the raw `data`; the two-argument signature is
    required by the billy framework. Confirm before changing.
    """
    tree = lxml.html.fromstring(data)
    return tree.xpath('//html')[0].text_content()
| cliftonmcintosh/openstates | openstates/nh/__init__.py | Python | gpl-3.0 | 3,210 |
from http import HTTPStatus
from json import loads
from unittest.mock import Mock, patch
from uuid import uuid4
from tornado.testing import gen_test
from pokerserver.database import PlayerState, UUIDsRelation
from pokerserver.models import InvalidTurnError, NotYourTurnError, Player, PositionOccupiedError
from tests.utils import IntegrationHttpTestCase, create_table, return_done_future
class TestTableController(IntegrationHttpTestCase):
    """Integration tests for GET /table/<name>.

    Verifies that hole cards are only revealed to the player who owns
    them: a seated, authenticated player sees their own cards, while
    other players and unauthenticated requests get empty card lists.
    """
    async def async_setup(self):
        # Register two UUIDs: 'c' is seated at the table, 'd' is not.
        self.table_id = 1
        self.uuid = uuid4()
        self.uuid2 = uuid4()
        self.player_name = 'c'
        self.player_name2 = 'd'
        await UUIDsRelation.add_uuid(self.uuid, self.player_name)
        await UUIDsRelation.add_uuid(self.uuid2, self.player_name2)
        players = [
            Player(self.table_id, 1, 'a', 0, ['Ah', 'Ac'], 0),
            Player(self.table_id, 2, 'b', 0, ['Kh', 'Kc'], 0),
            Player(self.table_id, 5, 'c', 0, ['Qh', 'Qc'], 0)
        ]
        table = await create_table(table_id=self.table_id, players=players)
        self.table_name = table.name
    @gen_test
    async def test_get_for_player_at_table(self):
        # Seated player 'c': sees own cards, cannot join again.
        await self.async_setup()
        response = await self.fetch_async('/table/{}?uuid={}'.format(self.table_name, self.uuid))
        self.assertEqual(response.code, HTTPStatus.OK.value)
        table = loads(response.body.decode())
        self.assertEqual(table, {
            'big_blind': 2,
            'can_join': False,
            'current_player': None,
            'dealer': None,
            'state': 'waiting for players',
            'round': 'preflop',
            'open_cards': [],
            'players': [{
                'table_id': 1,
                'balance': 0,
                'cards': [],
                'name': 'a',
                'bet': 0,
                'position': 1,
                'state': PlayerState.PLAYING.value
            }, {
                'table_id': 1,
                'balance': 0,
                'cards': [],
                'name': 'b',
                'bet': 0,
                'position': 2,
                'state': PlayerState.PLAYING.value
            }, {
                'table_id': 1,
                'balance': 0,
                'cards': ['Qh', 'Qc'],
                'name': 'c',
                'bet': 0,
                'position': 5,
                'state': PlayerState.PLAYING.value
            }],
            'pots': [{
                'bets': {}
            }],
            'small_blind': 1
        })
    @gen_test
    async def test_get_for_player_not_at_table(self):
        # Authenticated but unseated player 'd': no cards visible, may join.
        await self.async_setup()
        response = await self.fetch_async('/table/{}?uuid={}'.format(self.table_name, self.uuid2))
        self.assertEqual(response.code, HTTPStatus.OK.value)
        table = loads(response.body.decode())
        self.assertEqual(table, {
            'big_blind': 2,
            'can_join': True,
            'current_player': None,
            'dealer': None,
            'state': 'waiting for players',
            'round': 'preflop',
            'open_cards': [],
            'players': [{
                'table_id': 1,
                'balance': 0,
                'cards': [],
                'name': 'a',
                'bet': 0,
                'position': 1,
                'state': PlayerState.PLAYING.value
            }, {
                'table_id': 1,
                'balance': 0,
                'cards': [],
                'name': 'b',
                'bet': 0,
                'position': 2,
                'state': PlayerState.PLAYING.value
            }, {
                'table_id': 1,
                'balance': 0,
                'cards': [],
                'name': 'c',
                'bet': 0,
                'position': 5,
                'state': PlayerState.PLAYING.value
            }],
            'pots': [{
                'bets': {}
            }],
            'small_blind': 1
        })
    @gen_test
    async def test_get_for_unauthorized_player(self):
        # No uuid at all: same view as an unseated player.
        await self.async_setup()
        response = await self.fetch_async('/table/{}'.format(self.table_name))
        self.assertEqual(response.code, HTTPStatus.OK.value)
        table = loads(response.body.decode())
        self.assertEqual(table, {
            'big_blind': 2,
            'can_join': True,
            'current_player': None,
            'dealer': None,
            'state': 'waiting for players',
            'round': 'preflop',
            'open_cards': [],
            'players': [{
                'table_id': 1,
                'balance': 0,
                'cards': [],
                'name': 'a',
                'bet': 0,
                'position': 1,
                'state': PlayerState.PLAYING.value
            }, {
                'table_id': 1,
                'balance': 0,
                'cards': [],
                'name': 'b',
                'bet': 0,
                'position': 2,
                'state': PlayerState.PLAYING.value
            }, {
                'table_id': 1,
                'balance': 0,
                'cards': [],
                'name': 'c',
                'bet': 0,
                'position': 5,
                'state': PlayerState.PLAYING.value
            }],
            'pots': [{
                'bets': {}
            }],
            'small_blind': 1
        })
class TestJoinController(IntegrationHttpTestCase):
    """Integration tests for POST /table/<name>/actions/join.

    The match object is mocked so only the HTTP layer (parameter
    validation and error mapping) is exercised.
    """
    async def async_setup(self):
        self.uuid = uuid4()
        self.player_name = 'player'
        await UUIDsRelation.add_uuid(self.uuid, self.player_name)
        table = await create_table(max_player_count=2)
        self.table_name = table.name
    @patch('pokerserver.controllers.base.BaseController.load_match')
    @gen_test
    async def test_join(self, load_mock):
        # Valid join request is forwarded to Match.join with the position.
        await self.async_setup()
        match_mock = Mock()
        match_mock.table.players = []
        match_mock.join.side_effect = return_done_future()
        load_mock.side_effect = return_done_future(match_mock)
        response = await self.post_with_uuid(
            '/table/{}/actions/join'.format(self.table_name),
            self.uuid,
            body={'position': 1}
        )
        self.assertEqual(response.code, HTTPStatus.OK.value)
        load_mock.assert_called_once_with(self.table_name)
        match_mock.join.assert_called_once_with(self.player_name, 1)
    @patch('pokerserver.controllers.base.BaseController.load_match')
    @gen_test
    async def test_join_occupied_position(self, load_mock):
        # PositionOccupiedError from the model maps to HTTP 409 Conflict.
        await self.async_setup()
        match_mock = Mock()
        match_mock.table.players = []
        match_mock.join.side_effect = return_done_future(exception=PositionOccupiedError)
        load_mock.side_effect = return_done_future(match_mock)
        response = await self.post_with_uuid(
            '/table/{}/actions/join'.format(self.table_name),
            self.uuid,
            body={'position': 1},
            raise_error=False
        )
        self.assertEqual(response.code, HTTPStatus.CONFLICT.value)
    @gen_test
    async def test_join_missing_parameter(self):
        # Missing 'position' yields HTTP 400 Bad Request.
        await self.async_setup()
        response = await self.post_with_uuid(
            '/table/{}/actions/join'.format(self.table_name),
            self.uuid,
            body={},
            raise_error=False)
        self.assertEqual(response.code, HTTPStatus.BAD_REQUEST.value)
    @gen_test
    async def test_join_invalid_parameter(self):
        # Negative position yields HTTP 400 Bad Request.
        await self.async_setup()
        response = await self.post_with_uuid(
            '/table/{}/actions/join'.format(self.table_name),
            self.uuid,
            body={'position': -1},
            raise_error=False
        )
        self.assertEqual(response.code, HTTPStatus.BAD_REQUEST.value)
class TestFoldController(IntegrationHttpTestCase):
    """Integration tests for POST /table/<name>/actions/fold (mocked match)."""
    async def async_setup(self):
        self.uuid = uuid4()
        self.player_name = 'player'
        await UUIDsRelation.add_uuid(self.uuid, self.player_name)
        table = await create_table(max_player_count=2)
        self.table_name = table.name
    @patch('pokerserver.controllers.base.BaseController.load_match')
    @gen_test
    async def test_fold(self, load_mock):
        # Fold is forwarded to Match.fold for the authenticated player.
        await self.async_setup()
        match_mock = Mock()
        match_mock.table.players = []
        match_mock.fold.side_effect = return_done_future()
        load_mock.side_effect = return_done_future(match_mock)
        response = await self.post_with_uuid('/table/{}/actions/fold'.format(self.table_name), self.uuid)
        self.assertEqual(response.code, HTTPStatus.OK.value)
        load_mock.assert_called_once_with(self.table_name)
        match_mock.fold.assert_called_once_with(self.player_name)
    @patch('pokerserver.controllers.base.BaseController.load_match')
    @gen_test
    async def test_fold_invalid_turn(self, load_mock):
        # NotYourTurnError from the model maps to HTTP 400 Bad Request.
        await self.async_setup()
        match_mock = Mock()
        match_mock.table.players = []
        match_mock.fold.side_effect = return_done_future(exception=NotYourTurnError)
        load_mock.side_effect = return_done_future(match_mock)
        response = await self.post_with_uuid(
            '/table/{}/actions/fold'.format(self.table_name),
            self.uuid,
            raise_error=False
        )
        self.assertEqual(response.code, HTTPStatus.BAD_REQUEST.value)
class TestCallController(IntegrationHttpTestCase):
    """Integration tests for POST /table/<name>/actions/call (mocked match)."""
    async def async_setup(self):
        self.uuid = uuid4()
        self.player_name = 'player'
        await UUIDsRelation.add_uuid(self.uuid, self.player_name)
        table = await create_table(max_player_count=2)
        self.table_name = table.name
    @patch('pokerserver.controllers.base.BaseController.load_match')
    @gen_test
    async def test_call(self, load_mock):
        # Call is forwarded to Match.call for the authenticated player.
        await self.async_setup()
        match_mock = Mock()
        match_mock.table.players = []
        match_mock.call.side_effect = return_done_future()
        load_mock.side_effect = return_done_future(match_mock)
        response = await self.post_with_uuid('/table/{}/actions/call'.format(self.table_name), self.uuid)
        self.assertEqual(response.code, HTTPStatus.OK.value)
        load_mock.assert_called_once_with(self.table_name)
        match_mock.call.assert_called_once_with(self.player_name)
    @patch('pokerserver.controllers.base.BaseController.load_match')
    @gen_test
    async def test_call_invalid_turn(self, load_mock):
        # NotYourTurnError from the model maps to HTTP 400 Bad Request.
        await self.async_setup()
        match_mock = Mock()
        match_mock.table.players = []
        match_mock.call.side_effect = return_done_future(exception=NotYourTurnError)
        load_mock.side_effect = return_done_future(match_mock)
        response = await self.post_with_uuid(
            '/table/{}/actions/call'.format(self.table_name),
            self.uuid,
            raise_error=False
        )
        self.assertEqual(response.code, HTTPStatus.BAD_REQUEST.value)
class TestCheckController(IntegrationHttpTestCase):
    """Integration tests for POST /table/<name>/actions/check (mocked match)."""
    async def async_setup(self):
        self.uuid = uuid4()
        self.player_name = 'player'
        await UUIDsRelation.add_uuid(self.uuid, self.player_name)
        table = await create_table(max_player_count=2)
        self.table_name = table.name
    @patch('pokerserver.controllers.base.BaseController.load_match')
    @gen_test
    async def test_check(self, load_mock):
        # Check is forwarded to Match.check for the authenticated player.
        await self.async_setup()
        match_mock = Mock()
        match_mock.table.players = []
        match_mock.check.side_effect = return_done_future()
        load_mock.side_effect = return_done_future(match_mock)
        response = await self.post_with_uuid('/table/{}/actions/check'.format(self.table_name), self.uuid)
        self.assertEqual(response.code, HTTPStatus.OK.value)
        load_mock.assert_called_once_with(self.table_name)
        match_mock.check.assert_called_once_with(self.player_name)
    @patch('pokerserver.controllers.base.BaseController.load_match')
    @gen_test
    async def test_check_invalid_turn(self, load_mock):
        # InvalidTurnError from the model maps to HTTP 400 Bad Request.
        await self.async_setup()
        match_mock = Mock()
        match_mock.table.players = []
        match_mock.check.side_effect = return_done_future(exception=InvalidTurnError)
        load_mock.side_effect = return_done_future(match_mock)
        response = await self.post_with_uuid(
            '/table/{}/actions/check'.format(self.table_name),
            self.uuid,
            raise_error=False
        )
        self.assertEqual(response.code, HTTPStatus.BAD_REQUEST.value)
class TestRaiseController(IntegrationHttpTestCase):
    """Integration tests for POST /table/<name>/actions/raise (mocked match)."""
    async def async_setup(self):
        self.uuid = uuid4()
        self.player_name = 'player'
        await UUIDsRelation.add_uuid(self.uuid, self.player_name)
        table = await create_table(max_player_count=2)
        self.table_name = table.name
    @patch('pokerserver.controllers.base.BaseController.load_match')
    @gen_test
    async def test_raise(self, load_mock):
        # Valid raise forwards player name and amount to Match.raise_bet.
        await self.async_setup()
        match_mock = Mock()
        match_mock.table.players = []
        match_mock.raise_bet.side_effect = return_done_future()
        load_mock.side_effect = return_done_future(match_mock)
        response = await self.post_with_uuid(
            '/table/{}/actions/raise'.format(self.table_name),
            self.uuid,
            body={'amount': 17}
        )
        self.assertEqual(response.code, HTTPStatus.OK.value)
        load_mock.assert_called_once_with(self.table_name)
        match_mock.raise_bet.assert_called_once_with(self.player_name, 17)
    @patch('pokerserver.controllers.base.BaseController.load_match')
    @gen_test
    async def test_raise_invalid_turn(self, load_mock):
        # NotYourTurnError from the model maps to HTTP 400 Bad Request.
        await self.async_setup()
        match_mock = Mock()
        match_mock.table.players = []
        match_mock.raise_bet.side_effect = return_done_future(exception=NotYourTurnError)
        load_mock.side_effect = return_done_future(match_mock)
        response = await self.post_with_uuid(
            '/table/{}/actions/raise'.format(self.table_name),
            self.uuid,
            body={'amount': 3},
            raise_error=False
        )
        self.assertEqual(response.code, HTTPStatus.BAD_REQUEST.value)
    @gen_test
    async def test_raise_missing_parameter(self):
        # Missing 'amount' yields HTTP 400 Bad Request.
        await self.async_setup()
        response = await self.post_with_uuid(
            '/table/{}/actions/raise'.format(self.table_name),
            self.uuid,
            raise_error=False
        )
        self.assertEqual(response.code, HTTPStatus.BAD_REQUEST.value)
    @gen_test
    async def test_raise_invalid_parameter(self):
        # Non-numeric amount (query string) yields HTTP 400 Bad Request.
        await self.async_setup()
        response = await self.post_with_uuid(
            '/table/{}/actions/raise?amount=googol'.format(self.table_name),
            self.uuid,
            raise_error=False
        )
        self.assertEqual(response.code, HTTPStatus.BAD_REQUEST.value)
| MartinAltmayer/pokerserver | tests/integration/controllers/test_table.py | Python | gpl-3.0 | 14,923 |
# Macros commonly used: alternation patterns spliced into the lexicon rules.
# Forms of "to be".
_be = "(be|is|am|are|were|was|been|being)"
# NOTE(review): this first `_intensadj1` binding contains adverbs and is
# immediately overwritten by the second `_intensadj1` below — it looks like
# this line was meant to carry a different name (cf. `_intensadv1`). Confirm
# intended name before changing; renaming would alter the module interface.
_intensadj1 = "( absolutely| absurdly| resoundingly | amazingly | awfully | extremely | completely | highly | incredibly | perfectly | quite | really | strikingly | surprisingly | terribly | totally | unbelievably | hugely | unnaturally | unusually | utterly | very | tremendously | spectacularly)"
# Intensifying adverbs (brace-delimited variant).
_intensadv1= "{ absolutely| absurdly| resoundingly| amazingly| awfully| extremely| completely| highly| incredibly| perfectly| quite| really| strikingly| surprisingly| terribly| totally| unbelievably| hugely| unnaturally| unusually| utterly| very| tremendously| spectacularly}"
# Intensifying adjectives; this binding is the one callers actually see.
_intensadj1 = "( absolute| extreme| incredible| perfect| phenomenal| spectacular| huge| major| tremendous| complete| considerable| real| terrible| total| unbelievable| utter| great| resounding)"
# Forms of "to have".
_have = "(have|has|had|having)"
# "going to" / "gonna" constructions: plain, negated, cliticized, negated cliticized.
_GONNA = "( am going to| are going to| is going to| am gonna| are gonna| is gonna)"
_GONNANEG= "( am not going to| are not going to| is not going to| am not gonna| are not gonna| is not gonna| ain\'t gonna| isn\'t gonna| aren\'t gonna)"
_GONNACL = "( i\'m going to| they\'re going to| she\'s going to| it\'s going to| we\'re going to| i\'m gonna| you\'re gonna| i\'m gonna| you\'re gonna| he\'s gonna| she\'s gonna| it\'s gonna| we\'re gonna| they\'re gonna| that\'s gonnar)"
_GONNANEGCL = "( i\'m not going to| they\'re not going to| she\'s not going to| it\'s not going to| we\'re not going to| i\'m not gonna| you\'re not gonna| he\'s not gonna| she\'s not gonna| it\'s not gonna| we\'re not gonna| they\'re not gonna| that\'s not gonna)"
# Subject pronouns and emotion/preference verbs.
_pronsubj = "(i| you| he| she| it| we| they)"
_emo1v= "(like| adore| want| prefer| love| enjoy)"
| raphaottoni/arglex | arglex/macros.py | Python | gpl-3.0 | 1,719 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import os
import pisilinux.context as ctx
import pisilinux.db.lazydb as lazydb
import pisilinux.history
class HistoryDB(lazydb.LazyDB):
    """Access layer for pisilinux operation history.

    History is stored as numbered XML files ("<no>_<type>.xml") in the
    history directory; per-operation config snapshots live in
    "<no>/<package>/" subdirectories.
    """
    def init(self):
        # __logs holds the XML log filenames, newest operation first.
        self.__logs = self.__generate_history()
        self.history = pisilinux.history.History()
    def __generate_history(self):
        """Return log filenames sorted by operation number, newest first."""
        logs = [x for x in os.listdir(ctx.config.history_dir()) if x.endswith(".xml")]
        # Python-2 cmp-style sort on the numeric prefix of the filename.
        logs.sort(lambda x,y:int(x.split("_")[0]) - int(y.split("_")[0]))
        logs.reverse()
        return logs
    def create_history(self, operation):
        """Open a new history record for the named operation."""
        self.history.create(operation)
    def add_and_update(self, pkgBefore=None, pkgAfter=None, operation=None, otype=None):
        """Record a package transition and immediately persist the history."""
        self.add_package(pkgBefore, pkgAfter, operation, otype)
        self.update_history()
    def add_package(self, pkgBefore=None, pkgAfter=None, operation=None, otype=None):
        """Record a package transition without persisting yet."""
        self.history.add(pkgBefore, pkgAfter, operation, otype)
    def load_config(self, operation, package):
        """Restore a package's saved config files from operation's snapshot."""
        config_dir = os.path.join(ctx.config.history_dir(), "%03d" % operation, package)
        if os.path.exists(config_dir):
            import distutils.dir_util as dir_util
            # Copy the snapshot back over the live filesystem root.
            dir_util.copy_tree(config_dir, "/")
    def save_config(self, package, config_file):
        """Snapshot one config path of a package into the current operation dir."""
        hist_dir = os.path.join(ctx.config.history_dir(), self.history.operation.no, package)
        if os.path.isdir(config_file):
            # Directories are just recreated; their contents are saved per-file.
            os.makedirs(os.path.join(hist_dir, config_file))
            return
        destdir = os.path.join(hist_dir, config_file[1:])
        pisilinux.util.copy_file_stat(config_file, destdir);
    def update_repo(self, repo, uri, operation = None):
        """Record a repository update and persist the history."""
        self.history.update_repo(repo, uri, operation)
        self.update_history()
    def update_history(self):
        """Persist the in-memory history record to disk."""
        self.history.update()
    def get_operation(self, operation):
        """Return the Operation with the given number, or None if absent."""
        for log in self.__logs:
            if log.startswith("%03d_" % operation):
                hist = pisilinux.history.History(os.path.join(ctx.config.history_dir(), log))
                hist.operation.no = int(log.split("_")[0])
                return hist.operation
        return None
    def get_package_config_files(self, operation, package):
        """Return all snapshot file paths for package in operation, or None."""
        package_path = os.path.join(ctx.config.history_dir(), "%03d/%s" % (operation, package))
        if not os.path.exists(package_path):
            return None
        configs = []
        for root, dirs, files in os.walk(package_path):
            for f in files:
                configs.append(("%s/%s" % (root, f)))
        return configs
    def get_config_files(self, operation):
        """Return {package: [config paths]} for an operation, or None."""
        config_path = os.path.join(ctx.config.history_dir(), "%03d" % operation)
        if not os.path.exists(config_path):
            return None
        allconfigs = {}
        packages = os.listdir(config_path)
        for package in packages:
            allconfigs[package] = self.get_package_config_files(operation, package)
        return allconfigs
    def get_till_operation(self, operation):
        """Yield operations newer than the given one, newest first.

        Yields nothing if the given operation number does not exist.
        """
        if not [x for x in self.__logs if x.startswith("%03d_" % operation)]:
            return
        for log in self.__logs:
            # Stop (without yielding) once the target operation is reached.
            if log.startswith("%03d_" % operation):
                return
            hist = pisilinux.history.History(os.path.join(ctx.config.history_dir(), log))
            hist.operation.no = int(log.split("_")[0])
            yield hist.operation
    def get_last(self, count=0):
        """Yield the last `count` operations (all of them when count == 0)."""
        count = count or len(self.__logs)
        for log in self.__logs[:count]:
            hist = pisilinux.history.History(os.path.join(ctx.config.history_dir(), log))
            hist.operation.no = int(log.split("_")[0])
            yield hist.operation
    def get_last_repo_update(self, last=1):
        """Return the date of the last-th most recent repo update, or None.

        Requires at least two recorded repo updates before reporting one.
        """
        repoupdates = [l for l in self.__logs if l.endswith("repoupdate.xml")]
        repoupdates.reverse()
        if not len(repoupdates) >= 2:
            return None
        if last != 1 and len(repoupdates) <= last:
            return None
        hist = pisilinux.history.History(os.path.join(ctx.config.history_dir(), repoupdates[-last]))
        return hist.operation.date
| hknyldz/pisitools | pisilinux/pisilinux/db/historydb.py | Python | gpl-3.0 | 4,426 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
import time
BUTTON_PIN = 11  # physical header pin (GPIO.BOARD numbering) wired to the push button
LED_PIN = 7      # physical header pin driving the LED
def stisknuto_callback(channel):
global sviti
sviti = int(not sviti)
GPIO.output(LED_PIN,sviti)
if sviti == 1:
print "LED dioda ZAPNUTA"
else:
print "LED dioda VYPNUTA"
try:
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BOARD)
    GPIO.cleanup()
    # Button input with the internal pull-up enabled; LED starts off.
    GPIO.setup(BUTTON_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.setup(LED_PIN,GPIO.OUT)
    GPIO.output(LED_PIN,0)
    sviti = 0
    print "Tlačítko zap/vyp LED diodu"
    print "program ukončete [CTRL]+[C]"
    print "Stiskni tlačítko"
    # Rising-edge interrupt with 300 ms software debounce; the callback
    # toggles the LED state.
    GPIO.add_event_detect(BUTTON_PIN,GPIO.RISING,callback=stisknuto_callback,bouncetime=300)
    GPIO.output(LED_PIN,sviti)
    # Idle forever; all the work happens in the edge-detect callback.
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    pass
finally:
    # Always release the GPIO pins, even on Ctrl+C.
    GPIO.cleanup()
    print
    print "Program ukončen"
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import AbinsModules
class CalculateS(object):
    """
    Factory producing the required S calculator.

    Currently available S calculators:

        * SPowderSemiEmpiricalCalculator
    """
    @staticmethod
    def init(filename=None, temperature=None, sample_form=None, abins_data=None, instrument=None,
             quantum_order_num=None, bin_width=1.0):
        """
        :param filename: name of input DFT file (CASTEP: foo.phonon)
        :param temperature: temperature in K for which calculation of S should be done
        :param sample_form: form in which experimental sample is: Powder or SingleCrystal (str)
        :param abins_data: object of type AbinsData with data from phonon file
        :param instrument: object of type Instrument for which simulation should be performed
        :param quantum_order_num: number of quantum order events taken into account during the simulation
        :param bin_width: width of bins in wavenumber
        """
        # Guard clauses; raise conditions and messages match the original
        # nested if/else exactly.
        if sample_form not in AbinsModules.AbinsConstants.ALL_SAMPLE_FORMS:
            raise ValueError("Invalid sample form %s" % sample_form)
        if sample_form != "Powder":
            # TODO: implement numerical powder averaging
            # elif sample == "SingleCrystal": #TODO implement single crystal scenario
            raise ValueError("Only implementation for sample in the form of powder is available.")
        return AbinsModules.SPowderSemiEmpiricalCalculator(filename=filename, temperature=temperature,
                                                           abins_data=abins_data, instrument=instrument,
                                                           quantum_order_num=quantum_order_num,
                                                           bin_width=bin_width)
| mganeva/mantid | scripts/AbinsModules/CalculateS.py | Python | gpl-3.0 | 2,205 |
# Flexlay - A Generic 2D Game Editor
# Copyright (C) 2014 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from flexlay.wip.bitmap_layer import BitmapLayer
from flexlay.blend_func import BlendFunc
from flexlay import DrawerProperties
from flexlay.math import Rect, Point, Size, Origin
class Surface:
    """Placeholder for a drawable surface.

    The smudge branch of SpriteStrokeDrawer.draw() constructs this as
    ``Surface(buffer)``, so a bare ``pass`` body would raise TypeError at
    runtime.  Store the pixel buffer so instantiation succeeds; the drawing
    methods used on it (set_alignment, set_alpha, draw) still need a real
    implementation — TODO confirm against the eventual Surface API.
    """

    def __init__(self, buffer=None):
        # Pixel data this surface wraps (as returned by get_pixeldata()).
        self.buffer = buffer
class SpriteStrokeDrawer:
    """Renders a brush stroke as a sequence of sprite 'dabs'.

    Supports several compositing modes selected via set_mode().
    """

    # Draw modes.  These were referenced but never defined, which made
    # __init__ raise AttributeError; concrete values are arbitrary tags
    # (only compared for equality).
    DM_NORMAL = 0
    DM_ADDITION = 1
    DM_ERASE = 2
    DM_SMUDGE = 3

    def __init__(self, drawer):
        self.mode = SpriteStrokeDrawer.DM_NORMAL
        self.drawer = drawer

    def draw(self, stroke, gc):
        """Render *stroke* onto *gc* (or the default target when gc is None)."""
        if DrawerProperties.current.get_brush().is_null() or stroke.get_dab_count() == 0:
            return

        # Resample the stroke so dabs are evenly spaced along its path.
        dabs = stroke.get_interpolated_dabs(DrawerProperties.current.get_spacing() *
                                            DrawerProperties.current.get_size(),
                                            DrawerProperties.current.get_spacing() *
                                            DrawerProperties.current.get_size())

        # BUGFIX: was `self.dabs`, but the dab list is a local — the old code
        # raised AttributeError on every draw.
        for i, dab in enumerate(dabs):
            sprite = DrawerProperties.current.get_brush().get_sprite()
            color = DrawerProperties.current.get_color()

            sprite.set_color(color)
            # Pressure scales both opacity and dab size.
            sprite.set_alpha((color.get_alpha() / 255.0) * dab.pressure)
            sprite.set_scale(DrawerProperties.current.get_size() * dab.pressure,
                             DrawerProperties.current.get_size() * dab.pressure)

            if gc is not None:
                # DO Multipass:
                # 1: GL_ZERO, GL_DST_ALPHA
                # 2: GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA
                # brush.set_blend_func_separate(BlendFunc.zero, BlendFunc.dst_alpha,
                #                               BlendFunc.zero, BlendFunc.one)
                # brush.draw(dab.pos.x, dab.pos.y, gc)
                if self.mode == SpriteStrokeDrawer.DM_NORMAL:
                    sprite.set_blend_func_separate(BlendFunc.src_alpha, BlendFunc.one_minus_src_alpha,
                                                   BlendFunc.one, BlendFunc.one_minus_src_alpha)
                    sprite.draw(dab.pos.x, dab.pos.y, gc.gc)
                elif self.mode == SpriteStrokeDrawer.DM_ADDITION:
                    sprite.set_blend_func_separate(BlendFunc.src_alpha, BlendFunc.one,
                                                   BlendFunc.zero, BlendFunc.one)
                    # BlendFunc.one, BlendFunc.one_minus_src_alpha)
                    sprite.draw(dab.pos.x, dab.pos.y, gc.gc)
                elif self.mode == SpriteStrokeDrawer.DM_ERASE:
                    sprite.set_blend_func(BlendFunc.zero, BlendFunc.one_minus_src_alpha)
                    sprite.draw(dab.pos.x, dab.pos.y, gc.gc)
                elif self.mode == SpriteStrokeDrawer.DM_SMUDGE:
                    # Smudge drags pixels from under the previous dab, so it
                    # cannot apply to the very first dab of the stroke.
                    if dab != dabs[0]:
                        canvas = BitmapLayer.current.get_canvas()
                        # BUGFIX: was `self.dabs[i - 1]`; use the local list.
                        # NOTE(review): sprite.width / 2 yields a float under
                        # Python 3 — confirm Rect/Point accept non-int coords.
                        buffer = canvas.get_pixeldata(
                            Rect(Point(int(dabs[i - 1].pos.x) - sprite.width / 2,
                                       int(dabs[i - 1].pos.y) - sprite.height / 2),
                                 Size(sprite.width, sprite.height)))
                        surface = Surface(buffer)
                        # surface.set_blend_func_separate(BlendFunc.src_alpha, BlendFunc.one_minus_src_alpha,
                        #                                 BlendFunc.one, BlendFunc.zero)
                        surface.set_alignment(Origin.center)
                        surface.set_alpha(0.5)
                        # surface.set_scale(DrawerProperties.current.get_size(),
                        #                   DrawerProperties.current.get_size())
                        surface.draw(dab.pos.x, dab.pos.y, gc.gc)
                else:
                    print("Error: SpriteStrokeDrawer: Unknown draw mode: ", self.mode)
            else:
                if self.mode == SpriteStrokeDrawer.DM_NORMAL:
                    sprite.set_blend_func(BlendFunc.src_alpha, BlendFunc.one_minus_src_alpha)
                    sprite.draw(dab.pos.x, dab.pos.y, gc.gc)
                elif self.mode == SpriteStrokeDrawer.DM_ADDITION:
                    sprite.set_blend_func(BlendFunc.src_alpha, BlendFunc.one)
                    sprite.draw(dab.pos.x, dab.pos.y, gc.gc)
                elif self.mode == SpriteStrokeDrawer.DM_ERASE:
                    sprite.set_blend_func(BlendFunc.zero, BlendFunc.one_minus_src_alpha)
                    sprite.draw(dab.pos.x, dab.pos.y, gc.gc)
                elif self.mode == SpriteStrokeDrawer.DM_SMUDGE:
                    sprite.set_blend_func(BlendFunc.src_alpha, BlendFunc.one_minus_src_alpha)
                    sprite.draw(dab.pos.x, dab.pos.y, gc.gc)
                else:
                    print("Error: SpriteStrokeDrawer: Unknown draw mode:", self.mode)

    def set_mode(self, mode):
        """Select one of the DM_* draw modes."""
        self.mode = mode

    def get_mode(self):
        """Return the currently selected DM_* draw mode."""
        return self.mode
# EOF #
| SuperTux/flexlay | flexlay/wip/sprite_stroke_drawer.py | Python | gpl-3.0 | 5,541 |
from __future__ import absolute_import
import os.path
import requests
import wtforms
from wtforms import validators
from ..forms import TextDatasetForm
from origae import utils
from origae.utils.forms import validate_required_iff, validate_greater_than
class TextClassificationDatasetForm(TextDatasetForm):
    """
    Defines the form used to create a new TextClassificationDatasetJob
    """

    backend = wtforms.SelectField('DB backend',
                                  choices=[
                                      ('lmdb', 'LMDB'),
                                      ('hdf5', 'HDF5')
                                  ],
                                  default='lmdb',
                                  )

    def validate_backend(form, field):
        """Force encoding/compression defaults consistent with the backend."""
        if field.data == 'lmdb':
            form.compression.data = 'none'
        elif field.data == 'tfrecords':
            # NOTE(review): 'tfrecords' is not among the backend choices above
            # — dead branch unless the choices list is extended. Confirm.
            form.compression.data = 'none'
        elif field.data == 'hdf5':
            form.encoding.data = 'none'

    compression = utils.forms.SelectField(
        'DB compression',
        choices=[
            ('none', 'None'),
            ('gzip', 'GZIP'),
        ],
        default='none',
        tooltip=('Compressing the dataset may significantly decrease the size '
                 'of your database files, but it may increase read and write times.'),
    )

    # Use a SelectField instead of a HiddenField so that the default value
    # is used when nothing is provided (through the REST API)
    method = wtforms.SelectField(u'Dataset type',
                                 choices=[
                                     ('folder', 'Folder'),
                                     ('textfile', 'Textfiles'),
                                 ],
                                 default='folder',
                                 )

    def validate_folder_path(form, field):
        """Validate that the field is an existing URL or absolute directory."""
        if not field.data:
            pass
        elif utils.is_url(field.data):
            # make sure the URL exists
            try:
                r = requests.get(field.data,
                                 allow_redirects=False,
                                 timeout=utils.HTTP_TIMEOUT)
                if r.status_code not in [requests.codes.ok, requests.codes.moved, requests.codes.found]:
                    raise validators.ValidationError('URL not found')
            except Exception as e:
                raise validators.ValidationError('Caught %s while checking URL: %s' % (type(e).__name__, e))
            else:
                return True
        else:
            # make sure the filesystem path exists
            # and make sure the filesystem path is absolute
            if not os.path.exists(field.data) or not os.path.isdir(field.data):
                raise validators.ValidationError('Folder does not exist')
            elif not os.path.isabs(field.data):
                raise validators.ValidationError('Filesystem path is not absolute')
            else:
                return True

    #
    # Method - folder
    #

    folder_train = utils.forms.StringField(
        u'Training Images',
        validators=[
            validate_required_iff(method='folder'),
            validate_folder_path,
        ],
        tooltip=('Indicate a folder which holds subfolders full of images. '
                 'Each subfolder should be named according to the desired label for the images that it holds. '
                 'Can also be a URL for an apache/nginx auto-indexed folder.'),
    )

    folder_pct_val = utils.forms.IntegerField(
        u'% for validation',
        default=25,
        validators=[
            validate_required_iff(method='folder'),
            validators.NumberRange(min=0, max=100)
        ],
        tooltip=('You can choose to set apart a certain percentage of images '
                 'from the training images for the validation set.'),
    )

    folder_pct_test = utils.forms.IntegerField(
        u'% for testing',
        default=0,
        validators=[
            validate_required_iff(method='folder'),
            validators.NumberRange(min=0, max=100)
        ],
        tooltip=('You can choose to set apart a certain percentage of images '
                 'from the training images for the test set.'),
    )

    folder_train_min_per_class = utils.forms.IntegerField(
        u'Minimum samples per class',
        default=2,
        validators=[
            validators.Optional(),
            validators.NumberRange(min=1),
        ],
        tooltip=('You can choose to specify a minimum number of samples per class. '
                 'If a class has fewer samples than the specified amount it will be ignored. '
                 'Leave blank to ignore this feature.'),
    )

    folder_train_max_per_class = utils.forms.IntegerField(
        u'Maximum samples per class',
        validators=[
            validators.Optional(),
            validators.NumberRange(min=1),
            validate_greater_than('folder_train_min_per_class'),
        ],
        tooltip=('You can choose to specify a maximum number of samples per class. '
                 'If a class has more samples than the specified amount extra samples will be ignored. '
                 'Leave blank to ignore this feature.'),
    )

    has_val_folder = wtforms.BooleanField(
        'Separate validation images folder',
        default=False,
        validators=[
            validate_required_iff(method='folder')
        ]
    )

    folder_val = wtforms.StringField(
        u'Validation Images',
        validators=[
            validate_required_iff(
                method='folder',
                has_val_folder=True),
            # CONSISTENCY FIX: folder_train and folder_test validate the
            # supplied path; the validation folder should be checked too.
            validate_folder_path,
        ]
    )

    folder_val_min_per_class = utils.forms.IntegerField(
        u'Minimum samples per class',
        default=2,
        validators=[
            validators.Optional(),
            validators.NumberRange(min=1),
        ],
        tooltip=('You can choose to specify a minimum number of samples per class. '
                 'If a class has fewer samples than the specified amount it will be ignored. '
                 'Leave blank to ignore this feature.'),
    )

    folder_val_max_per_class = utils.forms.IntegerField(
        u'Maximum samples per class',
        validators=[
            validators.Optional(),
            validators.NumberRange(min=1),
            validate_greater_than('folder_val_min_per_class'),
        ],
        tooltip=('You can choose to specify a maximum number of samples per class. '
                 'If a class has more samples than the specified amount extra samples will be ignored. '
                 'Leave blank to ignore this feature.'),
    )

    has_test_folder = wtforms.BooleanField(
        'Separate test images folder',
        default=False,
        validators=[
            validate_required_iff(method='folder')
        ]
    )

    folder_test = wtforms.StringField(
        u'Test Images',
        validators=[
            validate_required_iff(
                method='folder',
                has_test_folder=True),
            validate_folder_path,
        ]
    )

    folder_test_min_per_class = utils.forms.IntegerField(
        u'Minimum samples per class',
        default=2,
        validators=[
            validators.Optional(),
            validators.NumberRange(min=1)
        ],
        tooltip=('You can choose to specify a minimum number of samples per class. '
                 'If a class has fewer samples than the specified amount it will be ignored. '
                 'Leave blank to ignore this feature.'),
    )

    folder_test_max_per_class = utils.forms.IntegerField(
        u'Maximum samples per class',
        validators=[
            validators.Optional(),
            validators.NumberRange(min=1),
            validate_greater_than('folder_test_min_per_class'),
        ],
        tooltip=('You can choose to specify a maximum number of samples per class. '
                 'If a class has more samples than the specified amount extra samples will be ignored. '
                 'Leave blank to ignore this feature.'),
    )

    #
    # Method - textfile
    #

    textfile_use_local_files = wtforms.BooleanField(
        u'Use local files',
        default=False,
    )

    textfile_train_images = utils.forms.FileField(
        u'Training images',
        validators=[
            validate_required_iff(method='textfile',
                                  textfile_use_local_files=False)
        ]
    )

    textfile_local_train_images = wtforms.StringField(
        u'Training images',
        validators=[
            validate_required_iff(method='textfile',
                                  textfile_use_local_files=True)
        ]
    )

    textfile_train_folder = wtforms.StringField(u'Training images folder')

    def validate_textfile_train_folder(form, field):
        """Optional folder; if given it must exist (textfile method only)."""
        if form.method.data != 'textfile':
            field.errors[:] = []
            raise validators.StopValidation()
        if not field.data.strip():
            # allow null
            return True
        if not os.path.exists(field.data) or not os.path.isdir(field.data):
            raise validators.ValidationError('folder does not exist')
        return True

    textfile_use_val = wtforms.BooleanField(u'Validation set',
                                            default=True,
                                            validators=[
                                                validate_required_iff(method='textfile')
                                            ]
                                            )
    textfile_val_images = utils.forms.FileField(u'Validation images',
                                                validators=[
                                                    validate_required_iff(
                                                        method='textfile',
                                                        textfile_use_val=True,
                                                        textfile_use_local_files=False)
                                                ]
                                                )
    textfile_local_val_images = wtforms.StringField(u'Validation images',
                                                    validators=[
                                                        validate_required_iff(
                                                            method='textfile',
                                                            textfile_use_val=True,
                                                            textfile_use_local_files=True)
                                                    ]
                                                    )
    textfile_val_folder = wtforms.StringField(u'Validation images folder')

    def validate_textfile_val_folder(form, field):
        """Optional folder; if given it must exist (textfile + validation)."""
        if form.method.data != 'textfile' or not form.textfile_use_val.data:
            field.errors[:] = []
            raise validators.StopValidation()
        if not field.data.strip():
            # allow null
            return True
        if not os.path.exists(field.data) or not os.path.isdir(field.data):
            raise validators.ValidationError('folder does not exist')
        return True

    textfile_use_test = wtforms.BooleanField(u'Test set',
                                             default=False,
                                             validators=[
                                                 validate_required_iff(method='textfile')
                                             ]
                                             )
    textfile_test_images = utils.forms.FileField(u'Test images',
                                                 validators=[
                                                     validate_required_iff(
                                                         method='textfile',
                                                         textfile_use_test=True,
                                                         textfile_use_local_files=False)
                                                 ]
                                                 )
    textfile_local_test_images = wtforms.StringField(u'Test images',
                                                     validators=[
                                                         validate_required_iff(
                                                             method='textfile',
                                                             textfile_use_test=True,
                                                             textfile_use_local_files=True)
                                                     ]
                                                     )
    textfile_test_folder = wtforms.StringField(u'Test images folder')

    def validate_textfile_test_folder(form, field):
        """Optional folder; if given it must exist (textfile + test)."""
        if form.method.data != 'textfile' or not form.textfile_use_test.data:
            field.errors[:] = []
            raise validators.StopValidation()
        if not field.data.strip():
            # allow null
            return True
        if not os.path.exists(field.data) or not os.path.isdir(field.data):
            raise validators.ValidationError('folder does not exist')
        return True

    # Can't use a BooleanField here because HTML doesn't submit anything
    # for an unchecked checkbox. Since we want to use a REST API and have
    # this default to True when nothing is supplied, we have to use a
    # SelectField
    textfile_shuffle = utils.forms.SelectField(
        'Shuffle lines',
        choices=[
            (1, 'Yes'),
            (0, 'No'),
        ],
        coerce=int,
        default=1,
        tooltip="Shuffle the list[s] of images before creating the database."
    )

    textfile_labels_file = utils.forms.FileField(
        u'Labels',
        validators=[
            validate_required_iff(method='textfile',
                                  textfile_use_local_files=False)
        ],
        tooltip=("The 'i'th line of the file should give the string label "
                 "associated with the '(i-1)'th numeric label. (E.g. the string label "
                 "for the numeric label 0 is supposed to be on line 1.)"),
    )

    textfile_local_labels_file = utils.forms.StringField(
        u'Labels',
        validators=[
            validate_required_iff(method='textfile',
                                  textfile_use_local_files=True)
        ],
        tooltip=("The 'i'th line of the file should give the string label "
                 "associated with the '(i-1)'th numeric label. (E.g. the string label "
                 "for the numeric label 0 is supposed to be on line 1.)"),
    )
| winnerineast/Origae-6 | origae/dataset/text/classification/forms.py | Python | gpl-3.0 | 14,708 |
from spectral_cube import SpectralCube
import glob
from astropy.convolution import Box1DKernel
import astropy.units as u
from corner import corner
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as ss
# Compare DEGAS and EMPIRE cubes for a set of galaxies: match spectral and
# spatial resolution, then plot per-pixel intensity correlations with corner().
empdir = '/mnt/space/erosolow/surveys/empire/'
degasdir = '/mnt/space/erosolow/surveys/DEGAS/'
maskdir = '/mnt/space/erosolow/surveys/DEGAS/masks/'

# Restrict the comparison to voxels inside the 12CO signal mask.
applymask = True

gals = ['ngc2903','ngc4321','ngc5055','ngc6946']
# gals = gals[-1:]
for g in gals:
    for species in ['HCN','HCOp','13CO','C18O']:
        # degas = SpectralCube.read(degasdir + g.upper() + '/images/{0}_{1}_rebase7_smooth1.3_hanning1.fits'.format(g.upper(), species))
        try:
            degas = SpectralCube.read(degasdir + '/IR6p0/{0}_{1}_rebase7_smooth1.3_hanning1.fits'.format(g.upper(), species))
        except:
            # Skip galaxy/species combinations with no DEGAS cube on disk.
            continue
        fl = glob.glob(empdir + 'empire_{0}_{1}_*.fits'.format(g, species.lower()))
        empire = SpectralCube.read(fl[0])

        # Ratio of channel widths; if EMPIRE channels are finer, smooth them
        # down to the DEGAS width before interpolating onto its spectral axis.
        dv_ratio = ((degas.spectral_axis[1]-degas.spectral_axis[0]) / (empire.spectral_axis[1] - empire.spectral_axis[0])).to(u.dimensionless_unscaled).value
        if dv_ratio > 1:
            kernel = Box1DKernel(dv_ratio)
            empire = empire.spectral_smooth(kernel)
        empire = empire.spectral_interpolate(degas.spectral_axis)

        # Match beams and pixel grids: convolve DEGAS to the EMPIRE beam,
        # then put EMPIRE on the DEGAS header.
        degas = degas.convolve_to(empire.beam)
        empire = empire.reproject(degas.header)

        emp = empire.filled_data[:].value
        deg = degas.filled_data[:].value

        # Plot limits from the DEGAS intensity distribution (padded).
        p999 = np.nanpercentile(deg, 99.9) * 2
        p001 = np.nanpercentile(deg, 0.1) * 1.5
        if applymask:
            # Regrid the 12CO mask onto the DEGAS cube and keep mask > 0.
            mask = SpectralCube.read(maskdir
                                     + '{0}_12CO_mask.fits'.format(g.upper()))
            mask.allow_huge_operations=True
            mask = mask.spectral_interpolate(degas.spectral_axis)
            mask = mask.reproject(degas.header, order='nearest-neighbor')
            mask = mask.filled_data[:].value
            mask = mask > 0
            idx = np.isfinite(emp) * np.isfinite(deg) * (mask)
        else:
            idx = np.isfinite(emp) * np.isfinite(deg)

        # Median flux ratio over the brightest ~1% of voxels in either cube.
        topfrac = np.logical_or((emp > np.nanpercentile(emp, 99)),
                                (deg > np.nanpercentile(deg, 99)))
        medrat = np.nanmedian(deg[topfrac] / emp[topfrac])

        # Binned medians in both directions (DEGAS vs EMPIRE and vice versa).
        val, bins, _ = ss.binned_statistic(emp[idx], deg[idx],
                                           bins=np.linspace(p001, p999, 31),
                                           statistic='median')

        yval, ybins, _ = ss.binned_statistic(deg[idx], emp[idx],
                                             bins=np.linspace(p001, p999, 31),
                                             statistic='median')

        xctr = 0.5 * (bins[0:-1] + bins[1:])
        yctr = 0.5 * (ybins[0:-1] + ybins[1:])

        # Corner plot: axes[2] is the joint EMPIRE-vs-DEGAS panel.
        f = corner(np.c_[emp[idx], deg[idx]], bins=100)
        f.axes[2].set_xlim([p001, p999])
        f.axes[2].set_ylim([p001, p999])
        f.axes[0].set_xlim([p001, p999])
        f.axes[3].set_xlim([p001, p999])
        f.axes[2].set_xlabel('EMPIRE')
        f.axes[2].set_ylabel('DEGAS')
        # Red 1:1 line and blue dashed line at the measured median ratio.
        f.axes[2].plot([p001,p999], [p001,p999], color='r',
                       linewidth=3, alpha=0.4)
        f.axes[2].plot([p001, p999], [p001 * medrat,
                                      p999 * medrat],
                       color='b',linewidth=3, linestyle='--', alpha=0.4)
        f.axes[2].plot(xctr, val, 'ro', markersize=4)
        f.axes[2].plot(yval, yctr,'bx',markersize=4)
        f.text(0.6, 0.76,
               '{0} {1}'.format(g.upper(), species), transform=plt.gcf().transFigure)
        f.text(0.6, 0.68, 'Median DEGAS/EMPIRE: {0:4.2f}'.format(medrat))
        f.set_size_inches(6,6)
        f.savefig('DEGAS_vs_EMPIRE_{0}_{1}.pdf'.format(g, species))

        # Save the matched-resolution cubes for later inspection.
        degas.write('/mnt/space/erosolow/degas_{0}_{1}.fits'.format(g,species), overwrite=True)
        empire.write('/mnt/space/erosolow/empire_{0}_{1}.fits'.format(g,species), overwrite=True)
# import pdb; pdb.set_trace()
# degas
# empire = SpectralCube.read('../../../empire/EMPIRE_ngc4321_hcn_33as.fits')
# ls
# empire = SpectralCube.read('../../../empire/EMPIRE_ngc4321_hcn_33as.fits')
# run /home/erosolow/sanitize
# empire = SpectralCube.read('../../../empire/EMPIRE_ngc4321_hcn_33as.fits')
# empire
# import astropy.units as u
# degas_con = degas.convolve_to(33*u.arcsec)
# ?degas.convolve_to
# from radio_beam import Beam
# degas_con = degas.convolve_to(Beam(major = 33*u.arcsec, minor=33*u.arcsec, pa=0*u.deg))
# empire_reproj = empire.reproject(degas_con.header)
# empire_reproj
# degas_con
# plt.hexbin(empire_reproj.filled_data[:],degas_con.filled_data[:])
# import matplotlib.pyplot as plt
# plt.hexbin(empire_reproj.filled_data[:],degas_con.filled_data[:])
# plt.show()
# plt.hexbin(empire_reproj.filled_data[:],degas_con.filled_data[:],logstretch=True)
# ?plt.hexbin
# plt.hexbin(empire_reproj.filled_data[:],degas_con.filled_data[:],bins='log')
# plt.show()
# from corner import corner
# corner(empire_reproj.filled_data[:].ravel(), degas_con.filled_data[:].ravel())
# ?corner
# corner(np.c_[empire_reproj.filled_data[:].ravel(), degas_con.filled_data[:].ravel()])
# import numpy as np
# corner(np.c_[empire_reproj.filled_data[:].ravel(), degas_con.filled_data[:].ravel()])
# corner(np.c_[empire_reproj.filled_data[:].ravel().value, degas_con.filled_data[:].ravel().value])
# emp = =empire_reproj.filled_data[:].value
# emp =empire_reproj.filled_data[:].value
# deg = degas_con.filled_data[:].value
# idx = np.isfinite(emp) * np.isfinite(deg)
# corner(np.c_[emp[idx], deg[idx]])
# plt.show()
# ?corner
# corner(np.c_[emp[idx], deg[idx]])
# plt.set_xrange([-0.006, 0.012])
# plt.set_xlim([-0.006, 0.012])
# f = corner(np.c_[emp[idx], deg[idx]])
# f.axes
# f.axes[2].set_xlim([-0.006, 0.02])
# f.axes[2].set_ylim([-0.006, 0.02])
# plt.show()
# f = corner(np.c_[emp[idx], deg[idx]/0.3],bins=100)
# f.axes[2].set_xlim([-0.006, 0.02])
# f.axes[2].set_ylim([-0.006, 0.02])
# f.axes[0].set_xlim([-0.006, 0.02])
# f.axes[3].set_xlim([-0.006, 0.02])
# f.axes[2].set_xlabel('EMPIRE')
# f.axes[2].set_ylabel('DEGAS')
# f.set_size_inches(6,6)
# f.savefig('DEGAS_vs_EMPIRE_NGC4321_HCN.pdf')
| low-sky/degas | degas/examples/compare_cube.py | Python | gpl-3.0 | 6,219 |
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class CreateFormTestTemplate(unittest.TestCase):
    """Selenium regression test: create a form with one select_one question.

    Fixes over the exported script:
    * ``except Exc, e`` (Python-2-only syntax) replaced with the
      version-portable ``except Exc:`` form.
    * The six copy-pasted 60-second polling loops are factored into
      _wait_for_css().
    """

    def setUp(self):
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(30)
        self.base_url = "http://kf.kbtdev.org/"
        self.verificationErrors = []
        self.accept_next_alert = True

    def _wait_for_css(self, selector, timeout=60):
        """Poll once per second until *selector* is present; fail on timeout."""
        for _ in range(timeout):
            try:
                if self.is_element_present(By.CSS_SELECTOR, selector):
                    return
            except Exception:
                pass
            time.sleep(1)
        self.fail("time out")

    def test_create_form_test_template(self):
        driver = self.driver
        driver.get(self.base_url + "")
        self._wait_for_css(".forms-header__title")
        self.assertFalse(self.is_element_present(By.CSS_SELECTOR, ".forms__card"))
        self.assertTrue(self.is_element_present(By.CSS_SELECTOR, ".forms-empty__button"))
        driver.find_element_by_css_selector(".forms-empty__button").click()
        self._wait_for_css(".forms__addform__start")
        # Click the form creation button using JavaScript to avoid element not visible errors.
        # WARNING: The 'runScript' command doesn't export to python, so a manual edit is necessary.
        # ERROR: Caught exception [ERROR: Unsupported command [runScript | $(".forms__addform__start").click(); | ]]
        self._wait_for_css(".form-title")
        driver.find_element_by_css_selector(".form-title").click()
        self._wait_for_css(".survey-header__title input")
        driver.find_element_by_css_selector(".survey-header__title input").send_keys(Keys.SHIFT, Keys.END, Keys.SHIFT, Keys.DELETE)
        driver.find_element_by_css_selector(".survey-header__title input").send_keys("Selenium test form title.", Keys.ENTER)
        self.assertEqual("Selenium test form title.", driver.find_element_by_css_selector(".form-title").text)
        self.assertTrue(self.is_element_present(By.CSS_SELECTOR, ".survey-editor .fa-plus"))
        driver.find_element_by_css_selector(".survey-editor .fa-plus").click()
        self._wait_for_css(".row__questiontypes__form > input")
        driver.find_element_by_css_selector(".row__questiontypes__form > input").send_keys("Selenium test question label.", Keys.TAB)
        self.assertTrue(self.is_element_present(By.CSS_SELECTOR, ".row__questiontypes__form > button"))
        driver.find_element_by_css_selector(".row__questiontypes__form > button").click()
        self._wait_for_css(".questiontypelist__item[data-menu-item=\"select_one\"]")
        driver.find_element_by_css_selector(".questiontypelist__item[data-menu-item=\"select_one\"]").click()
        self._wait_for_css(".card--selectquestion__expansion li:nth-child(1) span")
        self.assertEqual("Selenium test question label.", driver.find_element_by_css_selector(".card__header-title").text)
        # Rename the first auto-created choice.
        driver.find_element_by_css_selector(".card--selectquestion__expansion li:nth-child(1) .editable-wrapper span:first-child").click()
        self._wait_for_css(".card--selectquestion__expansion li:nth-child(1) input")
        driver.find_element_by_css_selector(".card--selectquestion__expansion li:nth-child(1) input").send_keys(Keys.SHIFT, Keys.END, Keys.SHIFT, Keys.DELETE)
        driver.find_element_by_css_selector(".card--selectquestion__expansion li:nth-child(1) input").send_keys("Selenium test question choice 1.", Keys.ENTER)
        self.assertEqual("Selenium test question choice 1.", driver.find_element_by_css_selector(".card--selectquestion__expansion li:nth-child(1) span").text)
        # Rename the second auto-created choice.
        self.assertTrue(self.is_element_present(By.CSS_SELECTOR, ".card--selectquestion__expansion li:nth-child(2) span"))
        driver.find_element_by_css_selector(".card--selectquestion__expansion li:nth-child(2) span").click()
        self._wait_for_css(".card--selectquestion__expansion li:nth-child(2) input")
        driver.find_element_by_css_selector(".card--selectquestion__expansion li:nth-child(2) input").send_keys(Keys.SHIFT, Keys.END, Keys.SHIFT, Keys.DELETE)
        driver.find_element_by_css_selector(".card--selectquestion__expansion li:nth-child(2) input").send_keys("Selenium test question choice 2.", Keys.ENTER)
        self.assertEqual("Selenium test question choice 2.", driver.find_element_by_css_selector(".card--selectquestion__expansion li:nth-child(2) span").text)
        # Save the form and verify it appears in the forms list.
        self.assertTrue(self.is_element_present(By.ID, "save"))
        driver.find_element_by_id("save").click()
        self._wait_for_css(".forms__card__title")
        self.assertEqual("Selenium test form title.", driver.find_element_by_css_selector(".forms__card__title").text)

    def is_element_present(self, how, what):
        """Return True if the element located by (how, what) exists."""
        try:
            self.driver.find_element(by=how, value=what)
        except NoSuchElementException:
            return False
        return True

    def is_alert_present(self):
        """Return True if a JavaScript alert is currently displayed."""
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException:
            return False
        return True

    def close_alert_and_get_its_text(self):
        """Accept or dismiss the current alert and return its text."""
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            self.accept_next_alert = True

    def tearDown(self):
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| kobotoolbox/kobo_selenium_tests | kobo_selenium_tests/selenium_ide_exported/create_form_test_template.py | Python | gpl-3.0 | 7,253 |
# PPFem: An educational finite element code
# Copyright (C) 2015 Matthias Rambausek
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ppfem.geometry.point import Point
from ppfem.elements.base import ReferenceElement
from ppfem.elements.lagrange_basis import LagrangeBasis
import numpy as np
class LagrangeElement(ReferenceElement):
    """Base class for Lagrange-type reference elements (nodal interpolation)."""

    def __init__(self, degree, dimension=1):
        # degree: polynomial degree; dimension: number of field components.
        ReferenceElement.__init__(self, degree, dimension)

    def interpolate_function(self, function, mapping=None):
        """
        This implementation shows the characteristic property of Lagrange Elements!
        (DOF values are simply the function values at the support points.)
        :param function: a callable f(p) where p is a Point (or a coordinate array of size space_dim()) and the result
        is of dimension dimension()
        :param mapping: a Mapping instance to compute the "physical" coordinates of a point in reference space
        """
        if mapping is not None:
            # Evaluate at the mapped ("physical") support-point locations.
            points = mapping.map_points(self.get_support_points())
        else:
            points = self.get_support_points()
        return np.array([function(p.coords()) for p in points])

    def function_value(self, dof_values, point):
        """Evaluate the interpolated function at *point* from its DOF values."""
        # first array axis corresponds to basis function!
        if self._dimension == 1:
            return np.dot(self.basis_function_values(point).reshape(1, self._n_bases), dof_values)
        else:
            # Contract the basis-function axis against the DOF axis.
            return np.einsum('ijk,ijk->jk', dof_values, self.basis_function_values(point))

    def function_gradient(self, dof_values, point, jacobian_inv=None):
        """Evaluate the gradient of the interpolated function at *point*.

        jacobian_inv, when given, maps reference-space gradients to
        physical space.
        """
        # first array axis corresponds to basis function!
        if self._dimension == 1:
            return np.dot(self.basis_function_gradients(point, jacobian_inv=jacobian_inv).reshape(dof_values.shape).T,
                          dof_values)
        elif self.space_dim() > 1:
            # Vector-valued field in a multi-dimensional space: keep the
            # extra gradient axis 'l'.
            return np.einsum('ijk,ijkl->jkl',
                             dof_values,
                             self.basis_function_gradients(point, jacobian_inv=jacobian_inv))
        elif self.space_dim() == 1:
            return np.einsum('ijk,ijk->jk',
                             dof_values,
                             self.basis_function_gradients(point, jacobian_inv=jacobian_inv))
class LagrangeLine(LagrangeElement):
    """One-dimensional Lagrange element on the reference interval [-1, 1]."""

    def __init__(self, degree, dimension=1):
        LagrangeElement.__init__(self, degree, dimension=dimension)

    def _setup_basis(self):
        # One Lagrange basis function per support point.
        nodes = self.get_support_points()
        self._n_bases = len(nodes)
        self._n_dofs = self._n_bases * self._dimension
        # All DOFs except the two endpoint ones are internal.
        self._n_internal_dofs = self._n_dofs - 2
        self._basis_functions = [
            LagrangeBasis(nodes, k, dimension=self._dimension)
            for k in range(self._n_bases)
        ]

    def get_support_points(self):
        # Endpoints first, then the equally spaced interior nodes.
        n_points = self._degree + 1
        interior = [Point(-1 + k * 2 / (n_points - 1), index=k)
                    for k in range(1, n_points - 1)]
        return [Point(-1), Point(1)] + interior

    @staticmethod
    def space_dim():
        return 1
| mrambausek/PPFem | ppfem/elements/lagrange_elements.py | Python | gpl-3.0 | 3,551 |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import getdate
from erpnext.accounts.doctype.sales_invoice.test_sales_invoice import create_sales_invoice
from erpnext.accounts.doctype.purchase_invoice.test_purchase_invoice import make_purchase_invoice
from erpnext.stock.doctype.item.test_item import make_item
import json
class TestGSTR3BReport(unittest.TestCase):
    """End-to-end check of the GSTR 3B report JSON output for _Test Company GST."""

    def test_gstr_3b_report(self):
        # Maps month numbers to the names the GSTR 3B Report doctype expects.
        month_number_mapping = {
            1: "January",
            2: "February",
            3: "March",
            4: "April",
            5: "May",
            6: "June",
            7: "July",
            8: "August",
            9: "September",
            10: "October",
            11: "November",
            12: "December"
        }

        # Start from a clean slate for the test company.
        frappe.set_user("Administrator")
        frappe.db.sql("delete from `tabSales Invoice` where company='_Test Company GST'")
        frappe.db.sql("delete from `tabPurchase Invoice` where company='_Test Company GST'")
        frappe.db.sql("delete from `tabGSTR 3B Report` where company='_Test Company GST'")

        # Build all fixtures: company, nil-exempt item, accounts, parties,
        # and the invoices the report aggregates.
        make_company()
        make_item("Milk", properties = {"is_nil_exempt": 1, "standard_rate": 0.000000})
        set_account_heads()
        make_customers()
        make_suppliers()
        make_sales_invoice()
        create_purchase_invoices()

        # Re-save an existing report (forces regeneration) or create a new one.
        if frappe.db.exists("GSTR 3B Report", "GSTR3B-March-2019-_Test Address-Billing"):
            report = frappe.get_doc("GSTR 3B Report", "GSTR3B-March-2019-_Test Address-Billing")
            report.save()
        else:
            report = frappe.get_doc({
                "doctype": "GSTR 3B Report",
                "company": "_Test Company GST",
                "company_address": "_Test Address-Billing",
                "year": getdate().year,
                "month": month_number_mapping.get(getdate().month)
            }).insert()

        # Verify the aggregated tax figures in the generated JSON.
        output = json.loads(report.json_output)

        self.assertEqual(output["sup_details"]["osup_det"]["iamt"], 18),
        self.assertEqual(output["sup_details"]["osup_zero"]["iamt"], 18),
        self.assertEqual(output["inter_sup"]["unreg_details"][0]["iamt"], 18),
        self.assertEqual(output["sup_details"]["osup_nil_exmp"]["txval"], 100),
        self.assertEqual(output["inward_sup"]["isup_details"][0]["inter"], 250)
        self.assertEqual(output["itc_elg"]["itc_avl"][4]["iamt"], 45)
def make_sales_invoice():
    """Create the four sales invoices test_gstr_3b_report aggregates.

    All invoices belong to _Test Company GST and share the same accounts;
    only the customer, the item, and whether IGST is charged vary, so the
    previously copy-pasted blocks are folded into one nested helper.
    """

    def _create_invoice(customer, item=None, with_igst=True):
        """Create and submit one invoice for *customer*.

        :param item: optional item code (default item when None)
        :param with_igst: append an 18% IGST row when True
        """
        kwargs = {
            "company": "_Test Company GST",
            "customer": customer,
            "currency": "INR",
            "warehouse": "Finished Goods - _GST",
            "debit_to": "Debtors - _GST",
            "income_account": "Sales - _GST",
            "expense_account": "Cost of Goods Sold - _GST",
            "cost_center": "Main - _GST",
            "do_not_save": 1,
        }
        if item is not None:
            kwargs["item"] = item

        si = create_sales_invoice(**kwargs)
        if with_igst:
            si.append("taxes", {
                "charge_type": "On Net Total",
                "account_head": "IGST - _GST",
                "cost_center": "Main - _GST",
                "description": "IGST @ 18.0",
                "rate": 18
            })
        si.submit()
        return si

    _create_invoice("_Test GST Customer")           # registered customer, IGST
    _create_invoice("_Test GST SEZ Customer")       # SEZ customer, IGST
    _create_invoice("_Test Unregistered Customer")  # unregistered customer, IGST
    # Nil-exempt item (Milk) — no tax row.
    _create_invoice("_Test GST Customer", item="Milk", with_igst=False)
def create_purchase_invoices():
    """Create and submit the purchase invoices needed by the GSTR-3B
    report tests: one taxable inward supply with IGST @ 18% (eligible
    as "All Other ITC") and one exempt inward supply of "Milk".
    """
    shared = dict(
        company="_Test Company GST",
        supplier='_Test Registered Supplier',
        currency='INR',
        warehouse='Finished Goods - _GST',
        cost_center='Main - _GST',
    )

    # Taxable inward supply, eligible for input tax credit.
    taxable = make_purchase_invoice(do_not_save=1, **shared)
    taxable.eligibility_for_itc = "All Other ITC"
    taxable.append("taxes", {
        "charge_type": "On Net Total",
        "account_head": "IGST - _GST",
        "cost_center": "Main - _GST",
        "description": "IGST @ 18.0",
        "rate": 18
    })
    taxable.submit()

    # Exempt inward supply ("Milk"): no tax row is appended.
    exempt = make_purchase_invoice(item="Milk", do_not_save=1, **shared)
    exempt.submit()
def make_suppliers():
    """Create the supplier fixtures (one registered, one unregistered)
    and their billing addresses, skipping records that already exist.
    """
    for supplier_name, gst_category in (
            ("_Test Registered Supplier", "Registered Regular"),
            ("_Test Unregistered Supplier", "Unregistered")):
        if not frappe.db.exists("Supplier", supplier_name):
            frappe.get_doc({
                "supplier_group": "_Test Supplier Group",
                "supplier_name": supplier_name,
                "gst_category": gst_category,
                "supplier_type": "Individual",
                "doctype": "Supplier",
            }).insert()

    # (title, gstin or None, linked supplier); both addresses are in Karnataka.
    for title, gstin, supplier_name in (
            ("_Test Supplier GST-1", "29AACCV0498C1Z9", "_Test Registered Supplier"),
            ("_Test Supplier GST-2", None, "_Test Unregistered Supplier")):
        if frappe.db.exists('Address', title + '-Billing'):
            continue
        fields = {
            "address_line1": "_Test Address Line 1",
            "address_title": title,
            "address_type": "Billing",
            "city": "_Test City",
            "state": "Test State",
            "country": "India",
            "doctype": "Address",
            "is_primary_address": 1,
            "phone": "+91 0000000000",
            "gst_state": "Karnataka",
        }
        if gstin is not None:
            fields["gstin"] = gstin
        address = frappe.get_doc(fields).insert()
        address.append("links", {
            "link_doctype": "Supplier",
            "link_name": supplier_name
        })
        address.save()
def make_customers():
    """Create the customer fixtures (registered, SEZ, unregistered) and
    their billing addresses, skipping records that already exist.

    The three customer stanzas and three address stanzas differ only in
    a few fields, so they are driven from data tuples instead of being
    copy-pasted.
    """
    for customer_name, gst_category in (
            ("_Test GST Customer", "Registered Regular"),
            ("_Test GST SEZ Customer", "SEZ"),
            ("_Test Unregistered Customer", "Unregistered")):
        if not frappe.db.exists("Customer", customer_name):
            frappe.get_doc({
                "customer_group": "_Test Customer Group",
                "customer_name": customer_name,
                "gst_category": gst_category,
                "customer_type": "Individual",
                "doctype": "Customer",
                "territory": "_Test Territory"
            }).insert()

    # (title, gst_state, linked customer, extra fields).  Only the first
    # address carries a GSTIN and GST state number.
    for title, gst_state, customer_name, extra in (
            ("_Test GST-1", "Karnataka", "_Test GST Customer",
             {"gstin": "29AZWPS7135H1ZG", "gst_state_number": "29"}),
            ("_Test GST-2", "Haryana", "_Test Unregistered Customer", {}),
            ("_Test GST-3", "Gujarat", "_Test GST SEZ Customer", {})):
        if frappe.db.exists('Address', title + '-Billing'):
            continue
        fields = {
            "address_line1": "_Test Address Line 1",
            "address_title": title,
            "address_type": "Billing",
            "city": "_Test City",
            "state": "Test State",
            "country": "India",
            "doctype": "Address",
            "is_primary_address": 1,
            "phone": "+91 0000000000",
            "gst_state": gst_state,
        }
        fields.update(extra)
        address = frappe.get_doc(fields).insert()
        address.append("links", {
            "link_doctype": "Customer",
            "link_name": customer_name
        })
        address.save()
def make_company():
    """Create the test company "_Test Company GST" and its primary
    billing address, doing nothing if the company already exists.
    """
    if frappe.db.exists("Company", "_Test Company GST"):
        return

    company = frappe.new_doc("Company")
    company.company_name = "_Test Company GST"
    company.abbr = "_GST"
    company.default_currency = "INR"
    company.country = "India"
    company.insert()

    if frappe.db.exists('Address', '_Test Address-Billing'):
        return

    address = frappe.get_doc({
        "address_line1": "_Test Address Line 1",
        "address_title": "_Test Address",
        "address_type": "Billing",
        "city": "_Test City",
        "state": "Test State",
        "country": "India",
        "doctype": "Address",
        "is_primary_address": 1,
        "phone": "+91 0000000000",
        "gstin": "27AAECE4835E1ZR",
        "gst_state": "Maharashtra",
        "gst_state_number": "27"
    }).insert()
    address.append("links", {
        "link_doctype": "Company",
        "link_name": "_Test Company GST"
    })
    address.save()
def set_account_heads():
    """Register the CGST/SGST/IGST account heads for "_Test Company GST"
    in GST Settings, unless they are already configured.
    """
    gst_settings = frappe.get_doc("GST Settings")
    existing = frappe.get_all(
        "GST Account",
        fields=["cgst_account", "sgst_account", "igst_account"],
        filters={"company": "_Test Company GST"})
    if existing:
        # Accounts already configured; nothing to do.
        return
    gst_settings.append("gst_accounts", {
        "company": "_Test Company GST",
        "cgst_account": "CGST - _GST",
        "sgst_account": "SGST - _GST",
        "igst_account": "IGST - _GST",
    })
    gst_settings.save()
| brownharryb/erpnext | erpnext/regional/doctype/gstr_3b_report/test_gstr_3b_report.py | Python | gpl-3.0 | 10,519 |
# vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2017 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Perform (or assist with) cleaning operations.
"""
from __future__ import absolute_import, print_function
from bleachbit import _, expanduser, expandvars
from bleachbit.FileUtilities import children_in_directory
from bleachbit.Options import options
from bleachbit import Command, FileUtilities, Memory, Special,GuiBasic
import glob
import logging
import os.path
import re
import sys
import warnings
import subprocess
if 'posix' == os.name:
from bleachbit import Unix
elif 'nt' == os.name:
from bleachbit import Windows
# Suppress GTK warning messages while running in CLI #34
warnings.simplefilter("ignore", Warning)

# PyGTK is optional: without it BleachBit runs in CLI-only mode.
# A bare "except:" here would also swallow KeyboardInterrupt/SystemExit,
# so catch only the import failure.
try:
    import gtk
    HAVE_GTK = True
except ImportError:
    HAVE_GTK = False

# a module-level variable for holding cleaners
backends = {}
class Cleaner:
    """Base class for a cleaner"""

    def __init__(self):
        self.actions = []       # sequence of (option_id, action) pairs
        self.id = None          # unique machine name, e.g. 'firefox'
        self.description = None  # short human-readable description
        self.name = None        # human-readable name, e.g. 'Firefox'
        self.options = {}       # option_id -> (name, description)
        self.running = []       # sequence of (detection_type, pathname)
        self.warnings = {}      # option_id -> warning string

    def add_action(self, option_id, action):
        """Register 'action' (instance of class Action) to be executed
        for 'option_id'.  The action must implement get_commands() and
        get_deep_scan()."""
        self.actions += ((option_id, action), )

    def add_option(self, option_id, name, description):
        """Register option (such as 'cache')"""
        self.options[option_id] = (name, description)

    def add_running(self, detection_type, pathname):
        """Add a way to detect this program is currently running"""
        self.running += ((detection_type, pathname), )

    def auto_hide(self):
        """Return boolean whether it is OK to automatically hide this
        cleaner"""
        for (option_id, __name) in self.get_options():
            try:
                # If any command would do something, or any deep scan is
                # defined, the cleaner is relevant and must stay visible.
                for cmd in self.get_commands(option_id):
                    for dummy in cmd.execute(False):
                        return False
                for ds in self.get_deep_scan(option_id):
                    if isinstance(ds, dict):
                        return False
            except Exception:
                logger = logging.getLogger(__name__)
                logger.exception('exception in auto_hide(), cleaner=%s, option=%s',
                                 self.name, option_id)
        return True

    def get_commands(self, option_id):
        """Get list of Command instances for option 'option_id'"""
        for action in self.actions:
            if option_id == action[0]:
                for cmd in action[1].get_commands():
                    yield cmd
        if option_id not in self.options:
            raise RuntimeError("Unknown option '%s'" % option_id)

    def get_deep_scan(self, option_id):
        """Get dictionary used to build a deep scan"""
        for action in self.actions:
            if option_id == action[0]:
                for ds in action[1].get_deep_scan():
                    yield ds
        if option_id not in self.options:
            raise RuntimeError("Unknown option '%s'" % option_id)

    def get_description(self):
        """Brief description of the cleaner"""
        return self.description

    def get_id(self):
        """Return the unique name of this cleaner"""
        return self.id

    def get_name(self):
        """Return the human name of this cleaner"""
        return self.name

    def get_option_descriptions(self):
        """Yield the names and descriptions of each option in a 2-tuple"""
        if self.options:
            for key in sorted(self.options.keys()):
                yield (self.options[key][0], self.options[key][1])

    def get_options(self):
        """Return user-configurable options in 2-tuple (id, name)"""
        if self.options:
            for key in sorted(self.options.keys()):
                yield (key, self.options[key][0])

    def get_warning(self, option_id):
        """Return a warning as string."""
        if option_id in self.warnings:
            return self.warnings[option_id]
        else:
            return None

    def _user_wants_close(self, pathname):
        """Ask the user whether BleachBit should close the running
        process 'pathname': via a CLI prompt when running with --preset,
        otherwise via a GTK yes/no dialog.  Return True if the user agreed.

        Extracted so that the GTK names are only touched on the GUI path;
        previously 'resp' was unbound (NameError) on the CLI path and
        'gtk.RESPONSE_YES' was evaluated even when GTK was never imported.
        """
        if "--preset" in sys.argv:
            resp_cli = raw_input("Do you Want BleachBit to Close " + pathname + " y/n : ")
            return resp_cli.lower() == "y"
        resp = GuiBasic.message_dialog(None, "Do you Want BleachBit to Close " + pathname,
                                       gtk.MESSAGE_WARNING, gtk.BUTTONS_YES_NO)
        return gtk.RESPONSE_YES == resp

    def is_running(self):
        """Return whether the program is currently running.

        When the 'close_run' option is enabled, offer to kill the
        running process; return False if it was successfully closed.
        """
        logger = logging.getLogger(__name__)
        for (test, pathname) in self.running:
            if 'exe' == test and 'posix' == os.name:
                if Unix.is_running(pathname):
                    logger.debug("Debug: process '%s' is running", pathname)
                    if options.get("close_run") and not subprocess.mswindows:
                        if self._user_wants_close(pathname):
                            logger.debug("Debug: Closing process '%s'", pathname)
                            subprocess.check_output(["killall", "-9", pathname])
                            if not Unix.is_running(pathname):
                                logger.debug("Debug: Closing process '%s' successful", pathname)
                                return False
                    return True
            elif 'exe' == test and 'nt' == os.name:
                # Note: a second, identical 'nt' branch used to follow this
                # one; it was unreachable dead code and has been removed.
                if Windows.is_process_running(pathname):
                    logger.debug("Debug: process '%s' is running", pathname)
                    if options.get("close_run") and subprocess.mswindows:
                        if self._user_wants_close(pathname):
                            logger.debug("debug: Closing process '%s'", pathname)
                            subprocess.check_output(["taskkill", "/IM", pathname])
                            if not Windows.is_process_running(pathname):
                                logger.debug("debug: Closing process '%s' successful", pathname)
                                return False
                    logger.debug("process '%s' is running", pathname)
                    return True
            elif 'pathname' == test:
                expanded = expanduser(expandvars(pathname))
                for globbed in glob.iglob(expanded):
                    if os.path.exists(globbed):
                        # Fix: the format string has two placeholders but
                        # only one argument was passed, which raises a
                        # logging error at runtime.
                        logger.debug("file '%s' exists indicating '%s' is running",
                                     globbed, self.name)
                        return True
            else:
                raise RuntimeError(
                    "Unknown running-detection test '%s'" % test)
        return False

    def is_usable(self):
        """Return whether the cleaner is usable (has actions)"""
        return len(self.actions) > 0

    def set_warning(self, option_id, description):
        """Set a warning to be displayed when option is selected interactively"""
        self.warnings[option_id] = description
class Firefox(Cleaner):
    """Mozilla Firefox"""

    def __init__(self):
        # Register the user-visible cleaning options and the ways to
        # detect a running Firefox instance.
        Cleaner.__init__(self)
        self.add_option('backup', _('Backup files'), _(
            'Delete the backup files'))
        self.add_option('cache', _('Cache'), _(
            'Delete the web cache, which reduces time to display revisited pages'))
        self.add_option('cookies', _('Cookies'), _(
            'Delete cookies, which contain information such as web site preferences, authentication, and tracking identification'))
        self.add_option(
            'crash_reports', _('Crash reports'), _('Delete the files'))
        # TRANSLATORS: DOM = Document Object Model.
        self.add_option('dom', _('DOM Storage'), _('Delete HTML5 cookies'))
        self.add_option('download_history', _(
            'Download history'), _('List of files downloaded'))
        self.add_option('forms', _('Form history'), _(
            'A history of forms entered in web sites and in the Search bar'))
        self.add_option('session_restore', _('Session restore'), _(
            'Loads the initial session after the browser closes or crashes'))
        self.add_option('site_preferences', _(
            'Site preferences'), _('Settings for individual sites'))
        self.add_option('passwords', _('Passwords'), _(
            'A database of usernames and passwords as well as a list of sites that should not store passwords'))
        self.set_warning(
            'passwords', _('This option will delete your saved passwords.'))
        self.add_option(
            'url_history', _('URL history'), _('List of visited web pages'))
        self.add_option('vacuum', _('Vacuum'), _(
            'Clean database fragmentation to reduce space and improve speed without removing any data'))
        # profile_dir is a glob pattern (note the wildcards) matching every
        # Firefox profile directory for the current user.
        # NOTE(review): profile_dir is only set for posix and nt, so
        # get_commands() would raise NameError on other platforms — confirm
        # the supported-platform assumption.
        if 'posix' == os.name:
            self.profile_dir = "~/.mozilla/firefox*/*.default*/"
            self.add_running('exe', 'firefox')
            self.add_running('exe', 'firefox-bin')
            self.add_running('pathname', self.profile_dir + 'lock')
        elif 'nt' == os.name:
            self.profile_dir = "$USERPROFILE\\Application Data\\Mozilla\\Firefox\\Profiles\\*.default*\\"
            self.add_running('exe', 'firefox.exe')
        self.description = _("Web browser")
        self.id = 'firefox'
        self.name = "Firefox"

    def get_commands(self, option_id):
        """Yield Command objects that clean the given option.

        Most options append pathnames to 'files', which is turned into
        Delete commands at the end; 'cache', 'url_history', and 'vacuum'
        also yield commands of their own along the way.
        """
        files = []
        # backup files
        if 'backup' == option_id:
            bookmark_bu_dir = os.path.join(self.profile_dir, 'bookmarkbackups')
            files += FileUtilities.expand_glob_join(bookmark_bu_dir, "*.json")
            files += FileUtilities.expand_glob_join(
                bookmark_bu_dir, "*.jsonlz4")
        # browser cache
        cache_base = None
        if 'posix' == os.name:
            cache_base = self.profile_dir
        elif 'nt' == os.name:
            cache_base = "$localappdata\\Mozilla\\Firefox\\Profiles\\*.default*"
        if 'cache' == option_id:
            dirs = FileUtilities.expand_glob_join(cache_base, "Cache*")
            dirs += FileUtilities.expand_glob_join(cache_base, "OfflineCache")
            if 'nt' == os.name:
                dirs += FileUtilities.expand_glob_join(
                    cache_base, "jumpListCache")  # Windows 8
            if 'posix' == os.name:
                # This path is whitelisted under the System - Cache cleaner,
                # so it can be cleaned here.
                dirs += [expanduser('~/.cache/mozilla')]
            for dirname in dirs:
                for filename in children_in_directory(dirname, False):
                    yield Command.Delete(filename)
            # Necko Predictive Network Actions
            # https://wiki.mozilla.org/Privacy/Reviews/Necko
            files += FileUtilities.expand_glob_join(
                self.profile_dir, "netpredictions.sqlite")
        # cookies
        if 'cookies' == option_id:
            files += FileUtilities.expand_glob_join(
                self.profile_dir, "cookies.txt")
            files += FileUtilities.expand_glob_join(
                self.profile_dir, "cookies.sqlite")
        # crash reports
        # NOTE(review): crashdir is only assigned on posix and nt; on other
        # platforms the 'crash_reports' option would raise NameError.
        if 'posix' == os.name:
            crashdir = expanduser("~/.mozilla/firefox/Crash Reports")
        if 'nt' == os.name:
            crashdir = expandvars(
                "$USERPROFILE\\Application Data\\Mozilla\\Firefox\\Crash Reports")
        if 'crash_reports' == option_id:
            for filename in children_in_directory(crashdir, False):
                files += [filename]
            files += FileUtilities.expand_glob_join(
                self.profile_dir, "minidumps/*.dmp")
        # DOM storage
        if 'dom' == option_id:
            files += FileUtilities.expand_glob_join(
                self.profile_dir, "webappsstore.sqlite")
        # download history
        if 'download_history' == option_id:
            # Firefox version 1
            files += FileUtilities.expand_glob_join(
                self.profile_dir, "downloads.rdf")
            # Firefox version 3
            files += FileUtilities.expand_glob_join(
                self.profile_dir, "downloads.sqlite")
        # forms
        if 'forms' == option_id:
            files += FileUtilities.expand_glob_join(
                self.profile_dir, "formhistory.dat")
            files += FileUtilities.expand_glob_join(
                self.profile_dir, "formhistory.sqlite")
        # passwords
        if 'passwords' == option_id:
            # http://kb.mozillazine.org/Password_Manager
            files += FileUtilities.expand_glob_join(
                self.profile_dir, "signons.txt")
            files += FileUtilities.expand_glob_join(
                self.profile_dir, "signons[2-3].txt")
            files += FileUtilities.expand_glob_join(
                self.profile_dir, "signons.sqlite")
            files += FileUtilities.expand_glob_join(
                self.profile_dir, "logins.json")
        # session restore
        if 'session_restore' == option_id:
            # Names include sessionstore.js, sessionstore.bak,
            # sessionstore.bak-20140715214327, sessionstore-1.js
            files += FileUtilities.expand_glob_join(
                self.profile_dir, "sessionstore*.js")
            files += FileUtilities.expand_glob_join(
                self.profile_dir, "sessionstore.bak*")
            ss_bu_dir = os.path.join(self.profile_dir, 'sessionstore-backups')
            files += FileUtilities.expand_glob_join(
                ss_bu_dir, 'previous.js')
            files += FileUtilities.expand_glob_join(
                ss_bu_dir, 'upgrade.js-20*')
            files += FileUtilities.expand_glob_join(
                ss_bu_dir, 'recovery.js')
            files += FileUtilities.expand_glob_join(
                ss_bu_dir, 'recovery.bak')
        # site-specific preferences
        if 'site_preferences' == option_id:
            files += FileUtilities.expand_glob_join(
                self.profile_dir, "content-prefs.sqlite")
        # URL history
        if 'url_history' == option_id:
            # Firefox version 1
            files += FileUtilities.expand_glob_join(
                self.profile_dir, "history.dat")
            # Firefox 21 on Windows
            if 'nt' == os.name:
                files += FileUtilities.expand_glob_join(
                    cache_base, "thumbnails/*.png")
            # see also function other_cleanup()
        # finish
        for filename in files:
            yield Command.Delete(filename)
        # URL history
        if 'url_history' == option_id:
            for path in FileUtilities.expand_glob_join(self.profile_dir, "places.sqlite"):
                yield Command.Function(path,
                                       Special.delete_mozilla_url_history,
                                       _('Delete the usage history'))
        # vacuum
        if 'vacuum' == option_id:
            paths = []
            paths += FileUtilities.expand_glob_join(
                self.profile_dir, "*.sqlite")
            if not cache_base == self.profile_dir:
                paths += FileUtilities.expand_glob_join(cache_base, "*.sqlite")
            for path in paths:
                yield Command.Function(path,
                                       FileUtilities.vacuum_sqlite3, _("Vacuum"))
class OpenOfficeOrg(Cleaner):
    """Delete OpenOffice.org cache"""

    def __init__(self):
        Cleaner.__init__(self)
        self.options = {}
        self.add_option('cache', _('Cache'), _('Delete the cache'))
        self.add_option('recent_documents', _('Most recently used'), _(
            "Delete the list of recently used documents"))
        self.id = 'openofficeorg'
        self.name = 'OpenOffice.org'
        self.description = _("Office suite")

        # reference: http://katana.oooninja.com/w/editions_of_openoffice.org
        # self.prefixes lists the per-edition profile directories to scan.
        if 'posix' == os.name:
            self.prefixes = ["~/.ooo-2.0", "~/.openoffice.org2",
                             "~/.openoffice.org2.0", "~/.openoffice.org/3"]
            self.prefixes += ["~/.ooo-dev3"]
        if 'nt' == os.name:
            self.prefixes = [
                "$APPDATA\\OpenOffice.org\\3", "$APPDATA\\OpenOffice.org2"]

    def get_commands(self, option_id):
        """Yield Command objects that clean the given option."""
        # paths for which to run expand_glob_join
        egj = []
        if 'recent_documents' == option_id:
            egj.append(
                "user/registry/data/org/openoffice/Office/Histories.xcu")
            egj.append(
                "user/registry/cache/org.openoffice.Office.Histories.dat")

        # NOTE(review): the "and not 'cache' == option_id" clause is always
        # true here, because option_id already equals 'recent_documents'.
        if 'recent_documents' == option_id and not 'cache' == option_id:
            egj.append("user/registry/cache/org.openoffice.Office.Common.dat")

        for egj_ in egj:
            for prefix in self.prefixes:
                for path in FileUtilities.expand_glob_join(prefix, egj_):
                    if 'nt' == os.name:
                        path = os.path.normpath(path)
                    if os.path.lexists(path):
                        yield Command.Delete(path)

        if 'cache' == option_id:
            dirs = []
            for prefix in self.prefixes:
                dirs += FileUtilities.expand_glob_join(
                    prefix, "user/registry/cache/")
            for dirname in dirs:
                if 'nt' == os.name:
                    dirname = os.path.normpath(dirname)
                for filename in children_in_directory(dirname, False):
                    yield Command.Delete(filename)

        if 'recent_documents' == option_id:
            for prefix in self.prefixes:
                for path in FileUtilities.expand_glob_join(prefix, "user/registry/data/org/openoffice/Office/Common.xcu"):
                    if os.path.lexists(path):
                        yield Command.Function(path,
                                               Special.delete_ooo_history,
                                               _('Delete the usage history'))
                # ~/.openoffice.org/3/user/registrymodifications.xcu
                # Apache OpenOffice.org 3.4.1 from openoffice.org on Ubuntu 13.04
                # %AppData%\OpenOffice.org\3\user\registrymodifications.xcu
                # Apache OpenOffice.org 3.4.1 from openoffice.org on Windows XP
                for path in FileUtilities.expand_glob_join(prefix, "user/registrymodifications.xcu"):
                    if os.path.lexists(path):
                        yield Command.Function(path,
                                               Special.delete_office_registrymodifications,
                                               _('Delete the usage history'))
class System(Cleaner):
"""Clean the system in general"""
    def __init__(self):
        """Register the System cleaner's options.

        Which options exist depends on the platform (POSIX, Linux,
        Windows, GTK availability).  The TRANSLATORS comments must stay
        directly above their _() calls for gettext extraction.
        """
        Cleaner.__init__(self)
        #
        # options for Linux and BSD
        #
        if 'posix' == os.name:
            # TRANSLATORS: desktop entries are .desktop files in Linux that
            # make up the application menu (the menu that shows BleachBit,
            # Firefox, and others.  The .desktop files also associate file
            # types, so clicking on an .html file in Nautilus brings up
            # Firefox.
            # More information:
            # http://standards.freedesktop.org/menu-spec/latest/index.html#introduction
            self.add_option('desktop_entry', _('Broken desktop files'), _(
                'Delete broken application menu entries and file associations'))
            self.add_option('cache', _('Cache'), _('Delete the cache'))
            # TRANSLATORS: Localizations are files supporting specific
            # languages, so applications appear in Spanish, etc.
            self.add_option('localizations', _('Localizations'), _(
                'Delete files for unwanted languages'))
            self.set_warning(
                'localizations', _("Configure this option in the preferences."))
            # TRANSLATORS: 'Rotated logs' refers to old system log files.
            # Linux systems often have a scheduled job to rotate the logs
            # which means compress all except the newest log and then delete
            # the oldest log.  You could translate this 'old logs.'
            self.add_option(
                'rotated_logs', _('Rotated logs'), _('Delete old system logs'))
            self.add_option('recent_documents', _('Recent documents list'), _(
                'Delete the list of recently used documents'))
            self.add_option('trash', _('Trash'), _('Empty the trash'))
        #
        # options just for Linux
        #
        if sys.platform.startswith('linux'):
            self.add_option('memory', _('Memory'),
                            # TRANSLATORS: 'free' means 'unallocated'
                            _('Wipe the swap and free memory'))
            self.set_warning(
                'memory', _('This option is experimental and may cause system problems.'))
        #
        # options just for Microsoft Windows
        #
        if 'nt' == os.name:
            self.add_option('logs', _('Logs'), _('Delete the logs'))
            self.add_option(
                'memory_dump', _('Memory dump'), _('Delete the file memory.dmp'))
            self.add_option('muicache', 'MUICache', _('Delete the cache'))
            # TRANSLATORS: Prefetch is Microsoft Windows jargon.
            self.add_option('prefetch', _('Prefetch'), _('Delete the cache'))
            self.add_option(
                'recycle_bin', _('Recycle bin'), _('Empty the recycle bin'))
            # TRANSLATORS: 'Update' is a noun, and 'Update uninstallers' is an option to delete
            # the uninstallers for software updates.
            self.add_option('updates', _('Update uninstallers'), _(
                'Delete uninstallers for Microsoft updates including hotfixes, service packs, and Internet Explorer updates'))
        #
        # options for GTK+
        #
        if HAVE_GTK:
            self.add_option('clipboard', _('Clipboard'), _(
                'The desktop environment\'s clipboard used for copy and paste operations'))
        #
        # options common to all platforms
        #
        # TRANSLATORS: "Custom" is an option allowing the user to specify which
        # files and folders will be erased.
        self.add_option('custom', _('Custom'), _(
            'Delete user-specified files and folders'))
        # TRANSLATORS: 'free' means 'unallocated'
        self.add_option('free_disk_space', _('Free disk space'),
                        # TRANSLATORS: 'free' means 'unallocated'
                        _('Overwrite free disk space to hide deleted files'))
        self.set_warning('free_disk_space', _('This option is very slow.'))
        self.add_option(
            'tmp', _('Temporary files'), _('Delete the temporary files'))
        self.description = _("The system in general")
        self.id = 'system'
        self.name = _("System")
    def get_commands(self, option_id):
        """Yield Command objects that clean the given option.

        Many options append pathnames to 'files', which is filtered for
        existence and turned into Delete commands at the very end; other
        options yield Delete/Function/Shred/Winreg commands directly.
        """
        # This variable will collect fully expanded file names, and
        # at the end of this function, they will be checked they exist
        # and processed through Command.Delete().
        files = []
        # cache
        if 'posix' == os.name and 'cache' == option_id:
            dirname = expanduser("~/.cache/")
            for filename in children_in_directory(dirname, True):
                # skip paths protected by the whitelist (e.g. for other cleaners)
                if self.whitelisted(filename):
                    continue
                files += [filename]
        # custom
        if 'custom' == option_id:
            for (c_type, c_path) in options.get_custom_paths():
                if 'file' == c_type:
                    files += [c_path]
                elif 'folder' == c_type:
                    files += [c_path]
                    for path in children_in_directory(c_path, True):
                        files += [path]
                else:
                    raise RuntimeError(
                        'custom folder has invalid type %s' % c_type)
        # menu
        menu_dirs = ['~/.local/share/applications',
                     '~/.config/autostart',
                     '~/.gnome/apps/',
                     '~/.gnome2/panel2.d/default/launchers',
                     '~/.gnome2/vfolders/applications/',
                     '~/.kde/share/apps/RecentDocuments/',
                     '~/.kde/share/mimelnk',
                     '~/.kde/share/mimelnk/application/ram.desktop',
                     '~/.kde2/share/mimelnk/application/',
                     '~/.kde2/share/applnk']
        if 'posix' == os.name and 'desktop_entry' == option_id:
            for dirname in menu_dirs:
                for filename in [fn for fn in children_in_directory(dirname, False)
                                 if fn.endswith('.desktop')]:
                    if Unix.is_broken_xdg_desktop(filename):
                        yield Command.Delete(filename)
        # unwanted locales
        if 'posix' == os.name and 'localizations' == option_id:
            for path in Unix.locales.localization_paths(locales_to_keep=options.get_languages()):
                if os.path.isdir(path):
                    for f in FileUtilities.children_in_directory(path, True):
                        yield Command.Delete(f)
                yield Command.Delete(path)
        # Windows logs
        if 'nt' == os.name and 'logs' == option_id:
            paths = (
                '$ALLUSERSPROFILE\\Application Data\\Microsoft\\Dr Watson\\*.log',
                '$ALLUSERSPROFILE\\Application Data\\Microsoft\\Dr Watson\\user.dmp',
                '$LocalAppData\\Microsoft\\Windows\\WER\\ReportArchive\\*\\*',
                '$LocalAppData\\Microsoft\\Windows\WER\\ReportQueue\\*\\*',
                '$programdata\\Microsoft\\Windows\\WER\\ReportArchive\\*\\*',
                '$programdata\\Microsoft\\Windows\\WER\\ReportQueue\\*\\*',
                '$localappdata\\Microsoft\\Internet Explorer\\brndlog.bak',
                '$localappdata\\Microsoft\\Internet Explorer\\brndlog.txt',
                '$windir\\*.log',
                '$windir\\imsins.BAK',
                '$windir\\OEWABLog.txt',
                '$windir\\SchedLgU.txt',
                '$windir\\ntbtlog.txt',
                '$windir\\setuplog.txt',
                '$windir\\REGLOCS.OLD',
                '$windir\\Debug\\*.log',
                '$windir\\Debug\\Setup\\UpdSh.log',
                '$windir\\Debug\\UserMode\\*.log',
                '$windir\\Debug\\UserMode\\ChkAcc.bak',
                '$windir\\Debug\\UserMode\\userenv.bak',
                '$windir\\Microsoft.NET\Framework\*\*.log',
                '$windir\\pchealth\\helpctr\\Logs\\hcupdate.log',
                '$windir\\security\\logs\\*.log',
                '$windir\\security\\logs\\*.old',
                '$windir\\SoftwareDistribution\\*.log',
                '$windir\\SoftwareDistribution\\DataStore\\Logs\\*',
                '$windir\\system32\\TZLog.log',
                '$windir\\system32\\config\\systemprofile\\Application Data\\Microsoft\\Internet Explorer\\brndlog.bak',
                '$windir\\system32\\config\\systemprofile\\Application Data\\Microsoft\\Internet Explorer\\brndlog.txt',
                '$windir\\system32\\LogFiles\\AIT\\AitEventLog.etl.???',
                '$windir\\system32\\LogFiles\\Firewall\\pfirewall.log*',
                '$windir\\system32\\LogFiles\\Scm\\SCM.EVM*',
                '$windir\\system32\\LogFiles\\WMI\\Terminal*.etl',
                '$windir\\system32\\LogFiles\\WMI\\RTBackup\EtwRT.*etl',
                '$windir\\system32\\wbem\\Logs\\*.lo_',
                '$windir\\system32\\wbem\\Logs\\*.log', )
            for path in paths:
                expanded = expandvars(path)
                for globbed in glob.iglob(expanded):
                    files += [globbed]
        # memory
        if sys.platform.startswith('linux') and 'memory' == option_id:
            yield Command.Function(None, Memory.wipe_memory, _('Memory'))
        # memory dump
        # how to manually create this file
        # http://www.pctools.com/guides/registry/detail/856/
        if 'nt' == os.name and 'memory_dump' == option_id:
            fname = expandvars('$windir\\memory.dmp')
            if os.path.exists(fname):
                files += [fname]
            for fname in glob.iglob(expandvars('$windir\\Minidump\\*.dmp')):
                files += [fname]
        # most recently used documents list
        if 'posix' == os.name and 'recent_documents' == option_id:
            files += [expanduser("~/.recently-used")]
            # GNOME 2.26 (as seen on Ubuntu 9.04) will retain the list
            # in memory if it is simply deleted, so it must be shredded
            # (or at least truncated).
            #
            # GNOME 2.28.1 (Ubuntu 9.10) and 2.30 (10.04) do not re-read
            # the file after truncation, but do re-read it after
            # shredding.
            #
            # https://bugzilla.gnome.org/show_bug.cgi?id=591404

            def gtk_purge_items():
                """Purge GTK items"""
                gtk.RecentManager().purge_items()
                yield 0

            for pathname in ["~/.recently-used.xbel", "~/.local/share/recently-used.xbel"]:
                pathname = expanduser(pathname)
                if os.path.lexists(pathname):
                    yield Command.Shred(pathname)
            if HAVE_GTK:
                # Use the Function to skip when in preview mode
                yield Command.Function(None, gtk_purge_items, _('Recent documents list'))

        if 'posix' == os.name and 'rotated_logs' == option_id:
            for path in Unix.rotated_logs():
                yield Command.Delete(path)

        # temporary files
        if 'posix' == os.name and 'tmp' == option_id:
            dirnames = ['/tmp', '/var/tmp']
            for dirname in dirnames:
                for path in children_in_directory(dirname, True):
                    # delete only regular, non-open, user-owned,
                    # non-whitelisted files
                    is_open = FileUtilities.openfiles.is_open(path)
                    ok = not is_open and os.path.isfile(path) and \
                        not os.path.islink(path) and \
                        FileUtilities.ego_owner(path) and \
                        not self.whitelisted(path)
                    if ok:
                        yield Command.Delete(path)

        # temporary files
        if 'nt' == os.name and 'tmp' == option_id:
            dirname = expandvars(
                "$USERPROFILE\\Local Settings\\Temp\\")
            # whitelist the folder %TEMP%\Low but not its contents
            # https://bugs.launchpad.net/bleachbit/+bug/1421726
            low = os.path.join(dirname, 'low').lower()
            for filename in children_in_directory(dirname, True):
                if not low == filename.lower():
                    yield Command.Delete(filename)
            dirname = expandvars("$windir\\temp\\")
            for filename in children_in_directory(dirname, True):
                yield Command.Delete(filename)

        # trash
        if 'posix' == os.name and 'trash' == option_id:
            dirname = expanduser("~/.Trash")
            for filename in children_in_directory(dirname, False):
                yield Command.Delete(filename)
            # fixme http://www.ramendik.ru/docs/trashspec.html
            # http://standards.freedesktop.org/basedir-spec/basedir-spec-0.6.html
            # ~/.local/share/Trash
            # * GNOME 2.22, Fedora 9
            # * KDE 4.1.3, Ubuntu 8.10
            dirname = expanduser("~/.local/share/Trash/files")
            for filename in children_in_directory(dirname, True):
                yield Command.Delete(filename)
            dirname = expanduser("~/.local/share/Trash/info")
            for filename in children_in_directory(dirname, True):
                yield Command.Delete(filename)
            dirname = expanduser("~/.local/share/Trash/expunged")
            # desrt@irc.gimpnet.org tells me that the trash
            # backend puts files in here temporary, but in some situations
            # the files are stuck.
            for filename in children_in_directory(dirname, True):
                yield Command.Delete(filename)

        # clipboard
        if HAVE_GTK and 'clipboard' == option_id:
            def clear_clipboard():
                gtk.gdk.threads_enter()
                clipboard = gtk.clipboard_get()
                clipboard.set_text("")
                gtk.gdk.threads_leave()
                return 0
            yield Command.Function(None, clear_clipboard, _('Clipboard'))

        # overwrite free space
        shred_drives = options.get_list('shred_drives')
        if 'free_disk_space' == option_id and shred_drives:
            for pathname in shred_drives:
                # TRANSLATORS: 'Free' means 'unallocated.'
                # %s expands to a path such as C:\ or /tmp/
                display = _("Overwrite free disk space %s") % pathname

                def wipe_path_func():
                    # NOTE(review): this closure binds the loop variable
                    # 'pathname' late; it is correct only if each yielded
                    # command is executed before this generator is advanced
                    # to the next drive — confirm with the caller.
                    for ret in FileUtilities.wipe_path(pathname, idle=True):
                        # Yield control to GTK idle because this process
                        # is very slow.  Also display progress.
                        yield ret
                    yield 0
                yield Command.Function(None, wipe_path_func, display)

        # MUICache
        if 'nt' == os.name and 'muicache' == option_id:
            keys = (
                'HKCU\\Software\\Microsoft\\Windows\\ShellNoRoam\\MUICache',
                'HKCU\\Software\\Classes\\Local Settings\\Software\\Microsoft\\Windows\\Shell\\MuiCache')
            for key in keys:
                yield Command.Winreg(key, None)

        # prefetch
        if 'nt' == os.name and 'prefetch' == option_id:
            for path in glob.iglob(expandvars('$windir\\Prefetch\\*.pf')):
                yield Command.Delete(path)

        # recycle bin
        if 'nt' == os.name and 'recycle_bin' == option_id:
            # This method allows shredding
            recycled_any = False
            for path in Windows.get_recycle_bin():
                recycled_any = True
                yield Command.Delete(path)
            # If there were any files deleted, Windows XP will show the
            # wrong icon for the recycle bin indicating it is not empty.
            # The icon will be incorrect until logging in to Windows again
            # or until it is emptied using the Windows API call for emptying
            # the recycle bin.
            # Windows 10 refreshes the recycle bin icon when the user
            # opens the recycle bin folder.
            # This is a hack to refresh the icon.

            def empty_recycle_bin_func():
                import tempfile
                tmpdir = tempfile.mkdtemp()
                Windows.move_to_recycle_bin(tmpdir)
                try:
                    Windows.empty_recycle_bin(None, True)
                except:
                    logging.getLogger(__name__).info('error in empty_recycle_bin()', exc_info=True)
                yield 0
            # Using the Function Command prevents emptying the recycle bin
            # when in preview mode.
            if recycled_any:
                yield Command.Function(None, empty_recycle_bin_func, _('Empty the recycle bin'))

        # Windows Updates
        if 'nt' == os.name and 'updates' == option_id:
            for wu in Windows.delete_updates():
                yield wu

        # return queued files
        for filename in files:
            if os.path.lexists(filename):
                yield Command.Delete(filename)
def whitelisted(self, pathname):
"""Return boolean whether file is whitelisted"""
regexes = [
'^/tmp/.X0-lock$',
'^/tmp/.truecrypt_aux_mnt.*/(control|volume)$',
'^/tmp/.vbox-[^/]+-ipc/lock$',
'^/tmp/.wine-[0-9]+/server-.*/lock$',
'^/tmp/gconfd-[^/]+/lock/ior$',
'^/tmp/fsa/', # fsarchiver
'^/tmp/kde-',
'^/tmp/kdesudo-',
'^/tmp/ksocket-',
'^/tmp/orbit-[^/]+/bonobo-activation-register[a-z0-9-]*.lock$',
'^/tmp/orbit-[^/]+/bonobo-activation-server-[a-z0-9-]*ior$',
'^/tmp/pulse-[^/]+/pid$',
'^/var/tmp/kdecache-',
'^' + expanduser('~/.cache/wallpaper/'),
# Clean Firefox cache from Firefox cleaner (LP#1295826)
'^' + expanduser('~/.cache/mozilla'),
# Clean Google Chrome cache from Google Chrome cleaner (LP#656104)
'^' + expanduser('~/.cache/google-chrome'),
'^' + expanduser('~/.cache/gnome-control-center/'),
# iBus Pinyin
# https://bugs.launchpad.net/bleachbit/+bug/1538919
'^' + expanduser('~/.cache/ibus/')]
for regex in regexes:
if re.match(regex, pathname) is not None:
return True
return False
def register_cleaners():
    """Register all known cleaners: system, CleanerML, and Winapp2"""
    global backends
    # The dict object is shared globally, so empty it in place:
    # rebinding with `backends = {}` would not update other references.
    backends.clear()
    # Hard-coded (non-CleanerML) backends, registered in a fixed order.
    for cleaner_id, backend_class in (('firefox', Firefox),
                                      ('openofficeorg', OpenOfficeOrg),
                                      ('system', System)):
        backends[cleaner_id] = backend_class()
    # Cleaners defined in CleanerML files.
    from bleachbit import CleanerML
    CleanerML.load_cleaners()
    # Winapp2.ini cleaners exist only on Windows.
    if os.name == 'nt':
        from bleachbit import Winapp
        Winapp.load_cleaners()
def create_simple_cleaner(paths):
    """Shred arbitrary files (used in CLI and GUI)"""
    cleaner = Cleaner()
    cleaner.add_option(option_id='files', name='', description='')
    cleaner.name = _("System")  # shows up in progress bar
    from bleachbit import Action

    class CustomFileAction(Action.ActionProvider):
        # Ad-hoc action provider that shreds exactly the given paths.
        action_key = '__customfileaction'

        def get_commands(self):
            for target in paths:
                if not isinstance(target, (str, unicode)):
                    raise RuntimeError(
                        'expected path as string but got %s' % str(target))
                if not os.path.isabs(target):
                    target = os.path.abspath(target)
                # For a directory, shred its contents first, then the
                # directory entry itself; both branches of the original
                # ended by shredding the path.
                if os.path.isdir(target):
                    for child in children_in_directory(target, True):
                        yield Command.Shred(child)
                yield Command.Shred(target)
    cleaner.add_action('files', CustomFileAction(None))
    return cleaner
def create_wipe_cleaner(path):
    """Wipe free disk space of arbitrary paths (used in GUI)"""
    # Build a temporary cleaner object around a single wipe action.
    cleaner = Cleaner()
    cleaner.add_option(
        option_id='free_disk_space', name='', description='')
    cleaner.name = ''
    # Progress label shown while the wipe runs.
    display = _("Overwrite free disk space %s") % path

    def wipe_path_func():
        # Generator: yields after each chunk so the GUI stays responsive.
        for ret in FileUtilities.wipe_path(path, idle=True):
            yield ret
        yield 0
    from bleachbit import Action

    class CustomWipeAction(Action.ActionProvider):
        action_key = '__customwipeaction'

        def get_commands(self):
            yield Command.Function(None, wipe_path_func, display)
    cleaner.add_action('free_disk_space', CustomWipeAction(None))
    return cleaner
| brahmastra2016/bleachbit | bleachbit/Cleaner.py | Python | gpl-3.0 | 41,217 |
import os
import ConfigParser
class SettingsIO(object):
    """ ConfigParser abstraction: a simple read/write store backed by a
    single INI-style file. """

    def __init__(self, config_file):
        """Load existing settings from config_file, if the file exists.

        :param config_file: path of the INI file backing this store
        """
        self.config_file = config_file
        self.config = ConfigParser.ConfigParser()
        if os.path.exists(self.config_file):
            self.config.read([self.config_file])

    def read_setting(self, key, section='lutris'):
        """Return the value stored for key in section, or None when either
        the section or the option is missing."""
        try:
            return self.config.get(section, key)
        except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
            # Missing option and missing section are treated identically.
            return None

    def write_setting(self, key, value, section='lutris'):
        """Set key to str(value) in section (creating the section if
        needed) and persist the whole configuration to disk."""
        if not self.config.has_section(section):
            self.config.add_section(section)
        self.config.set(section, key, str(value))
        # The config file is text: open in text mode. The previous 'wb'
        # mode breaks ConfigParser.write under Python 3 and gains nothing
        # under Python 2.
        with open(self.config_file, 'w') as config_file:
            self.config.write(config_file)
| GoeGaming/lutris | lutris/util/settings.py | Python | gpl-3.0 | 894 |
import os
from ase.structure import molecule
from ase.io import read, write
from ase.parallel import rank
from gpaw import GPAW, restart
import warnings
# cmr calls all available methods in ase.atoms detected by the module inspect.
# Therefore also deprecated methods are called - and we choose to silence those warnings.
warnings.filterwarnings('ignore', 'ase.atoms.*deprecated',)
import cmr
#from cmr.tools.log import Log
#cmr.logger.set_message_selection(Log.MSG_TYPE_ALL)
# Workflow stage switches: each top-level block below runs only when its
# flag is True.
calculate = True
recalculate = True
analyse_from_dir = True # analyse local cmr files
upload_to_db = False # upload cmr files to the database
analyse_from_db = False # analyse database
create_group = True # group calculations belonging to a given reaction
clean = False
# Sanity checks: grouping requires analysis results, and analysing the
# database only makes sense after uploading to it.
if create_group: assert analyse_from_dir or analyse_from_db
if analyse_from_db: assert upload_to_db
symbol = 'Li'
# define the project in order to find it in the database!
project_id = 'my first project: atomize'
vacuum = 3.5
# calculator parameters
xc = 'LDA'
mode = 'lcao'
h = 0.20
# CMR metadata shared by every calculation of this project; per-calculation
# copies add e.g. the formula, Hund flag, and total energy.
cmr_params_template = {
    'db_keywords': [project_id],
    # add project_id also as a field to support search across projects
    'project_id': project_id,
    # user's tags
    'U_vacuum': vacuum,
    'U_xc': xc,
    'U_mode': mode,
    'U_h': h,
    }
if calculate:
    # Stage 1: self-consistent LDA/LCAO ground states for the dimer and the
    # atom; each pass writes a .gpw restart file and a .cmr metadata file.
    # NOTE(review): the molecule and atom passes below are near-duplicates
    # and could be factored into a helper function.
    # molecule
    formula = symbol + '2'
    # set formula name to be written into the cmr file
    cmr_params = cmr_params_template.copy()
    cmr_params['U_formula'] = formula
    cmrfile = formula + '.cmr'
    system = molecule(formula)
    system.center(vacuum=vacuum)
    # Note: Molecules do not need broken cell symmetry!
    if 0:
        system.cell[1, 1] += 0.01
        system.cell[2, 2] += 0.02
    # Hund rule (for atoms)
    hund = (len(system) == 1)
    cmr_params['U_hund'] = hund
    # first calculation: LDA lcao
    calc = GPAW(mode=mode, xc=xc, h=h, hund=hund, txt=formula + '.txt')
    system.set_calculator(calc)
    e = system.get_potential_energy()
    # write gpw file
    calc.write(formula)
    # add total energy to users tags
    cmr_params['U_potential_energy'] = e
    # write the information 'as in' corresponding trajectory file
    # plus cmr_params into cmr file
    write(cmrfile, system, cmr_params=cmr_params)
    del calc
    # atom
    formula = symbol
    # set formula name to be written into the cmr file
    cmr_params = cmr_params_template.copy()
    cmr_params['U_formula'] = formula
    cmrfile = formula + '.cmr'
    system = molecule(formula)
    system.center(vacuum=vacuum)
    # Note: Li does not need broken cell symmetry! Many other atoms do!
    if 0:
        system.cell[1, 1] += 0.01
        system.cell[2, 2] += 0.02
    # Hund rule (for atoms)
    hund = (len(system) == 1)
    cmr_params['U_hund'] = hund
    # first calculation: LDA lcao
    calc = GPAW(mode=mode, xc=xc, h=h, hund=hund, txt=formula + '.txt')
    system.set_calculator(calc)
    e = system.get_potential_energy()
    # write gpw file
    calc.write(formula)
    # add total energy to users tags
    cmr_params['U_potential_energy'] = e
    # write the information 'as in' corresponding trajectory file
    # plus cmr_params into cmr file
    write(cmrfile, system, cmr_params=cmr_params)
    del calc
if recalculate:
    # Stage 2: evaluate PBE energies non-self-consistently on the stored LDA
    # orbitals (restarted from the .gpw files written in stage 1) and append
    # the result to each .cmr file.
    # now calculate PBE energies on LDA orbitals
    # molecule
    formula = symbol + '2'
    system, calc = restart(formula, txt=None)
    ediff = calc.get_xc_difference('PBE')
    cmrfile = formula + '.cmr'
    # add new results to the cmrfile
    data = cmr.read(cmrfile)
    data.set_user_variable('U_potential_energy_PBE', data['U_potential_energy'] + ediff)
    data.write(cmrfile)
    del calc
    # atom
    formula = symbol
    system, calc = restart(formula, txt=None)
    ediff = calc.get_xc_difference('PBE')
    cmrfile = formula + '.cmr'
    # add new results to the cmrfile
    data = cmr.read(cmrfile)
    data.set_user_variable('U_potential_energy_PBE', data['U_potential_energy'] + ediff)
    data.write(cmrfile)
    del calc
if analyse_from_dir:
    # Stage 3: read the local .cmr files back, print result tables, and
    # compute atomization energies; optionally write a group .cmr file.
    # NOTE(review): the name `all` shadows the Python builtin of that name
    # for the rest of the script.
    # analyze the results from cmr files in the local directory
    from cmr.ui import DirectoryReader
    # read all compounds in the project with lcao and LDA orbitals
    reader = DirectoryReader(directory='.', ext='.cmr')
    all = reader.find(name_value_list=[('U_mode', 'lcao'), ('U_xc', 'LDA')],
                      keyword_list=[project_id])
    if rank == 0:
        print 'results from cmr files in the local directory'
        # print requested results
        # column_length=0 aligns data in the table (-1 : data unaligned is default)
        all.print_table(column_length=0,
                        columns=['U_formula', 'U_vacuum',
                                 'U_xc', 'U_h', 'U_hund',
                                 'U_potential_energy', 'U_potential_energy_PBE',
                                 'ase_temperature'])
    # access the results directly and calculate atomization energies
    f2 = symbol + '2'
    f1 = symbol
    if rank == 0:
        # results are accessible only on master rank
        r2 = all.get('U_formula', f2)
        r1 = all.get('U_formula', f1)
        # calculate atomization energies (ea)
        ea_LDA = 2 * r1['U_potential_energy'] - r2['U_potential_energy']
        ea_PBE = 2 * r1['U_potential_energy_PBE'] - r2['U_potential_energy_PBE']
        print 'atomization energy [eV] ' + xc + ' = ' + str(ea_LDA)
        print 'atomization energy [eV] PBE = ' + str(ea_PBE)
        if create_group:
            # ea_LDA and ea_PBE define a group
            group = cmr.create_group();
            group.add(r1['db_hash']);
            group.add(r2['db_hash']);
            group.set_user_variable('U_ea_LDA', ea_LDA)
            group.set_user_variable('U_ea_PBE', ea_PBE)
            group.set_user_variable('U_description', 'atomization energy [eV]')
            group.set_user_variable('U_reaction', '2 * ' + symbol + ' - ' + symbol + '2')
            group.set_user_variable('db_keywords', [project_id])
            group.set_user_variable('project_id', project_id)
            group.write(symbol + '2_atomize_from_dir.cmr');
    if True:
        all = reader.find(keyword_list=[project_id])
        if rank == 0:
            print 'contents of the cmr files present in the local directory'
            # print requested results
            # column_length=0 aligns data in the table (-1 : data unaligned is default)
            all.print_table(column_length=0,
                            columns=['U_formula', 'U_vacuum',
                                     'U_xc', 'U_h', 'U_hund',
                                     'U_potential_energy', 'U_potential_energy_PBE',
                                     'ase_temperature', 'U_reaction', 'U_ea_LDA', 'U_ea_PBE', 'U_description'])
if upload_to_db:
    # upload cmr files to the database
    if rank == 0:
        # NOTE(review): shell command built by string concatenation; fine for
        # the hard-coded symbol 'Li', but would need quoting for arbitrary
        # symbols.
        os.system('cmr --commit ' + symbol + '*.cmr')
if analyse_from_db:
    # Stage 4: same analysis as stage 3, but reading from the remote
    # database instead of local .cmr files.
    # analyze the results from the database
    # analysis can only be performed on rank 0!!
    from cmr.ui import DBReader
    reader = DBReader()
    all = reader.find(name_value_list=[('U_mode', 'lcao'),
                                       ('U_xc', 'LDA'),
                                       #('db_user', '')
                                       ],
                      keyword_list=[project_id])
    if rank == 0:
        print 'results from the database'
        # print requested results
        # column_length=0 aligns data in the table (-1 : data unaligned is default)
        all.print_table(column_length=0,
                        columns=['U_formula', 'U_vacuum',
                                 'U_xc', 'U_h', 'U_hund',
                                 'U_potential_energy', 'U_potential_energy_PBE',
                                 'ase_temperature'])
    # access the results directly and calculate atomization energies
    f2 = symbol + '2'
    f1 = symbol
    # results are accessible only on master rank
    r1 = all.get('U_formula', f1)
    r2 = all.get('U_formula', f2)
    # check if results were successfully retrieved, otherwise we have to wait
    if r1 is None or r2 is None:
        print "Results are not yet in the database. Wait, and try again."
    else:
        # calculate atomization energies (ea)
        ea_LDA = 2 * r1['U_potential_energy'] - r2['U_potential_energy']
        ea_PBE = 2 * r1['U_potential_energy_PBE'] - r2['U_potential_energy_PBE']
        if rank == 0:
            print 'atomization energy [eV] ' + xc + ' = ' + str(ea_LDA)
            print 'atomization energy [eV] PBE = ' + str(ea_PBE)
        if create_group:
            # ea_LDA and ea_PBE define a group
            group = cmr.create_group();
            group.add(r1['db_hash']);
            group.add(r2['db_hash']);
            group.set_user_variable('U_ea_LDA', ea_LDA)
            group.set_user_variable('U_ea_PBE', ea_PBE)
            group.set_user_variable('U_description', 'atomization energy [eV] (from database)')
            group.set_user_variable('U_reaction', '2 * ' + symbol + ' - ' + symbol + '2')
            group.set_user_variable('db_keywords', [project_id])
            group.set_user_variable('project_id', project_id)
            group.write(symbol + '2_atomize_from_db.cmr');
            group.write(".cmr");
    if True:
        all = reader.find(keyword_list=[project_id])
        if rank == 0:
            print 'contents of the database'
            # print requested results
            # column_length=0 aligns data in the table (-1 : data unaligned is default)
            all.print_table(column_length=0,
                            columns=['U_formula', 'U_vacuum',
                                     'U_xc', 'U_h', 'U_hund',
                                     'U_potential_energy', 'U_potential_energy_PBE',
                                     'ase_temperature', 'U_reaction', 'U_ea_LDA', 'U_ea_PBE', 'U_description'])
if clean:
    # Optional cleanup stage: remove every file the workflow produced.
    if rank == 0:
        # 'filename' instead of 'file' avoids shadowing the Python 2
        # builtin of that name.
        for filename in [symbol + '.cmr', symbol + '.gpw', symbol + '.txt',
                         symbol + '2.cmr', symbol + '2.gpw', symbol + '2.txt',
                         symbol + '2_atomize_from_dir.cmr',
                         symbol + '2_atomize_from_db.cmr']:
            if os.path.exists(filename):
                os.unlink(filename)
| robwarm/gpaw-symm | gpaw/test/cmrtest/Li2_atomize.py | Python | gpl-3.0 | 10,201 |
#!/usr/bin/env python
"""
This example explores how logging behaves if no special
configuration is performed.
conclusions:
- we get an error printout which DOES NOT stop the program,
complaining that we do not have handlers:
"No handlers could be found for logger "root""
This is printed once and then no more.
this is true for both "__main__" and "root" which is
the logger you get when you do 'logging.getLogger()'
"""
import logging
# logger = logging.getLogger(__name__)
logger = logging.getLogger()
# Emit one record per level, announcing each on stdout first. Whether a
# record actually appears depends on how the unconfigured logging system
# handles records (see the module docstring).
for label, emit, message in (
    ("warning", logger.warning, "this is a warning message"),
    ("debug", logger.debug, "this is a debug message"),
    ("error", logger.error, "this is an error message"),
):
    print(label)
    emit(message)
| veltzer/demos-python | src/examples/short/logging/basic_no_conf.py | Python | gpl-3.0 | 687 |
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
import os
from collections import namedtuple
from calibre.customize import Plugin
from calibre.constants import iswindows
class DevicePlugin(Plugin):
"""
Defines the interface that should be implemented by backends that
communicate with an ebook reader.
"""
type = _('Device Interface')
#: Ordered list of supported formats
FORMATS = ["lrf", "rtf", "pdf", "txt"]
# If True, the config dialog will not show the formats box
HIDE_FORMATS_CONFIG_BOX = False
#: VENDOR_ID can be either an integer, a list of integers or a dictionary
#: If it is a dictionary, it must be a dictionary of dictionaries,
#: of the form::
#:
#: {
#: integer_vendor_id : { product_id : [list of BCDs], ... },
#: ...
#: }
#:
VENDOR_ID = 0x0000
#: An integer or a list of integers
PRODUCT_ID = 0x0000
#: BCD can be either None to not distinguish between devices based on BCD, or
#: it can be a list of the BCD numbers of all devices supported by this driver.
BCD = None
#: Height for thumbnails on the device
THUMBNAIL_HEIGHT = 68
#: Width for thumbnails on the device. Setting this will force thumbnails
#: to this size, not preserving aspect ratio. If it is not set, then
#: the aspect ratio will be preserved and the thumbnail will be no higher
#: than THUMBNAIL_HEIGHT
# THUMBNAIL_WIDTH = 68
#: Set this to True if the device supports updating cover thumbnails during
#: sync_booklists. Setting it to true will ask device.py to refresh the
#: cover thumbnails during book matching
WANTS_UPDATED_THUMBNAILS = False
#: Whether the metadata on books can be set via the GUI.
CAN_SET_METADATA = ['title', 'authors', 'collections']
#: Whether the device can handle device_db metadata plugboards
CAN_DO_DEVICE_DB_PLUGBOARD = False
# Set this to None if the books on the device are files that the GUI can
# access in order to add the books from the device to the library
BACKLOADING_ERROR_MESSAGE = _('Cannot get files from this device')
#: Path separator for paths to books on device
path_sep = os.sep
#: Icon for this device
icon = I('reader.png')
# Encapsulates an annotation fetched from the device
UserAnnotation = namedtuple('Annotation','type, value')
#: GUI displays this as a message if not None. Useful if opening can take a
#: long time
OPEN_FEEDBACK_MESSAGE = None
#: Set of extensions that are "virtual books" on the device
#: and therefore cannot be viewed/saved/added to library
#: For example: ``frozenset(['kobo'])``
VIRTUAL_BOOK_EXTENSIONS = frozenset([])
#: Whether to nuke comments in the copy of the book sent to the device. If
#: not None this should be short string that the comments will be replaced
#: by.
NUKE_COMMENTS = None
#: If True indicates that this driver completely manages device detection,
#: ejecting and so forth. If you set this to True, you *must* implement the
#: detect_managed_devices and debug_managed_device_detection methods.
#: A driver with this set to true is responsible for detection of devices,
#: managing a blacklist of devices, a list of ejected devices and so forth.
#: calibre will periodically call the detect_managed_devices() method and
#: is it returns a detected device, calibre will call open(). open() will
#: be called every time a device is returned even is previous calls to open()
#: failed, therefore the driver must maintain its own blacklist of failed
#: devices. Similarly, when ejecting, calibre will call eject() and then
#: assuming the next call to detect_managed_devices() returns None, it will
#: call post_yank_cleanup().
MANAGES_DEVICE_PRESENCE = False
#: If set the True, calibre will call the :meth:`get_driveinfo()` method
#: after the books lists have been loaded to get the driveinfo.
SLOW_DRIVEINFO = False
#: If set to True, calibre will ask the user if they want to manage the
#: device with calibre, the first time it is detected. If you set this to
#: True you must implement :meth:`get_device_uid()` and
#: :meth:`ignore_connected_device()` and
#: :meth:`get_user_blacklisted_devices` and
#: :meth:`set_user_blacklisted_devices`
ASK_TO_ALLOW_CONNECT = False
@classmethod
def get_gui_name(cls):
if hasattr(cls, 'gui_name'):
return cls.gui_name
if hasattr(cls, '__name__'):
return cls.__name__
return cls.name
# Device detection {{{
def test_bcd_windows(self, device_id, bcd):
if bcd is None or len(bcd) == 0:
return True
for c in bcd:
rev = 'rev_%4.4x'%c
# Bug in winutil.get_usb_devices sometimes converts a to :
if rev in device_id or rev.replace('a', ':') in device_id:
return True
return False
    def print_usb_device_info(self, info):
        # Debug helper: print one detected USB device's info. Best effort —
        # any error while formatting is printed and otherwise ignored.
        try:
            print '\t', repr(info)
        except:
            import traceback
            traceback.print_exc()
    def is_usb_connected_windows(self, devices_on_system, debug=False,
            only_presence=False):
        """Windows implementation of :meth:`is_usb_connected`.
        Returns ``(True, (vendor_id, product_id, bcd, None, None, None))``
        when a matching device ID string is found in devices_on_system,
        otherwise ``(False, None)``.
        """
        def id_iterator():
            # Yield every acceptable (vendor_id, product_id, bcd) triple,
            # flattening the dict-of-dicts form of VENDOR_ID when present.
            if hasattr(self.VENDOR_ID, 'keys'):
                for vid in self.VENDOR_ID:
                    vend = self.VENDOR_ID[vid]
                    for pid in vend:
                        bcd = vend[pid]
                        yield vid, pid, bcd
            else:
                vendors = self.VENDOR_ID if hasattr(self.VENDOR_ID, '__len__') else [self.VENDOR_ID]
                products = self.PRODUCT_ID if hasattr(self.PRODUCT_ID, '__len__') else [self.PRODUCT_ID]
                for vid in vendors:
                    for pid in products:
                        yield vid, pid, self.BCD
        for vendor_id, product_id, bcd in id_iterator():
            # Device ID strings may embed the ids as 4-digit hex or decimal.
            vid, pid = 'vid_%4.4x'%vendor_id, 'pid_%4.4x'%product_id
            vidd, pidd = 'vid_%i'%vendor_id, 'pid_%i'%product_id
            for device_id in devices_on_system:
                if (vid in device_id or vidd in device_id) and \
                   (pid in device_id or pidd in device_id) and \
                   self.test_bcd_windows(device_id, bcd):
                    if debug:
                        self.print_usb_device_info(device_id)
                    if only_presence or self.can_handle_windows(device_id, debug=debug):
                        try:
                            # Parse the BCD out of the ID string; ':' may
                            # stand for 'a' (see test_bcd_windows).
                            bcd = int(device_id.rpartition(
                                'rev_')[-1].replace(':', 'a'), 16)
                        except:
                            bcd = None
                        return True, (vendor_id, product_id, bcd, None,
                                None, None)
        return False, None
def test_bcd(self, bcdDevice, bcd):
if bcd is None or len(bcd) == 0:
return True
for c in bcd:
if c == bcdDevice:
return True
return False
    def is_usb_connected(self, devices_on_system, debug=False,
            only_presence=False):
        '''
        Return True, device_info if a device handled by this plugin is currently connected.
        :param devices_on_system: List of devices currently connected
        '''
        if iswindows:
            return self.is_usb_connected_windows(devices_on_system,
                    debug=debug, only_presence=only_presence)
        # Unix path: each entry of devices_on_system is indexed as
        # (vendor_id, product_id, bcdDevice, ...).
        vendors_on_system = set([x[0] for x in devices_on_system])
        vendors = self.VENDOR_ID if hasattr(self.VENDOR_ID, '__len__') else [self.VENDOR_ID]
        if hasattr(self.VENDOR_ID, 'keys'):
            # Dict-of-dicts VENDOR_ID: collect every product id it mentions.
            products = []
            for ven in self.VENDOR_ID:
                products.extend(self.VENDOR_ID[ven].keys())
        else:
            products = self.PRODUCT_ID if hasattr(self.PRODUCT_ID, '__len__') else [self.PRODUCT_ID]
        for vid in vendors:
            if vid in vendors_on_system:
                for dev in devices_on_system:
                    cvid, pid, bcd = dev[:3]
                    if cvid == vid:
                        if pid in products:
                            if hasattr(self.VENDOR_ID, 'keys'):
                                try:
                                    cbcd = self.VENDOR_ID[vid][pid]
                                except KeyError:
                                    # Vendor vid does not have product pid, pid
                                    # exists for some other vendor in this
                                    # device
                                    continue
                            else:
                                cbcd = self.BCD
                            if self.test_bcd(bcd, cbcd):
                                if debug:
                                    self.print_usb_device_info(dev)
                                if self.can_handle(dev, debug=debug):
                                    return True, dev
        return False, None
    def detect_managed_devices(self, devices_on_system, force_refresh=False):
        '''
        Called only if MANAGES_DEVICE_PRESENCE is True.
        Scan for devices that this driver can handle. Should return a device
        object if a device is found. This object will be passed to the open()
        method as the connected_device. If no device is found, return None. The
        returned object can be anything, calibre does not use it, it is only
        passed to open().
        This method is called periodically by the GUI, so make sure it is not
        too resource intensive. Use a cache to avoid repeatedly scanning the
        system.
        :param devices_on_system: Set of USB devices found on the system.
        :param force_refresh: If True and the driver uses a cache to prevent
            repeated scanning, the cache must be flushed.
        '''
        # Abstract: drivers with MANAGES_DEVICE_PRESENCE must override.
        raise NotImplementedError()
    def debug_managed_device_detection(self, devices_on_system, output):
        '''
        Called only if MANAGES_DEVICE_PRESENCE is True.
        Should write information about the devices detected on the system to
        output, which is a file like object.
        Should return True if a device was detected and successfully opened,
        otherwise False.
        :param devices_on_system: Set of USB devices found on the system.
        :param output: A file-like object that debug output is written to.
        '''
        # Abstract: drivers with MANAGES_DEVICE_PRESENCE must override.
        raise NotImplementedError()
    # }}}
    def reset(self, key='-1', log_packets=False, report_progress=None,
            detected_device=None):
        """
        :param key: The key to unlock the device
        :param log_packets: If true the packet stream to/from the device is logged
        :param report_progress: Function that is called with a % progress
                                (number between 0 and 100) for various tasks
                                If it is called with -1 that means that the
                                task does not have any progress information
        :param detected_device: Device information from the device scanner
        """
        # Abstract: concrete drivers must override this method.
        raise NotImplementedError()
    def can_handle_windows(self, device_id, debug=False):
        '''
        Optional method to perform further checks on a device to see if this driver
        is capable of handling it. If it is not it should return False. This method
        is only called after the vendor, product ids and the bcd have matched, so
        it can do some relatively time intensive checks. The default implementation
        returns True. This method is called only on windows. See also
        :meth:`can_handle`.
        :param device_id: On windows, a device ID string.
        '''
        return True
    def can_handle(self, device_info, debug=False):
        '''
        Unix version of :meth:`can_handle_windows`
        :param device_info: Is a tuple of (vid, pid, bcd, manufacturer, product,
            serial number)
        '''
        return True
    def open(self, connected_device, library_uuid):
        '''
        Perform any device specific initialization. Called after the device is
        detected but before any other functions that communicate with the device.
        For example: For devices that present themselves as USB Mass storage
        devices, this method would be responsible for mounting the device or
        if the device has been automounted, for finding out where it has been
        mounted. The method :meth:`calibre.devices.usbms.device.Device.open`
        has an implementation of this function that should serve as a good
        example for USB Mass storage devices.
        This method can raise an OpenFeedback exception to display a message to
        the user.
        :param connected_device: The device that we are trying to open. It is
            a tuple of (vendor id, product id, bcd, manufacturer name, product
            name, device serial number). However, some devices have no serial
            number and on windows only the first three fields are present, the
            rest are None.
        :param library_uuid: The UUID of the current calibre library. Can be
            None if there is no library (for example when used from the command
            line).
        '''
        raise NotImplementedError()
    def eject(self):
        '''
        Un-mount / eject the device from the OS. This does not check if there
        are pending GUI jobs that need to communicate with the device.
        NOTE: That this method may not be called on the same thread as the rest
        of the device methods.
        '''
        raise NotImplementedError()
    def post_yank_cleanup(self):
        '''
        Called if the user yanks the device without ejecting it first.
        '''
        raise NotImplementedError()
    def set_progress_reporter(self, report_progress):
        '''
        Set a function to report progress information.
        :param report_progress: Function that is called with a % progress
                                (number between 0 and 100) for various tasks
                                If it is called with -1 that means that the
                                task does not have any progress information
        '''
        raise NotImplementedError()
    def get_device_information(self, end_session=True):
        """
        Ask device for device information. See L{DeviceInfoQuery}.
        :return: (device name, device version, software version on device, mime type)
            The tuple can optionally have a fifth element, which is a
            drive information dictionary. See usbms.driver for an example.
        """
        raise NotImplementedError()
    def get_driveinfo(self):
        '''
        Return the driveinfo dictionary. Usually called from
        get_device_information(), but if loading the driveinfo is slow for this
        driver, then it should set SLOW_DRIVEINFO. In this case, this method
        will be called by calibre after the book lists have been loaded. Note
        that it is not called on the device thread, so the driver should cache
        the drive info in the books() method and this function should return
        the cached data.
        '''
        # Default: no drive information available.
        return {}
    def card_prefix(self, end_session=True):
        '''
        Return a 2 element list of the prefix to paths on the cards.
        If no card is present None is set for the card's prefix.
        E.G.
        ('/place', '/place2')
        (None, 'place2')
        ('place', None)
        (None, None)
        '''
        raise NotImplementedError()
    def total_space(self, end_session=True):
        """
        Get total space available on the mountpoints:
            1. Main memory
            2. Memory Card A
            3. Memory Card B
        :return: A 3 element list with total space in bytes of (1, 2, 3). If a
            particular device doesn't have any of these locations it should return 0.
        """
        raise NotImplementedError()
    def free_space(self, end_session=True):
        """
        Get free space available on the mountpoints:
            1. Main memory
            2. Card A
            3. Card B
        :return: A 3 element list with free space in bytes of (1, 2, 3). If a
            particular device doesn't have any of these locations it should return -1.
        """
        raise NotImplementedError()
    def books(self, oncard=None, end_session=True):
        """
        Return a list of ebooks on the device.
        :param oncard: If 'carda' or 'cardb' return a list of ebooks on the
                       specific storage card, otherwise return list of ebooks
                       in main memory of device. If a card is specified and no
                       books are on the card return empty list.
        :return: A BookList.
        """
        raise NotImplementedError()
    def upload_books(self, files, names, on_card=None, end_session=True,
                     metadata=None):
        '''
        Upload a list of books to the device. If a file already
        exists on the device, it should be replaced.
        This method should raise a :class:`FreeSpaceError` if there is not enough
        free space on the device. The text of the FreeSpaceError must contain the
        word "card" if ``on_card`` is not None otherwise it must contain the word "memory".
        :param files: A list of paths
        :param names: A list of file names that the books should have
            once uploaded to the device. len(names) == len(files)
        :param metadata: If not None, it is a list of :class:`Metadata` objects.
            The idea is to use the metadata to determine where on the device to
            put the book. len(metadata) == len(files). Apart from the regular
            cover (path to cover), there may also be a thumbnail attribute, which should
            be used in preference. The thumbnail attribute is of the form
            (width, height, cover_data as jpeg).
        :return: A list of 3-element tuples. The list is meant to be passed
            to :meth:`add_books_to_metadata`.
        '''
        raise NotImplementedError()
    @classmethod
    def add_books_to_metadata(cls, locations, metadata, booklists):
        '''
        Add locations to the booklists. This function must not communicate with
        the device.
        :param locations: Result of a call to L{upload_books}
        :param metadata: List of :class:`Metadata` objects, same as for
            :meth:`upload_books`.
        :param booklists: A tuple containing the result of calls to
            (:meth:`books(oncard=None)`,
            :meth:`books(oncard='carda')`,
            :meth:`books(oncard='cardb')`).
        '''
        # NOTE(review): raises the exception class itself (no parentheses);
        # legal, but inconsistent with the NotImplementedError() calls
        # elsewhere in this class.
        raise NotImplementedError
    def delete_books(self, paths, end_session=True):
        '''
        Delete books at paths on device.
        '''
        raise NotImplementedError()
    @classmethod
    def remove_books_from_metadata(cls, paths, booklists):
        '''
        Remove books from the metadata list. This function must not communicate
        with the device.
        :param paths: paths to books on the device.
        :param booklists: A tuple containing the result of calls to
            (:meth:`books(oncard=None)`,
            :meth:`books(oncard='carda')`,
            :meth:`books(oncard='cardb')`).
        '''
        raise NotImplementedError()
    def sync_booklists(self, booklists, end_session=True):
        '''
        Update metadata on device.
        :param booklists: A tuple containing the result of calls to
            (:meth:`books(oncard=None)`,
            :meth:`books(oncard='carda')`,
            :meth:`books(oncard='cardb')`).
        '''
        raise NotImplementedError()
    def get_file(self, path, outfile, end_session=True):
        '''
        Read the file at ``path`` on the device and write it to outfile.
        :param outfile: file object like ``sys.stdout`` or the result of an
            :func:`open` call.
        '''
        raise NotImplementedError()
@classmethod
def config_widget(cls):
'''
Should return a QWidget. The QWidget contains the settings for the
device interface
'''
raise NotImplementedError()
@classmethod
def save_settings(cls, settings_widget):
'''
Should save settings to disk. Takes the widget created in
:meth:`config_widget` and saves all settings to disk.
'''
raise NotImplementedError()
@classmethod
def settings(cls):
'''
Should return an opts object. The opts object should have at least one
attribute `format_map` which is an ordered list of formats for the
device.
'''
raise NotImplementedError()
    def set_plugboards(self, plugboards, pb_func):
        '''
        Provide the driver the current set of plugboards and a function to
        select a specific plugboard. This method is called immediately before
        add_books and sync_booklists.

        pb_func is a callable with the following signature::

            def pb_func(device_name, format, plugboards)

        You give it the current device name (either the class name or
        DEVICE_PLUGBOARD_NAME), the format you are interested in (a 'real'
        format or 'device_db'), and the plugboards (you were given those by
        set_plugboards, the same place you got this method).

        :return: None or a single plugboard instance.
        '''
        # Default: drivers that do not use plugboards simply ignore them.
        pass
    def set_driveinfo_name(self, location_code, name):
        '''
        Set the device name in the driveinfo file to 'name'. This setting will
        persist until the file is re-created or the name is changed again.

        Non-disk devices should implement this method based on the location
        codes returned by the get_device_information() method.
        '''
        # Default: no-op for drivers without a persistent driveinfo file.
        pass
    def prepare_addable_books(self, paths):
        '''
        Given a list of paths, returns another list of paths. These paths
        point to addable versions of the books.

        If there is an error preparing a book, then instead of a path, the
        position in the returned list for that book should be a three tuple:
        (original_path, the exception instance, traceback)
        '''
        # Default: device files are assumed to be directly addable as-is.
        return paths
    def startup(self):
        '''
        Called when calibre is starting the device. Do any initialization
        required. Note that multiple instances of the class can be instantiated,
        and thus __init__ can be called multiple times, but only one instance
        will have this method called. This method is called on the device
        thread, not the GUI thread.
        '''
        pass
    def shutdown(self):
        '''
        Called when calibre is shutting down, either for good or in preparation
        to restart. Do any cleanup required. This method is called on the
        device thread, not the GUI thread.
        '''
        pass
    def get_device_uid(self):
        '''
        Must return a unique id for the currently connected device (this is
        called immediately after a successful call to open()). You must
        implement this method if you set ASK_TO_ALLOW_CONNECT = True
        '''
        raise NotImplementedError()
    def ignore_connected_device(self, uid):
        '''
        Should ignore the device identified by uid (the result of a call to
        get_device_uid()) in the future. You must implement this method if you
        set ASK_TO_ALLOW_CONNECT = True. Note that this function is called
        immediately after open(), so if open() caches some state, the driver
        should reset that state.
        '''
        raise NotImplementedError()
    def get_user_blacklisted_devices(self):
        '''
        Return map of device uid to friendly name for all devices that the user
        has asked to be ignored.
        '''
        # Default: no devices are ignored.
        return {}
    def set_user_blacklisted_devices(self, devices):
        '''
        Set the list of device uids that should be ignored by this driver.
        '''
        # Default: no-op for drivers that do not support ignoring devices.
        pass
    def specialize_global_preferences(self, device_prefs):
        '''
        Implement this method if your device wants to override a particular
        preference. You must ensure that all call sites that want a preference
        that can be overridden use device_prefs['something'] instead
        of prefs['something']. Your
        method should call device_prefs.set_overrides(pref=val, pref=val, ...).
        Currently used for:
        metadata management (prefs['manage_device_metadata'])
        '''
        # Default: register no overrides.
        device_prefs.set_overrides()
# Dynamic control interface.
# The following methods are probably called on the GUI thread. Any driver
# that implements these methods must take pains to be thread safe, because
# the device_manager might be using the driver at the same time that one of
# these methods is called.
    def is_dynamically_controllable(self):
        '''
        Called by the device manager when starting plugins. If this method returns
        a string, then a) it supports the device manager's dynamic control
        interface, and b) that name is to be used when talking to the plugin.

        This method can be called on the GUI thread. A driver that implements
        this method must be thread safe.
        '''
        # Default: dynamic control is not supported.
        return None
    def start_plugin(self):
        '''
        This method is called to start the plugin. The plugin should begin
        to accept device connections however it does that. If the plugin is
        already accepting connections, then do nothing.

        This method can be called on the GUI thread. A driver that implements
        this method must be thread safe.
        '''
        pass
    def stop_plugin(self):
        '''
        This method is called to stop the plugin. The plugin should no longer
        accept connections, and should cleanup behind itself. It is likely that
        this method should call shutdown. If the plugin is already not accepting
        connections, then do nothing.

        This method can be called on the GUI thread. A driver that implements
        this method must be thread safe.
        '''
        pass
    def get_option(self, opt_string, default=None):
        '''
        Return the value of the option indicated by opt_string. This method can
        be called when the plugin is not started. Return None if the option does
        not exist.

        This method can be called on the GUI thread. A driver that implements
        this method must be thread safe.
        '''
        # Default: no options are stored; fall back to the caller's default.
        return default
    def set_option(self, opt_string, opt_value):
        '''
        Set the value of the option indicated by opt_string. This method can
        be called when the plugin is not started.

        This method can be called on the GUI thread. A driver that implements
        this method must be thread safe.
        '''
        # Default: no-op for drivers without stored options.
        pass
    def is_running(self):
        '''
        Return True if the plugin is started, otherwise False.

        This method can be called on the GUI thread. A driver that implements
        this method must be thread safe.
        '''
        return False
class BookList(list):
    '''
    A list of books. Each Book object must have the fields

      #. title
      #. authors
      #. size (file size of the book)
      #. datetime (a UTC time tuple)
      #. path (path on the device to the book)
      #. thumbnail (can be None) thumbnail is either a str/bytes object with the
         image data or it should have an attribute image_path that stores an
         absolute (platform native) path to the image
      #. tags (a list of strings, can be empty).
    '''

    # Assigning None removes these hooks — presumably to disallow slice
    # access/assignment on booklists (TODO confirm intent).
    __getslice__ = None
    __setslice__ = None

    def __init__(self, oncard, prefix, settings):
        # The base class keeps no state; subclasses are expected to populate
        # the list from the device/card identified by the arguments.
        pass

    def supports_collections(self):
        ''' Return True if the device supports collections for this book list. '''
        raise NotImplementedError()

    def add_book(self, book, replace_metadata):
        '''
        Add the book to the booklist. Intent is to maintain any device-internal
        metadata. Return True if booklists must be sync'ed
        '''
        raise NotImplementedError()

    def remove_book(self, book):
        '''
        Remove a book from the booklist. Correct any device metadata at the
        same time
        '''
        raise NotImplementedError()

    def get_collections(self, collection_attributes):
        '''
        Return a dictionary of collections created from collection_attributes.
        Each entry in the dictionary is of the form collection name:[list of
        books]

        The list of books is sorted by book title, except for collections
        created from series, in which case series_index is used.

        :param collection_attributes: A list of attributes of the Book object
        '''
        raise NotImplementedError()
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Unit Tests for cylc.flow.parsec.validate.ParsecValidator.coerce methods."""
from typing import List
import pytest
from pytest import approx
from cylc.flow.parsec.config import ConfigNode as Conf
from cylc.flow.parsec.OrderedDict import OrderedDictWithDefaults
from cylc.flow.parsec.exceptions import IllegalValueError
from cylc.flow.parsec.validate import (
CylcConfigValidator as VDR,
DurationFloat,
ListValueError,
IllegalItemError,
ParsecValidator,
parsec_validate
)
@pytest.fixture
def sample_spec():
    """Return a nested Conf spec tree used by the validator tests below."""
    with Conf('myconf') as myconf:
        with Conf('section1'):
            Conf('value1', default='')
            Conf('value2', default='what?')
        with Conf('section2'):
            Conf('enabled', VDR.V_BOOLEAN)
        with Conf('section3'):
            Conf('title', default='default', options=['1', '2'])
            Conf(
                'amounts',
                VDR.V_INTEGER_LIST,
                default=[1, 2, 3],
                # options=[[1, 2, 3]]
            )
            with Conf('entries'):
                Conf('key')
                Conf('value')
        with Conf('<whatever>'):
            # User-definable section: any name matches this spec entry.
            Conf('section300000', default='')
            Conf('ids', VDR.V_INTEGER_LIST)
    return myconf
@pytest.fixture
def validator_invalid_values():
    """
    Data provider of invalid values for parsec validator. All values must not
    be null (covered elsewhere), and not dict's.

    Possible invalid scenarios must include:

    - cfg[key] is a list AND a value is not in list of the possible values
    - OR
    - cfg[key] is not a list AND cfg[key] not in the list of possible values

    :return: a list with sets of tuples for the test parameters
    :rtype: list
    """
    values = []

    # variables reused throughout
    spec = None
    msg = None

    # set 1 (t, f, f, t): list value, all items within options -> no error
    with Conf('base') as spec:
        Conf('value', VDR.V_INTEGER_LIST, default=1, options=[1, 2, 3, 4])
    cfg = OrderedDictWithDefaults()
    cfg['value'] = "1, 2, 3"
    msg = None
    values.append((spec, cfg, msg))

    # set 2 (t, t, f, t): list value, 5 is not an allowed option -> error
    with Conf('base') as spec:
        Conf('value', VDR.V_INTEGER_LIST, default=1, options=[1, 2, 3, 4])
    cfg = OrderedDictWithDefaults()
    cfg['value'] = "1, 2, 5"
    msg = '(type=option) value = [1, 2, 5]'
    values.append((spec, cfg, msg))

    # set 3 (f, f, t, f): scalar value within options -> no error
    with Conf('base') as spec:
        Conf('value', VDR.V_INTEGER, default=1, options=[2, 3, 4])
    cfg = OrderedDictWithDefaults()
    cfg['value'] = "2"
    msg = None
    values.append((spec, cfg, msg))

    # set 4 (f, f, t, t): scalar value outside options -> error
    with Conf('base') as spec:
        Conf('value', VDR.V_INTEGER, default=1, options=[1, 2, 3, 4])
    cfg = OrderedDictWithDefaults()
    cfg['value'] = "5"
    msg = '(type=option) value = 5'
    values.append((spec, cfg, msg))

    return values
@pytest.fixture
def strip_and_unquote_list():
    """Return (input, expected) pairs for strip_and_unquote_list tests."""
    return [
        [
            '"a,b", c, "d e"',  # input
            ["a,b", "c", "d e"]  # expected
        ],
        [
            'foo bar baz',  # input
            ["foo bar baz"]  # expected
        ],
        [
            '"a", \'b\', c',  # input
            ["a", "b", "c"]  # expected
        ],
        [
            'a b c, d e f',  # input
            ["a b c", "d e f"]  # expected
        ],
    ]
def test_list_value_error():
    """str() of a ListValueError embeds the keys, value and reason."""
    keys = ['a,', 'b', 'c']
    value = 'a sample value'
    error = ListValueError(keys, value, "who cares")
    output = str(error)
    expected = '(type=list) [a,][b]c = a sample value - (who cares)'
    assert expected == output
def test_list_value_error_with_exception():
    """A wrapped exception's message is prefixed to the reason."""
    keys = ['a,', 'b', 'c']
    value = 'a sample value'
    exc = Exception('test')
    error = ListValueError(keys, value, "who cares", exc)
    output = str(error)
    expected = '(type=list) [a,][b]c = a sample value - (test: who cares)'
    assert expected == output
def test_illegal_value_error():
    """str() of an IllegalValueError embeds the value type, keys and value."""
    value_type = 'ClassA'
    keys = ['a,', 'b', 'c']
    value = 'a sample value'
    error = IllegalValueError(value_type, keys, value)
    output = str(error)
    expected = "(type=ClassA) [a,][b]c = a sample value"
    assert expected == output
def test_illegal_value_error_with_exception():
    """A wrapped exception's message is appended in parentheses."""
    value_type = 'ClassA'
    keys = ['a,', 'b', 'c']
    value = 'a sample value'
    exc = Exception('test')
    error = IllegalValueError(value_type, keys, value, exc)
    output = str(error)
    expected = "(type=ClassA) [a,][b]c = a sample value - (test)"
    assert expected == output
def test_illegal_item_error():
    """str() of an IllegalItemError embeds the keys and offending key."""
    keys = ['a,', 'b', 'c']
    key = 'a sample value'
    error = IllegalItemError(keys, key)
    output = str(error)
    expected = "[a,][b][c]a sample value"
    assert expected == output
def test_illegal_item_error_message():
    """An optional message is appended in parentheses."""
    keys = ['a,', 'b', 'c']
    key = 'a sample value'
    message = "invalid"
    error = IllegalItemError(keys, key, message)
    output = str(error)
    expected = "[a,][b][c]a sample value - (invalid)"
    assert expected == output
def test_parsec_validator_invalid_key(sample_spec):
    """A config key absent from the spec must raise IllegalItemError."""
    parsec_validator = ParsecValidator()
    cfg = OrderedDictWithDefaults()
    cfg['section1'] = OrderedDictWithDefaults()
    cfg['section1']['value1'] = '1'
    cfg['section1']['value2'] = '2'
    cfg['section22'] = 'abc'  # not in sample_spec
    with pytest.raises(IllegalItemError):
        parsec_validator.validate(cfg, sample_spec)
def test_parsec_validator_invalid_key_no_spec(sample_spec):
    """Without a __MANY__ entry, unknown sections must be rejected."""
    parsec_validator = ParsecValidator()
    cfg = OrderedDictWithDefaults()
    cfg['section1'] = OrderedDictWithDefaults()
    cfg['section1']['value1'] = '1'
    cfg['section1']['value2'] = '2'
    cfg['section22'] = 'abc'
    # remove the user-defined section from the spec
    sample_spec._children = {
        key: value
        for key, value in sample_spec._children.items()
        if key != '__MANY__'
    }
    with pytest.raises(IllegalItemError):
        parsec_validator.validate(cfg, sample_spec)
def test_parsec_validator_invalid_key_with_many_spaces(sample_spec):
    """A section name containing consecutive spaces must be rejected."""
    parsec_validator = ParsecValidator()
    cfg = OrderedDictWithDefaults()
    cfg['section1'] = OrderedDictWithDefaults()
    cfg['section1']['value1'] = '1'
    cfg['section1']['value2'] = '2'
    cfg['section 3000000'] = 'test'  # consecutive spaces in the key
    with pytest.raises(IllegalItemError) as cm:
        parsec_validator.validate(cfg, sample_spec)
    # pytest.raises yields an ExceptionInfo whose exception is `.value`;
    # `.exception` is the unittest API and raises AttributeError here,
    # so the original assertion could never actually run.
    assert str(cm.value) == "section 3000000 - (consecutive spaces)"
def test_parsec_validator_invalid_key_with_many_invalid_values(
    validator_invalid_values
):
    """Each fixture case either validates cleanly or raises with `msg`."""
    for spec, cfg, msg in validator_invalid_values:
        parsec_validator = ParsecValidator()
        if msg is not None:
            with pytest.raises(IllegalValueError) as cm:
                parsec_validator.validate(cfg, spec)
            assert msg == str(cm.value)
        else:
            # cylc.flow.parsec_validator.validate(cfg, spec)
            # let's use the alias `parsec_validate` here
            parsec_validate(cfg, spec)
            # TBD assertIsNotNone when 2.6+
            assert parsec_validator is not None
def test_parsec_validator_invalid_key_with_many_1(sample_spec):
    """A section matching the __MANY__ spec entry validates cleanly."""
    parsec_validator = ParsecValidator()
    cfg = OrderedDictWithDefaults()
    cfg['section1'] = OrderedDictWithDefaults()
    cfg['section1']['value1'] = '1'
    cfg['section1']['value2'] = '2'
    cfg['section3000000'] = OrderedDictWithDefaults()
    parsec_validator.validate(cfg, sample_spec)
    # TBD assertIsNotNone when 2.6+
    assert parsec_validator is not None
def test_parsec_validator_invalid_key_with_many_2(sample_spec):
    """Nested 'entries' under section3 validates against the spec."""
    parsec_validator = ParsecValidator()
    cfg = OrderedDictWithDefaults()
    cfg['section3'] = OrderedDictWithDefaults()
    cfg['section3']['title'] = '1'
    cfg['section3']['entries'] = OrderedDictWithDefaults()
    cfg['section3']['entries']['key'] = 'name'
    cfg['section3']['entries']['value'] = "1, 2, 3, 4"
    parsec_validator.validate(cfg, sample_spec)
    # TBD assertIsNotNone when 2.6+
    assert parsec_validator is not None
def test_parsec_validator(sample_spec):
    """A valid config (including a None option value) validates cleanly."""
    parsec_validator = ParsecValidator()
    cfg = OrderedDictWithDefaults()
    cfg['section1'] = OrderedDictWithDefaults()
    cfg['section1']['value1'] = '1'
    cfg['section1']['value2'] = '2'
    cfg['section3'] = OrderedDictWithDefaults()
    cfg['section3']['title'] = None
    parsec_validator.validate(cfg, sample_spec)
    # TBD assertIsNotNone when 2.6+
    assert parsec_validator is not None
# --- static methods
def test_coerce_none_fails():
    """Coercing None (rather than a string) raises AttributeError."""
    with pytest.raises(AttributeError):
        ParsecValidator.coerce_boolean(None, [])
    with pytest.raises(AttributeError):
        ParsecValidator.coerce_float(None, [])
    with pytest.raises(AttributeError):
        ParsecValidator.coerce_int(None, [])
def test_coerce_boolean():
    """Test coerce_boolean."""
    validator = ParsecValidator()
    # The good: quoted/unquoted, padded, either case; empty coerces to None.
    for value, result in [
            ('True', True),
            (' True ', True),
            ('"True"', True),
            ("'True'", True),
            ('true', True),
            (' true ', True),
            ('"true"', True),
            ("'true'", True),
            ('False', False),
            (' False ', False),
            ('"False"', False),
            ("'False'", False),
            ('false', False),
            (' false ', False),
            ('"false"', False),
            ("'false'", False),
            ('', None),
            (' ', None)
    ]:
        assert validator.coerce_boolean(value, ['whatever']) == result
    # The bad: anything that is not a recognised boolean spelling.
    for value in [
            'None', ' Who cares? ', '3.14', '[]', '[True]', 'True, False'
    ]:
        with pytest.raises(IllegalValueError):
            validator.coerce_boolean(value, ['whatever'])
@pytest.mark.parametrize(
    'value, expected',
    [
        ('3', 3.0),
        ('9.80', 9.80),
        ('3.141592654', 3.141592654),
        ('"3.141592654"', 3.141592654),
        ("'3.141592654'", 3.141592654),
        ('-3', -3.0),
        ('-3.1', -3.1),
        ('0', 0.0),
        ('-0', -0.0),
        ('0.0', 0.0),
        ('1e20', 1.0e20),
        ('6.02e23', 6.02e23),
        ('-1.6021765e-19', -1.6021765e-19),
        ('6.62607004e-34', 6.62607004e-34),
    ]
)
def test_coerce_float(value: str, expected: float):
    """Test coerce_float."""
    # approx() absorbs floating point representation noise.
    assert (
        ParsecValidator.coerce_float(value, ['whatever']) == approx(expected)
    )
def test_coerce_float__empty():
    # An empty string is not a number: it coerces to None, not 0.0.
    assert ParsecValidator.coerce_float('', ['whatever']) is None
@pytest.mark.parametrize(
    'value',
    ['None', ' Who cares? ', 'True', '[]', '[3.14]', '3.14, 2.72']
)
def test_coerce_float__bad(value: str):
    """Non-numeric input must raise IllegalValueError."""
    with pytest.raises(IllegalValueError):
        ParsecValidator.coerce_float(value, ['whatever'])
@pytest.mark.parametrize(
    'value, expected',
    [
        ('', []),
        ('3', [3.0]),
        # 'N*x' is repetition syntax: N copies of x.
        ('2*3.141592654', [3.141592654, 3.141592654]),
        ('12*8, 8*12.0', [8.0] * 12 + [12.0] * 8),
        ('-3, -2, -1, -0.0, 1.0', [-3.0, -2.0, -1.0, -0.0, 1.0]),
        ('6.02e23, -1.6021765e-19, 6.62607004e-34',
         [6.02e23, -1.6021765e-19, 6.62607004e-34]),
    ]
)
def test_coerce_float_list(value: str, expected: List[float]):
    """Test coerce_float_list."""
    items = ParsecValidator.coerce_float_list(value, ['whatever'])
    assert items == approx(expected)
@pytest.mark.parametrize(
    'value',
    ['None', 'e, i, e, i, o', '[]', '[3.14]', 'pi, 2.72', '2*True']
)
def test_coerce_float_list__bad(value: str):
    """Lists containing non-floats must raise IllegalValueError."""
    with pytest.raises(IllegalValueError):
        ParsecValidator.coerce_float_list(value, ['whatever'])
@pytest.mark.parametrize(
    'value, expected',
    [
        ('0', 0),
        ('3', 3),
        ('-3', -3),
        ('-0', -0),
        ('653456', 653456),
        ('-8362583645365', -8362583645365)
    ]
)
def test_coerce_int(value: str, expected: int):
    """Test coerce_int."""
    assert ParsecValidator.coerce_int(value, ['whatever']) == expected
def test_coerce_int__empty():
    assert ParsecValidator.coerce_int('', ['whatever']) is None  # not a number
@pytest.mark.parametrize(
    'value',
    ['None', ' Who cares? ', 'True', '4.8', '[]', '[3]', '60*60']
)
def test_coerce_int__bad(value: str):
    """Non-integer input must raise IllegalValueError."""
    with pytest.raises(IllegalValueError):
        ParsecValidator.coerce_int(value, ['whatever'])
def test_coerce_int_list():
    """Test coerce_int_list."""
    validator = ParsecValidator()
    # The good: 'a..b' is an inclusive range, 'a..b..s' a stepped range,
    # and 'N*x' repeats x N times.
    for value, results in [
            ('', []),
            ('3', [3]),
            ('1..10, 11..20..2',
             [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19]),
            ('18 .. 24', [18, 19, 20, 21, 22, 23, 24]),
            ('18 .. 24 .. 3', [18, 21, 24]),
            ('-10..10..3', [-10, -7, -4, -1, 2, 5, 8]),
            ('10*3, 4*-6', [3] * 10 + [-6] * 4),
            ('10*128, -78..-72, 2048',
             [128] * 10 + [-78, -77, -76, -75, -74, -73, -72, 2048])
    ]:
        assert validator.coerce_int_list(value, ['whatever']) == results
    # The bad
    for value in [
            'None', 'e, i, e, i, o', '[]', '1..3, x', 'one..ten'
    ]:
        with pytest.raises(IllegalValueError):
            validator.coerce_int_list(value, ['whatever'])
def test_coerce_str():
    """Test coerce_str."""
    validator = ParsecValidator()
    # The good: quotes are stripped, multi-line values are dedented to the
    # shallowest common indent, and a list input is joined with newlines.
    for value, result in [
            ('', ''),
            ('Hello World!', 'Hello World!'),
            ('"Hello World!"', 'Hello World!'),
            ('"Hello Cylc\'s World!"', 'Hello Cylc\'s World!'),
            ("'Hello World!'", 'Hello World!'),
            ('0', '0'),
            ('My list is:\nfoo, bar, baz\n', 'My list is:\nfoo, bar, baz'),
            ('  Hello:\n    foo\n  Hi:\n    bar\n', 'Hello:\n  foo\nHi:\n  bar'),
            ('  Hello:\n    foo\n Greet\n  baz\n',
             ' Hello:\n   foo\nGreet\n baz'),
            ('False', 'False'),
            ('None', 'None'),
            (['a', 'b'], 'a\nb')
    ]:
        assert validator.coerce_str(value, ['whatever']) == result
def test_coerce_str_list():
    """Test coerce_str_list."""
    validator = ParsecValidator()
    # The good: comma-separated, quotes stripped, newlines allowed between
    # items, embedded spaces preserved within an item.
    for value, results in [
            ('', []),
            ('Hello', ['Hello']),
            ('"Hello"', ['Hello']),
            ('1', ['1']),
            ('Mercury, Venus, Earth, Mars',
             ['Mercury', 'Venus', 'Earth', 'Mars']),
            ('Mercury, Venus, Earth, Mars,\n"Jupiter",\n"Saturn"\n',
             ['Mercury', 'Venus', 'Earth', 'Mars', 'Jupiter', 'Saturn']),
            ('New Zealand, United Kingdom',
             ['New Zealand', 'United Kingdom'])
    ]:
        assert validator.coerce_str_list(value, ['whatever']) == results
def test_strip_and_unquote():
    """An unterminated triple-quote must raise IllegalValueError."""
    with pytest.raises(IllegalValueError):
        ParsecValidator.strip_and_unquote(['a'], '"""')
def test_strip_and_unquote_list_parsec():
    """Test strip_and_unquote_list using ParsecValidator."""
    for value, results in [
            ('"a"\n"b"', ['a', 'b']),
            ('"a", "b"', ['a', 'b']),
            ('"a", "b"', ['a', 'b']),
            ('"c" # d', ['c']),  # trailing '#' comments are stripped
            ('"a", "b", "c" # d', ['a', 'b', 'c']),
            ('"a"\n"b"\n"c" # d', ['a', 'b', 'c']),
            ("'a', 'b'", ['a', 'b']),
            ("'c' #d", ['c']),
            ("'a', 'b', 'c' # d", ['a', 'b', 'c']),
            ("'a'\n'b'\n'c' # d", ['a', 'b', 'c']),
            ('a, b, c,', ['a', 'b', 'c']),
            ('a, b, c # d', ['a', 'b', 'c']),
            # NOTE(review): the last two cases pin down quirky behaviour for
            # mixed quoted/unquoted multi-line input — confirm it is intended.
            ('a, b, c\n"d"', ['a', 'b', 'd']),
            ('a, b, c\n"d" # e', ['a', 'b', '"d"'])
    ]:
        assert results == ParsecValidator.strip_and_unquote_list(
            ['a'], value)
def test_strip_and_unquote_list_cylc(strip_and_unquote_list):
    """Test strip_and_unquote_list using CylcConfigValidator."""
    validator = VDR()
    for values in strip_and_unquote_list:
        value = values[0]
        expected = values[1]
        output = validator.strip_and_unquote_list(keys=[], value=value)
        assert expected == output
def test_strip_and_unquote_list_multiparam():
    """Multiple parameters inside '<...>' must raise ListValueError."""
    with pytest.raises(ListValueError):
        ParsecValidator.strip_and_unquote_list(
            ['a'], 'a, b, c<a,b>'
        )
def test_coerce_cycle_point():
    """Test coerce_cycle_point."""
    validator = VDR()
    # The good: integer points and ISO 8601 date-times pass through.
    for value, result in [
            ('', None),
            ('3', '3'),
            ('2018', '2018'),
            ('20181225T12Z', '20181225T12Z'),
            ('2018-12-25T12:00+11:00', '2018-12-25T12:00+11:00')]:
        assert validator.coerce_cycle_point(value, ['whatever']) == result
    # The bad
    for value in [
            'None', ' Who cares? ', 'True', '1, 2', '20781340E10']:
        with pytest.raises(IllegalValueError):
            validator.coerce_cycle_point(value, ['whatever'])
def test_coerce_cycle_point_format():
    """Test coerce_cycle_point_format."""
    validator = VDR()
    # The good: strftime-style and ISO 8601 'CCYY...' templates.
    for value, result in [
            ('', None),
            ('%Y%m%dT%H%M%z', '%Y%m%dT%H%M%z'),
            ('CCYYMMDDThhmmZ', 'CCYYMMDDThhmmZ'),
            ('XCCYYMMDDThhmmZ', 'XCCYYMMDDThhmmZ')]:
        assert (
            validator.coerce_cycle_point_format(value, ['whatever'])
            == result
        )
    # The bad
    # '/' and ':' not allowed in cylc cycle points (they are used in paths).
    for value in ['%i%j', 'Y/M/D', '%Y-%m-%dT%H:%MZ']:
        with pytest.raises(IllegalValueError):
            validator.coerce_cycle_point_format(value, ['whatever'])
def test_coerce_cycle_point_time_zone():
    """Test coerce_cycle_point_time_zone."""
    validator = VDR()
    # The good: 'Z' or numeric UTC offsets.
    for value, result in [
            ('', None),
            ('Z', 'Z'),
            ('+0000', '+0000'),
            ('+0100', '+0100'),
            ('+1300', '+1300'),
            ('-0630', '-0630')]:
        assert (
            validator.coerce_cycle_point_time_zone(value, ['whatever'])
            == result
        )
    # The bad
    for value in ['None', 'Big Bang Time', 'Standard Galaxy Time']:
        with pytest.raises(IllegalValueError):
            validator.coerce_cycle_point_time_zone(value, ['whatever'])
def test_coerce_interval():
    """Test coerce_interval."""
    validator = VDR()
    # The good: ISO 8601 durations coerce to DurationFloat seconds.
    for value, result in [
            ('', None),
            ('P3D', DurationFloat(259200)),
            ('PT10M10S', DurationFloat(610))]:
        assert validator.coerce_interval(value, ['whatever']) == result
    # The bad: plain numbers / free text are not ISO 8601 durations.
    for value in ['None', '5 days', '20', '-12']:
        with pytest.raises(IllegalValueError):
            validator.coerce_interval(value, ['whatever'])
@pytest.mark.parametrize(
    'value, expected',
    [
        ('', []),
        ('P3D', [DurationFloat(259200)]),
        ('P3D, PT10M10S', [DurationFloat(259200), DurationFloat(610)]),
        # 'N*x' repetition syntax also works for durations.
        ('25*PT30M,10*PT1H',
         [DurationFloat(1800)] * 25 + [DurationFloat(3600)] * 10)
    ]
)
def test_coerce_interval_list(value: str, expected: List[DurationFloat]):
    """Test coerce_interval_list."""
    assert VDR.coerce_interval_list(value, ['whatever']) == approx(expected)
@pytest.mark.parametrize(
    'value',
    ['None', '5 days', '20', 'PT10S, -12']
)
def test_coerce_interval_list__bad(value: str):
    """Lists containing non-durations must raise IllegalValueError."""
    with pytest.raises(IllegalValueError):
        VDR.coerce_interval_list(value, ['whatever'])
def test_coerce_parameter_list():
    """Test coerce_parameter_list."""
    validator = VDR()
    # The good: either all names or all integer ranges/values.
    for value, result in [
            ('', []),
            ('planet', ['planet']),
            ('planet, star, galaxy', ['planet', 'star', 'galaxy']),
            ('1..5, 21..25', [1, 2, 3, 4, 5, 21, 22, 23, 24, 25]),
            ('-15, -10, -5, -1..1', [-15, -10, -5, -1, 0, 1])]:
        assert validator.coerce_parameter_list(value, ['whatever']) == result
    # The bad: '/' in names, and mixing names with integer ranges.
    for value in ['foo/bar', 'p1, 1..10', '2..3, 4, p']:
        with pytest.raises(IllegalValueError):
            validator.coerce_parameter_list(value, ['whatever'])
def test_coerce_xtrigger():
    """Test coerce_xtrigger."""
    validator = VDR()
    # The good: function-call syntax, quotes stripped in the signature.
    for value, result in [
            ('foo(x="bar")', 'foo(x=bar)'),
            ('foo(x, y, z="zebra")', 'foo(x, y, z=zebra)')]:
        assert (
            validator.coerce_xtrigger(value, ['whatever']).get_signature()
            == result
        )
    # The bad: empty or malformed call syntax.
    for value in [
            '', 'foo(', 'foo)', 'foo,bar']:
        with pytest.raises(IllegalValueError):
            validator.coerce_xtrigger(value, ['whatever'])
def test_type_help_examples():
    """Every documented example for every value type must actually coerce."""
    types = {
        **ParsecValidator.V_TYPE_HELP,
        **VDR.V_TYPE_HELP
    }

    validator = VDR()
    for vdr, info in types.items():
        coercer = validator.coercers[vdr]
        if len(info) > 2:
            # info[2] holds the example strings for this type.
            for example in info[2]:
                try:
                    coercer(example, [None])
                except Exception:
                    raise Exception(
                        f'Example "{example}" failed for type "{vdr}"')
| oliver-sanders/cylc | tests/unit/parsec/test_validate.py | Python | gpl-3.0 | 21,599 |
__author__ = 'michogarcia'

from setuptools import setup, find_packages

# Single source of the package version.
version = '0.1'

# Packaging metadata for FullDiskAlert: a disk-usage mail alerter.
setup(name='FullDiskAlert',
      version=version,
      author="Micho Garcia",
      author_email="micho.garcia@geomati.co",
      license="LICENSE.txt",
      description="Sends mail when disk is above threshold",
      packages=find_packages(),
      install_requires=[
          'pyyaml',
      ],
      )
#!/usr/bin/env python
"""
A simple python example for multiprocess.
Created by yetship at 2017/4/18 08:57
"""
from random import randint
import multiprocessing
def worker(lower, upper):
    """Process worker: print one random integer drawn from [lower, upper]."""
    value = randint(lower, upper)
    print("get a random int: {}".format(value))
if __name__ == '__main__':
    # Keep references to the children so they can be joined below; without
    # join() the parent may exit before the workers have printed.
    processes = []
    for i in range(3):
        p = multiprocessing.Process(target=worker, args=(i, i ** 2))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
import re
from setuptools import setup, find_packages
# Auto detect the library version from the __init__.py file
with open('xbee/__init__.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
setup(
name='XBee',
version=version,
description='Python tools for working with XBee radios',
long_description=open('README.rst').read(),
url='https://github.com/nioinnovation/python-xbee',
author='n.io',
author_email='info@n.io',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Terminals :: Serial',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
packages=find_packages(exclude=['tests', '*.tests']),
install_requires=['pyserial'],
extras_require={
'tornado': ['tornado']
}
)
| MichaelCoughlinAN/Odds-N-Ends | Python/Python Modules/XBee-2.3.1/setup.py | Python | gpl-3.0 | 1,137 |
# Author: Denis A. Engemann <denis.engemann@gmail.com>
# License: BSD (3-clause)
import mkl
import sys
import os.path as op
import numpy as np
import mne
from mne.io import Raw
from mne.preprocessing import read_ica
from mne.viz import plot_drop_log
from meeg_preprocessing.utils import setup_provenance, set_eog_ecg_channels
from p300.conditions import events_select_condition, get_events
from p300.run_mask_subtraction import subtract_mask
from scripts.wip_artefacts import artefact_rej
from scripts.config import (
data_path,
subjects,
runs,
results_dir,
raw_fname_filt_tmp,
eog_ch,
ecg_ch,
events_fname_filt_tmp,
chan_types,
baseline,
epochs_params,
use_ica,
open_browser
)
# Set up a provenance-tracked results directory, report and logger for this run.
report, run_id, results_dir, logger = setup_provenance(
    script=__file__, results_dir=results_dir)

mne.set_log_level('INFO')

# Optionally restrict processing to a single subject given on the command line.
if len(sys.argv) > 1:
    subjects = [sys.argv[1]]

# Limit MKL to one thread — presumably so several subjects can run in
# parallel without oversubscription (TODO confirm).
mkl.set_num_threads(1)
# Main loop: for each subject, epoch every run, apply ICA, reject artefacts,
# save and plot the epochs before and after mask subtraction.
for subject in subjects:
    print(subject)
    this_path = op.join(data_path, 'MEG', subject)

    all_epochs = [list(), list()]  # XXX should take len(epochs_params)
    icas = list()
    if use_ica is True:
        # Load one pre-computed ICA decomposition per channel type.
        for chan_type in chan_types:
            icas.append(read_ica(
                op.join(this_path, '{}-ica.fif'.format(chan_type['name']))))
    for r in runs:
        fname = op.join(this_path, raw_fname_filt_tmp.format(r))
        if not op.isfile(fname):
            logger.info('Could not find %s. Skipping' % fname)
            continue
        raw = Raw(fname)
        # Set ECG EOG channels XXX again?
        set_eog_ecg_channels(raw, eog_ch=eog_ch, ecg_ch=ecg_ch)

        # Get events identified in run_extract_events
        events = mne.read_events(
            op.join(this_path, events_fname_filt_tmp.format(r)))

        # Epoch data for each epoch type
        for ep, epochs_list in zip(epochs_params, all_epochs):
            # Select events
            sel = events_select_condition(events[:, 2], ep['events'])
            events_sel = events[sel, :]

            # Only keep parameters applicable to mne.Epochs() XXX CLEAN UP JR!
            ep_epochs = {key: v for key, v in ep.items() if key in ['event_id',
                                                                    'tmin',
                                                                    'tmax',
                                                                    'baseline',
                                                                    'reject',
                                                                    'decim']}

            # Epoch raw data
            epochs = mne.Epochs(raw=raw, preload=True, events=events_sel,
                                **ep_epochs)

            # Redefine t0 if necessary
            if 'time_shift' in ep.keys():
                epochs.times += ep['time_shift']
                epochs.tmin += ep['time_shift']
                epochs.tmax += ep['time_shift']

            # ICA correction
            if use_ica is True:
                for ica in icas:
                    ica.apply(epochs)

            # Append runs
            epochs_list.append(epochs)

    for name, epochs_list in zip([ep['name'] for ep in epochs_params],
                                 all_epochs):
        # Concatenate runs
        epochs = mne.epochs.concatenate_epochs(epochs_list)
        # Artefact rejection and rereference EEG data
        # (EEG channel rejection, rereference, then trial rejection)
        epochs = artefact_rej(name, subject, epochs)
        # Save epochs before mask subtraction
        if baseline:
            epochs.save(op.join(this_path, '{}-{}-epo.fif'.format(name,
                                                                  subject)))
        else:
            epochs.save(op.join(this_path,
                                'nobl-{}-{}-epo.fif'.format(name, subject)))
        # Plot before mask subtraction
        # dropped
        report.add_figs_to_section(
            plot_drop_log(epochs.drop_log), '%s (%s): total dropped'
            % (subject, name), 'Drop: ' + name)
        # trigger channel — assumes STI101 holds the triggers (TODO confirm);
        # rewrites it in place to a binarized/thresholded version.
        ch = mne.pick_channels(epochs.ch_names, ['STI101'])
        epochs._data[:, ch, :] = (2.*(epochs._data[:, ch, :] > 1) +
                                  1.*(epochs._data[:, ch, :] < 8192))
        # evoked before mask subtraction
        evoked = epochs.average()
        fig = evoked.plot(show=False)
        report.add_figs_to_section(fig, '%s (%s): butterfly' % (subject, name),
                                   'Butterfly: ' + name)
        times = np.arange(epochs.tmin, epochs.tmax,
                          (epochs.tmax - epochs.tmin) / 20)
        for ch_type in chan_types:
            ch_type = ch_type['name']
            if ch_type in ['eeg', 'meg', 'mag']:
                if ch_type == 'meg':
                    ch_type = 'mag'
                fig = evoked.plot_topomap(times, ch_type=ch_type, show=False)
                report.add_figs_to_section(
                    fig, '%s (%s): topo %s' % (subject, name, ch_type),
                    'Topo: ' + name)
        # # Mask subtraction
        events = get_events(epochs.events)
        epochs = subtract_mask(epochs, events['soa_ttl'], events['present'])
        # Save mask-subtracted data
        if baseline:
            epochs.save(op.join(this_path,
                                '{}-unmasked-{}-epo.fif'.format(name,
                                                                subject)))
        else:
            epochs.save(op.join(this_path,
                                'nobl-{}-unmasked-{}-epo.fif'.format(name,
                                                                     subject)))
        # Plot mask-subtracted data
        evoked = epochs.average()
        fig = evoked.plot(show=False)
        report.add_figs_to_section(fig, '%s (%s) UNMASKED: butterfly'
                                   % (subject, name), 'Butterfly: ' + name)
        times = np.arange(epochs.tmin, epochs.tmax,
                          (epochs.tmax - epochs.tmin) / 20)
        for ch_type in chan_types:
            ch_type = ch_type['name']
            if ch_type in ['eeg', 'meg', 'mag']:
                if ch_type == 'meg':
                    ch_type = 'mag'
                fig = evoked.plot_topomap(times, ch_type=ch_type, show=False)
                report.add_figs_to_section(
                    fig, '%s (%s): UNMASKED: topo %s' % (subject, name,
                                                         ch_type),
                    'Topo: ' + name)

report.save(open_browser=open_browser)
| kingjr/meg_expectation_p3 | scripts/run_extract_epochs.py | Python | gpl-3.0 | 6,371 |
"""return a jumbled version of a string. eg, the lazy hamster is jumping becomes the lzay hmasetr si jmunipg
shuffles insides of words.
"""
import random
import string
#okay, so this will be the jmuble algorythim
#variables, passed
#string_to_jumble = "" #yeah
#jumble_mode = true # do u switch words of two letters
def string_jumble(string_to_jumble, jumble_mode=True):
    """Print and return a jumbled version of *string_to_jumble*.

    Words (maximal runs of ASCII letters) are jumbled in place while every
    non-letter character (digits, punctuation, spaces) stays at its original
    position.  Words of four or more letters keep their first and last
    letter and have their interior shuffled; words of three letters or
    fewer are shuffled whole, but only when *jumble_mode* is true.

    Fixes over the previous draft: the interior shuffle was commented out,
    punctuation was re-placed via list.index() (wrong for repeated words),
    and debug prints leaked into the output.

    :param string_to_jumble: the text to jumble.
    :param jumble_mode: whether words of up to three letters are shuffled.
    :return: the jumbled string (also printed to stdout).
    """
    def _jumble_word(word):
        # Shuffle one word according to the rules in the docstring.
        if len(word) <= 3:
            if not jumble_mode:
                return word
            letters = list(word)
            random.shuffle(letters)
            return ''.join(letters)
        middle = list(word[1:-1])
        random.shuffle(middle)
        return word[0] + ''.join(middle) + word[-1]

    pieces = []
    current_word = []
    for char in string_to_jumble:
        if char in string.ascii_letters:
            current_word.append(char)
        else:
            if current_word:
                pieces.append(_jumble_word(''.join(current_word)))
                current_word = []
            # Non-letters keep their exact original position.
            pieces.append(char)
    if current_word:  # flush a trailing word
        pieces.append(_jumble_word(''.join(current_word)))

    string_to_return = ''.join(pieces)
    print(string_to_return)
    return string_to_return
#done
# Demo invocations of string_jumble.
#string_jumble("a0boop1boop3boop4boop5hey")
# Exercises letters interleaved with digits and punctuation as separators.
string_jumble("I1think2my3dog4is5terribly6lazy;7I8-9I!mean,£he$-%is^really&quite*fat.")
#string_jumble("")  # empty-input case, untested
| Moth-Tolias/LetterBoy | backend/jumbles nontrobo/jumbles nontrobo.py | Python | gpl-3.0 | 4,226 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import pymeanshift as pms
from blobs.BlobResult import CBlobResult
from blobs.Blob import CBlob # Note: This must be imported in order to destroy blobs and use other methods
#############################################################################
# so, here is the main part of the program
# NOTE(review): this script uses the legacy cv2.cv API (OpenCV 2.x only) and
# Python 2 print statements; it will not run on OpenCV 3+/Python 3 as-is.
if __name__ == '__main__':
    import sys
    import os

    blob_overlay = True
    # Input image; can be overridden by the first commandline argument.
    file_name = "plates/license1.png"
    if len(sys.argv) != 1:
        file_name = sys.argv[1]
    # Prefix used for all the intermediate debug images written below.
    base_name = os.path.basename(file_name)
    fname_prefix = ".".join(base_name.split(".")[:-1])
    print fname_prefix

    # Image load & conversion to cvmat
    license_plate = cv2.imread(file_name, cv2.CV_LOAD_IMAGE_COLOR)

    # Mean-shift segmentation (spatial radius 3, range radius 3, min density 50).
    segmented, labels, regions = pms.segment(license_plate, 3, 3, 50)
    print "Segmentation results"
    print "%s: %s" % ("labels", labels)
    print "%s: %s" % ("regions", regions)
    cv2.imwrite('%s_segmented.png' % fname_prefix, segmented)
    # Re-read the segmented image so the rest of the pipeline works on it.
    license_plate = cv2.imread('%s_segmented.png' % fname_prefix, cv2.CV_LOAD_IMAGE_COLOR)
    license_plate_size = (license_plate.shape[1], license_plate.shape[0])
    license_plate_cvmat = cv2.cv.fromarray(license_plate)
    license_plate_ipl = cv2.cv.CreateImage(license_plate_size, cv2.cv.IPL_DEPTH_8U, 3)
    cv2.cv.SetData(
        license_plate_ipl,
        license_plate.tostring(),
        license_plate.dtype.itemsize * 3 * license_plate.shape[1])
    license_plate_white_ipl = cv2.cv.CreateImage(license_plate_size, cv2.cv.IPL_DEPTH_8U, 1)
    cv2.cv.Set(license_plate_white_ipl, 255)

    # Grayscale conversion
    inverted_license_plate_grayscale_ipl = cv2.cv.CreateImage(
        license_plate_size,
        cv2.cv.IPL_DEPTH_8U, 1)
    license_plate_grayscale_ipl = cv2.cv.CreateImage(
        license_plate_size,
        cv2.cv.IPL_DEPTH_8U, 1)
    cv2.cv.CvtColor(
        license_plate_cvmat,
        license_plate_grayscale_ipl,
        cv2.COLOR_RGB2GRAY);
    license_plate_grayscale_np = np.asarray(license_plate_grayscale_ipl[:,:])
    # We can also use cv.saveimage
    # cv2.cv.SaveImage('license1_grayscale.png', license_plate_grayscale_ipl)
    cv2.imwrite('%s_grayscale.png' % fname_prefix, license_plate_grayscale_np)

    # Thresholding or binarization of images (Otsu picks the partition value).
    (threshold_value, thresh_image) = cv2.threshold(
        license_plate_grayscale_np,
        128,
        255,
        cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    print "Thresholding complete. Partition value is %d" % threshold_value
    cv2.imwrite('%s_threshold.png' % fname_prefix, thresh_image)

    # Create a mask that will cover the entire image
    mask = cv2.cv.CreateImage (license_plate_size, 8, 1)
    cv2.cv.Set(mask, 1)
    #if not blob_overlay:
    #    # Convert black-and-white version back into three-color representation
    #    cv2.cv.CvtColor(my_grayscale, frame_cvmat, cv2.COLOR_GRAY2RGB);

    # Blob detection
    thresh_image_ipl = cv2.cv.CreateImage(license_plate_size, cv2.cv.IPL_DEPTH_8U, 1)
    cv2.cv.SetData(
        thresh_image_ipl,
        thresh_image.tostring(),
        thresh_image.dtype.itemsize * 1 * thresh_image.shape[1])
    cv2.cv.Not(thresh_image_ipl, inverted_license_plate_grayscale_ipl)

    # Min blob size and Max blob size (in pixels of blob area)
    min_blob_size = 100
    max_blob_size = 10000
    threshold = 100
    # Plate area as % of image area:
    max_plate_to_image_ratio = 0.3
    min_plate_to_image_ratio = 0.01
    image_area = license_plate_size[0] * license_plate_size[1]

    # Mask - Blob extracted where mask is set to 1
    # Third parameter is threshold value to apply prior to blob detection
    # Boolean indicating whether we find moments
    myblobs = CBlobResult(thresh_image_ipl, mask, threshold, True)
    myblobs.filter_blobs(min_blob_size, max_blob_size)
    blob_count = myblobs.GetNumBlobs()
    print "Found %d blob[s] betweeen size %d and %d using threshold %d" % (
        blob_count, min_blob_size, max_blob_size, threshold)

    # Paint each kept blob onto the grayscale image and onto a white canvas.
    for i in range(blob_count):
        my_enumerated_blob = myblobs.GetBlob(i)
        # print "%d: Area = %d" % (i, my_enumerated_blob.Area())
        my_enumerated_blob.FillBlob(
            license_plate_grayscale_ipl,
            #license_plate_ipl,
            #cv2.cv.Scalar(255, 0, 0),
            cv2.cv.CV_RGB(255, 0, 0),
            0, 0)
        my_enumerated_blob.FillBlob(
            license_plate_white_ipl,
            #license_plate_ipl,
            #cv2.cv.Scalar(255, 0, 0),
            cv2.cv.CV_RGB(255, 255, 255),
            0, 0)

    # we can now save the image
    #annotated_image = np.asarray(license_plate_ipl[:,:])
    blob_image = np.asarray(license_plate_grayscale_ipl[:,:])
    cv2.imwrite("%s_blobs.png" % fname_prefix, blob_image)
    blob_white_image = np.asarray(license_plate_white_ipl[:,:])
    cv2.imwrite("%s_white_blobs.png" % fname_prefix, blob_white_image)

    # Looking for a rectangle - Plates are rectangular
    # Thresholding image, the find contours then approxPolyDP
    (threshold_value, blob_threshold_image) = cv2.threshold(
        blob_white_image,
        128,
        255,
        cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    print "Thresholding complete. Partition value is %d" % threshold_value
    cv2.imwrite('%s_blob_threshold.png' % fname_prefix, blob_threshold_image)

    # Blur to reduce noise?
    #blurred_plate = cv2.GaussianBlur(blob_threshold_image, (5,5), 0)
    #blob_threshold_image = blurred_plate

    # Erode then dilate to reduce noise
    blob_threshold_image_invert = cv2.bitwise_not(blob_threshold_image)
    cv2.imwrite("%s_pre_dilated_and_eroded.png" % fname_prefix, blob_threshold_image_invert)
    eroded_white_blobs = cv2.erode(blob_threshold_image_invert, None, iterations=4);
    cv2.imwrite("%s_eroded_image.png" % fname_prefix, eroded_white_blobs)
    dilated_white_blobs = cv2.dilate(eroded_white_blobs, None, iterations=4);
    cv2.imwrite("%s_dilated.png" % fname_prefix, dilated_white_blobs)
    blob_threshold_image = cv2.bitwise_not(blob_threshold_image_invert)
    cv2.imwrite("%s_dilated_and_eroded.png" % fname_prefix, blob_threshold_image)
    blob_threshold_image_invert = cv2.bitwise_not(blob_threshold_image)

    contours, hierarchy = cv2.findContours(
        blob_threshold_image,
        cv2.RETR_LIST,
        cv2.CHAIN_APPROX_SIMPLE)
    #print "Contours: ", contours

    # We now have contours. Approximate the polygon shapes and keep the
    # largest 4-sided contour whose area ratio and aspect ratio look like
    # a license plate.
    largest_rectangle_idx = 0
    largest_rectangle_area = 0
    rectangles = []
    colours = ( (255,0,0), (0,255,0), (0,0,255), (255,255,0), (0,255,255))
    for idx, contour in enumerate(contours):
        print "Contour: %d" % idx
        contour_area = cv2.contourArea(contour)
        # contourArea returns a float, so this is true division even on py2.
        if float(contour_area / image_area) < min_plate_to_image_ratio:
            print "Contour %d under threshold. Countour Area: %f" % (idx, contour_area)
            continue
        elif float(contour_area / image_area) > max_plate_to_image_ratio:
            print "Contour %d over threshold. Countour Area: %f" % (idx, contour_area)
            continue
        approx = cv2.approxPolyDP(
            contour,
            0.02 * cv2.arcLength(contour, True),
            True)
        print "\n -"
        print "%d. Countour Area: %f, Arclength: %f, Polygon %d colour:%s" % (idx,
            contour_area,
            cv2.arcLength(contour, True),
            len(approx),
            colours[idx%len(colours)])
        minarea_rectangle = cv2.minAreaRect(contour)
        minarea_box = cv2.cv.BoxPoints(minarea_rectangle)
        print "> ", minarea_rectangle
        print ">> ", minarea_box
        centre, width_and_height, theta = minarea_rectangle
        aspect_ratio = float(max(width_and_height) / min(width_and_height))
        print " aspect ratio: %f for %s " % (aspect_ratio, width_and_height)
        minarea_box = np.int0(minarea_box)
        cv2.drawContours(license_plate, [minarea_box], 0, (255,0,255), 2)
        cv2.drawContours(
            license_plate,
            [contours[idx]],
            0,
            colours[idx%len(colours)])
        # Aspect ratio removal (typical plates are 3x-5x wider than tall)
        if aspect_ratio < 3 or aspect_ratio > 5:
            print " Aspect ratio bounds fails"
            continue
        # Rectangles have polygon shape 4
        if len(approx) == 4:
            # Select the largest rect
            rectangles.append(contour)
            if contour_area > largest_rectangle_area :
                largest_rectangle_area = contour_area
                largest_rectangle_idx = idx

    print "Probable plate hit is %d" % largest_rectangle_idx
    # NOTE(review): `idx + 1` below reuses the loop variable after the loop
    # (thickness of the last contour index + 1) and raises NameError when
    # `contours` is empty -- confirm this is intended.
    cv2.drawContours(
        license_plate,
        [contours[largest_rectangle_idx]],
        0,
        colours[0],
        idx + 1)
    cv2.imwrite("%s_contours_colored.png" % fname_prefix, license_plate)

    # Create a mask for the detected plate
    #hull = cv2.convexHull(contours[largest_rectangle_idx])
    # This bounding rectangle does not consider rotation
    license_plate = cv2.imread(file_name, cv2.CV_LOAD_IMAGE_COLOR)
    bounding_rectangle = cv2.boundingRect(contours[largest_rectangle_idx])
    b_rect_x, b_rect_y, b_rect_w, b_rect_h = bounding_rectangle
    plate_rectangle = (b_rect_x, b_rect_y, b_rect_w, b_rect_h)
    print "Plate rectangle is: ", plate_rectangle
    cv2.rectangle(license_plate, (b_rect_x, b_rect_y), (b_rect_x + b_rect_w, b_rect_y + b_rect_h), (0, 255, 0), 2)
    cv2.imwrite("%s_bounding_box.png" % fname_prefix, license_plate)

    # Rotation-aware bounding box of the winning contour.
    license_plate = cv2.imread(file_name, cv2.CV_LOAD_IMAGE_COLOR)
    minarea_rectangle = cv2.minAreaRect(contours[largest_rectangle_idx])
    minarea_box = cv2.cv.BoxPoints(minarea_rectangle)
    minarea_box = np.int0(minarea_box)
    cv2.drawContours(license_plate, [minarea_box], 0, (0,0,255), 2)
    cv2.imwrite("%s_bounding_box_minarea.png" % fname_prefix, license_plate)
| xmnlab/minilab | ia/ocr/alpr.py | Python | gpl-3.0 | 10,141 |
# gcp xml backend
# Copyright (C) 2012 Jesse van den Kieboom <jessevdk@gnome.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import GObject, Gcp
from document import Document
class Backend(GObject.Object, Gcp.Backend):
    """gedit code-assistance backend for XML documents.

    Keeps one Document wrapper per registered gedit document and re-runs
    the document's update whenever it reports a change.
    """

    # Read-only GObject property: number of currently registered documents.
    size = GObject.property(type=int, flags = GObject.PARAM_READABLE)
    def __init__(self):
        GObject.Object.__init__(self)
        # List of Document wrappers created in do_register_document.
        self.documents = []
    def do_get_property(self, spec):
        # Serve the 'size' property ourselves; defer everything else to GObject.
        if spec.name == 'size':
            return len(self.documents)
        GObject.Object.do_get_property(self, spec)
    def do_register_document(self, doc):
        # Wrap the raw gedit document and start tracking its changes.
        d = Document(document=doc)
        self.documents.append(d)
        d.connect('changed', self.on_document_changed)
        return d
    def do_unregister_document(self, doc):
        # NOTE(review): 'changed' is connected on the Document wrapper in
        # do_register_document, so `doc` here is presumably the wrapper
        # returned from registration (otherwise disconnect/remove would
        # fail) -- confirm against the Gcp.Backend contract.
        doc.disconnect_by_func(self.on_document_changed)
        self.documents.remove(doc)
    def do_get(self, idx):
        # Indexed accessor used together with the 'size' property.
        return self.documents[idx]
    def on_document_changed(self, doc):
        # Re-run the backend's analysis for the changed document.
        doc.update()
# ex:ts=4:et:
| jonocodes/gedit-code-assistance | backends/xml/gcpbackendxml/backend.py | Python | gpl-3.0 | 1,683 |
from __future__ import unicode_literals
from memory.mem import _Memory
class Windows2003ServerMemory(_Memory):
    """Memory-analysis module for Windows Server 2003.

    Pure delegation: every operation is implemented by the shared
    ``_Memory`` base class.
    """

    def __init__(self, params):
        _Memory.__init__(self, params)

    def csv_all_modules_dll(self):
        """Delegate to the base ``_csv_all_modules_dll`` implementation."""
        self._csv_all_modules_dll()

    def csv_all_modules_opened_files(self):
        """Delegate to the base ``_csv_all_modules_opened_files`` implementation."""
        self._csv_all_modules_opened_files()
#!/usr/bin/env python
# encoding: utf-8
#
# AuthorDetector
# Copyright (C) 2013 Larroque Stephen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from auxlib import *
from collections import OrderedDict
json = import_module('ujson')
if json is None:
json = import_module('json')
if json is None:
raise RuntimeError('Unable to find a json implementation')
## ConfigParser
#
# Configuration parser, will parse and load in memory the configuration and commandline switches
class ConfigParser(object):
    """Parse and hold the application configuration.

    The configuration is read from a JSON file (which may contain
    Javascript-like ``//``, ``/* */`` and ``#`` comments) and can be
    overwritten by commandline switches.  The resulting ``config`` tree
    is shared across the whole application.
    """

    # Configuration file path
    configfile = 'config.json'
    # Configuration parameters tree (will be referenced by almost all other objects across the whole application)
    config = []

    ## Constructor
    def __init__(self, *args, **kwargs):
        return object.__init__(self, *args, **kwargs)

    ## Initialize the ConfigParser object by checking that the configuration file exists
    # @param configfile Path to the configuration file (kept only if it can be opened)
    def init(self, configfile=None, *args, **kwargs):
        if configfile:
            try:
                with open(configfile): pass # check that the file exists
                self.configfile = configfile
            except IOError as e: # py2.6+/py3 compatible exception syntax
                print("Can't open the specified configuration file %s, error: %s" % (configfile, str(e)))
        return

    ## Load a configuration file into the local dict
    # @param pargs Recognized (processed) commandline arguments (this will overwrite parameters from the configuration file in case of conflicts)
    # @param extras Unrecognized (unprocessed) commandline arguments (will also overwrite parameters from the configuration file)
    # @param comments If set to true, Javascript-like comments will be filtered from the configuration file
    def load(self, pargs=None, extras=None, comments=True, *args, **kwargs):
        # Loading the configuration file
        with open(self.configfile) as f:
            # If there are comments in the config, filter them before converting the json to a Python object
            if comments:
                self.config = json.loads(self._removecomments(f.read()))
            # Else we can directly load the json
            else:
                self.config = json.loads(f.read())
        # Overwriting with recognized commandline switches
        if pargs:
            for key, value in pargs.items():
                # only add the argument in config if the argument has a value (not False nor None) and this key is not already defined in the config (so an argument can only overwrite config if defined)
                if not (key in self.config and not value):
                    self.config[key] = value
        # Overwriting with extras commandline switches
        if extras:
            i = 0
            while i < len(extras):
                key = extras[i]
                # Check if the argument accepts a value: it begins with --,
                # there is an argument after this one, and the next argument
                # is in fact a value (does not begin with --)
                if '--' in key and i+1 < len(extras) and not '--' in extras[i+1]:
                    self.config[key.lstrip('-')] = extras[i+1]
                    i += 1 # skip the next argument (which we used as a value)
                # Else this argument has no value, we just set it to True
                else:
                    self.config[key.lstrip('-')] = True
                i += 1

    ## Reload the configuration file
    def reload(self, *args, **kwargs):
        self.load(comments=True, *args, **kwargs)

    ## Save the current configuration (with commandline arguments processed) into a file
    # @param file Path to where the configuration file should be saved
    def save(self, file, *args, **kwargs):
        # open in binary mode to avoid line returns translation (else the reading will be flawed!). We have to do it both at saving and at reading.
        with open(file, 'wb') as f:
            # BUGFIX: encode explicitly so that writing to a binary-mode file
            # also works on Python 3 (json.dumps always returns text there).
            # The config is written as a json serialized string, beautified to be more human readable.
            f.write(json.dumps(self.config, sort_keys=True, indent=4).encode('utf-8'))
        return True

    # Get a value from the config dict (this is a proxy method)
    def get(self, *args, **kwargs):
        if isinstance(self.config, (dict, OrderedDict)):
            return self.config.get(*args, **kwargs)
        else:
            # Safe list getter, with exception handling and default value supplied
            try:
                return self.config[args[0]]
            except IndexError:
                # BUGFIX: this used to read `len(args > 1)`, which raised a
                # TypeError instead of falling back to the default value.
                if len(args) > 1:
                    return args[1]
                else: # by default if no default value was specified, we return None (just like for dictionaries)
                    return None

    # Set a value in the config dict (this is a proxy method)
    def set(self, *args, **kwargs):
        return self.config.update(*args, **kwargs)

    # Set a value in the config dict (this is a proxy method)
    def update(self, *args, **kwargs):
        return self.config.update(*args, **kwargs)

    ## Filter efficiently Javascript-like inline and multiline comments from a JSON file
    # Author: WizKid https://gist.github.com/WizKid/1170297
    # @param s string to filter
    # @return string filtered string without comments
    def _removecomments(self, s):
        inCommentSingle = False
        inCommentMulti = False
        inString = False

        t = []
        l = len(s)

        i = 0
        fromIndex = 0
        while i < l:
            c = s[i]

            if not inCommentMulti and not inCommentSingle:
                if c == '"':
                    # Count preceding backslashes: an even number means this
                    # quote is unescaped and toggles the in-string state.
                    slashes = 0
                    for j in range(i - 1, 0, -1):
                        if s[j] != '\\':
                            break
                        slashes += 1
                    if slashes % 2 == 0:
                        inString = not inString
                elif not inString:
                    if c == '#':
                        inCommentSingle = True
                        t.append(s[fromIndex:i])
                    elif c == '/' and i + 1 < l:
                        cn = s[i + 1]
                        if cn == '/':
                            inCommentSingle = True
                            t.append(s[fromIndex:i])
                            i += 1
                        elif cn == '*':
                            inCommentMulti = True
                            t.append(s[fromIndex:i])
                            i += 1
            elif inCommentSingle and (c == '\n' or c == '\r'):
                # Single-line comments end at the line break (which is kept).
                inCommentSingle = False
                fromIndex = i
            elif inCommentMulti and c == '*' and i + 1 < l and s[i + 1] == '/':
                inCommentMulti = False
                i += 1
                fromIndex = i + 1

            i += 1

        if not inCommentSingle and not inCommentMulti:
            t.append(s[fromIndex:len(s)])

        return "".join(t)
| lrq3000/author-detector | authordetector/configparser.py | Python | gpl-3.0 | 7,707 |
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# NOTE(review): credentials and host are hard-coded in the DSN; consider
# moving them to configuration or environment variables.
engine = create_engine('postgresql://igsql:igsql@127.0.0.1/igsql')
# Thread-local session factory bound to the engine; commits and flushes
# are left explicit to the caller.
db_session = scoped_session(sessionmaker(autocommit=False,
                                         autoflush=False,
                                         bind=engine))
# Declarative base for all models; `Model.query` queries via db_session.
Base = declarative_base()
Base.query = db_session.query_property()
def init_db():
    """Create all database tables for the models defined in igsql.model."""
    # Imported here (not at module level) so the model module, which itself
    # needs Base from this module, can be imported without a circular import.
    import igsql.model
    Base.metadata.create_all(bind=engine)
| karlward/igsql | igsql/database.py | Python | gpl-3.0 | 537 |
# coding: utf8
# topicview.py
# 1/2/2015 jichi
if __name__ == '__main__':
  # Running standalone: make the parent package importable and set up the
  # debug environment before the heavy imports below are executed.
  import sys
  sys.path.append('..')
  import debug
  debug.initenv()
import json
from functools import partial
from PySide.QtCore import Qt, QObject
from Qt5 import QtWidgets
from sakurakit import skevents, skqss
from sakurakit.skclass import Q_Q, memoized, memoizedproperty
from sakurakit.skdebug import dprint, dwarn
from sakurakit.sktr import tr_
from sakurakit.skwebkit import SkWebView #, SkWebViewBean
from sakurakit.skwidgets import SkTitlelessDockWidget, SkStyleView
#from sakurakit.skqml import QmlObject
from mytr import mytr_
import comets, config, dataman, netman, osutil, rc
@Q_Q
class _TopicView(object):
  """Private implementation half of TopicView.

  Per the sakurakit Q_Q idiom, ``self.q`` is the public QMainWindow.
  Renders the topic page in a web view and keeps it live through a
  comet connection that pushes topic/post updates.
  """
  def __init__(self, q):
    self.topicId = 0 # long; 0 means "no topic selected"
    self.topicComet = None # lazily-created comet channel for live updates
    self._createUi(q)
    #shortcut('ctrl+n', self._new, parent=q)
  def _createUi(self, q):
    # Web view in the center, button bar docked at the bottom.
    q.setCentralWidget(self.webView)
    dock = SkTitlelessDockWidget(self.inspector)
    dock.setFeatures(QtWidgets.QDockWidget.NoDockWidgetFeatures)
    #dock.setAllowedAreas(Qt.BottomDockWidgetArea)
    q.addDockWidget(Qt.BottomDockWidgetArea, dock)
  def clear(self):
    self.setTopicId(0)
  def setTopicId(self, topicId): # long ->
    # Switch the comet subscription to the new topic; deactivate it when
    # the view is cleared (topicId == 0).
    if self.topicId != topicId:
      self.topicId = topicId
      if not topicId:
        if self.topicComet:
          self.topicComet.setActive(False)
      else:
        if not self.topicComet:
          # First use: create the comet channel and hook its signals.
          self.topicComet = comets.createPostComet()
          qml = self.topicComet.q
          #qml.topicDataReceived.connect(self._onTopicReceived)
          qml.topicDataUpdated.connect(self._onTopicUpdated)
          qml.postDataUpdated.connect(self._onPostUpdated)
          qml.postDataReceived.connect(self._onPostReceived)
        path = 'topic/%s' % topicId
        self.topicComet.setPath(path)
        if netman.manager().isOnline():
          self.topicComet.setActive(True)
  def _injectBeans(self):
    # Expose the Python-side bean objects to the page's JavaScript.
    h = self.webView.page().mainFrame()
    #h.addToJavaScriptWindowObject('bean', self._webBean)
    for name,obj in self._beans:
      h.addToJavaScriptWindowObject(name, obj)
  @memoizedproperty
  def _beans(self):
    """
    return [(unicode name, QObject bean)]
    """
    import coffeebean
    m = coffeebean.manager()
    return (
      ('cacheBean', m.cacheBean),
      ('i18nBean', m.i18nBean),
      ('mainBean', m.mainBean),
      ('topicEditBean', self.topicEditBean),
      #('topicInputBean', self.topicInputBean),
      ('postEditBean', self.postEditBean),
      ('postInputBean', self.postInputBean),
    )
  @memoizedproperty
  def postEditBean(self):
    import postedit
    return postedit.PostEditorManagerBean(parent=self.q, manager=self.postEditorManager)
  @memoizedproperty
  def postInputBean(self):
    import postinput
    return postinput.PostInputManagerBean(parent=self.q, manager=self.postInputManager)
  @memoizedproperty
  def topicEditBean(self):
    import topicedit
    return topicedit.TopicEditorManagerBean(parent=self.q, manager=self.topicEditorManager)
  #@memoizedproperty
  #def topicInputBean(self):
  #  import topicinput
  #  return topicinput.TopicInputManagerBean(parent=self.q, manager=self.topicInputManager)
  @memoizedproperty
  def postEditorManager(self):
    import postedit
    ret = postedit.PostEditorManager(self.q)
    ret.postChanged.connect(self._updatePost)
    return ret
  @memoizedproperty
  def postInputManager(self):
    import postinput
    ret = postinput.PostInputManager(self.q)
    ret.postReceived.connect(self._submitPost)
    return ret
  @memoizedproperty
  def topicEditorManager(self):
    import topicedit
    ret = topicedit.TopicEditorManager(self.q)
    ret.topicChanged.connect(self._updateTopic)
    return ret
  #@memoizedproperty
  #def topicInputManager(self):
  #  import topicinput
  #  ret = topicinput.TopicInputManager(self.q)
  #  ret.topicReceived.connect(self._submitTopic)
  #  return ret
  def _submitPost(self, postData, imageData):
    # Deferred so the submit happens outside the signal handler.
    if self.topicId and netman.manager().isOnline():
      import forumapi
      skevents.runlater(partial(forumapi.manager().submitPost,
          postData, imageData,
          topicId=self.topicId))
  def _updatePost(self, postData, imageData):
    if self.topicId and netman.manager().isOnline():
      import forumapi
      skevents.runlater(partial(forumapi.manager().updatePost,
          postData, imageData))
  #def _submitTopic(self, topicData, imageData, ticketData):
  #  subjectId = self.subjectId
  #  if subjectId:
  #    subjectType = 'game'
  #  else:
  #    subjectId = config.GLOBAL_SUBJECT_ID
  #    subjectType = 'subject'
  #  if netman.manager().isOnline():
  #    import forumapi
  #    skevents.runlater(partial(forumapi.manager().submitTopic,
  #        topicData, imageData, ticketData,
  #        subjectId=subjectId, subjectType=subjectType))
  def _updateTopic(self, topicData, imageData, ticketData):
    if netman.manager().isOnline():
      import forumapi
      skevents.runlater(partial(forumapi.manager().updateTopic,
          topicData, imageData, ticketData))
  def _onPostReceived(self, data): # str ->
    # Comet push: add the post to the page if it belongs to this topic.
    try:
      obj = json.loads(data)
      topicId = obj['topicId']
      if topicId == self.topicId and self.q.isVisible():
        self.addPost(data)
        dprint("pass")
    except Exception, e:
      dwarn(e)
  def _onPostUpdated(self, data): # str ->
    try:
      obj = json.loads(data)
      topicId = obj['topicId']
      if topicId == self.topicId and self.q.isVisible():
        self.updatePost(data)
        dprint("pass")
    except Exception, e:
      dwarn(e)
  #def _onTopicReceived(self, data): # str ->
  #  try:
  #    obj = json.loads(data)
  #    subjectId = obj['subjectId']
  #    if subjectId == self.subjectId and self.q.isVisible():
  #      self.addTopic(data)
  #      dprint("pass")
  #  except Exception, e:
  #    dwarn(e)
  def _onTopicUpdated(self, data): # str ->
    # Note: topic payloads carry the topic id in 'id', not 'topicId'.
    try:
      obj = json.loads(data)
      topicId = obj['id']
      if topicId == self.topicId and self.q.isVisible():
        self.updateTopic(data)
        dprint("pass")
    except Exception, e:
      dwarn(e)
  @memoizedproperty
  def webView(self):
    from PySide.QtWebKit import QWebPage
    ret = SkWebView()
    ret.titleChanged.connect(self.q.setWindowTitle)
    ret.enableHighlight() # highlight selected text
    ret.ignoreSslErrors() # needed to access Twitter
    ret.pageAction(QWebPage.Reload).triggered.connect(
        self.refresh, Qt.QueuedConnection)
    ret.page().setLinkDelegationPolicy(QWebPage.DelegateAllLinks) # Since there are local images
    ret.page().mainFrame().setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff) # disable horizontal scroll
    #ret.page().setLinkDelegationPolicy(QWebPage.DelegateExternalLinks)
    ret.linkClicked.connect(osutil.open_url)
    return ret
  def refresh(self):
    """@reimp

    Re-render the whole topic page from the HAML template.
    """
    self.newPostButton.setVisible(bool(self.topicId))
    #self.gameButton.setVisible(bool(self.subjectId))
    host = config.API_HOST # must be the same as rest.coffee for the same origin policy
    user = dataman.manager().user()
    w = self.webView
    w.setHtml(rc.haml_template('haml/reader/topicview').render({
      'host': host,
      'locale': config.language2htmllocale(user.language),
      'title': tr_("Topic"),
      'topicId': self.topicId,
      'userName': user.name if not user.isGuest() else '',
      'userPassword': user.password,
      'rc': rc,
      'tr': tr_,
    }), host)
    self._injectBeans()
  @memoizedproperty
  def inspector(self):
    # Bottom button bar: reply on the left, browse/refresh on the right.
    ret = SkStyleView()
    skqss.class_(ret, 'texture')
    layout = QtWidgets.QHBoxLayout()
    layout.addWidget(self.newPostButton)
    #layout.addWidget(self.newTopicButton)
    layout.addStretch()
    layout.addWidget(self.browseButton)
    #layout.addWidget(self.gameButton)
    layout.addWidget(self.refreshButton)
    ret.setLayout(layout)
    layout.setContentsMargins(4, 4, 4, 4)
    return ret
  @memoizedproperty
  def refreshButton(self):
    ret = QtWidgets.QPushButton(tr_("Refresh"))
    skqss.class_(ret, 'btn btn-primary')
    ret.setToolTip(tr_("Refresh") + " (Ctrl+R)")
    #ret.setStatusTip(ret.toolTip())
    ret.clicked.connect(self.refresh)
    #nm = netman.manager()
    #ret.setEnabled(nm.isOnline())
    #nm.onlineChanged.connect(ret.setEnabled)
    return ret
  #@memoizedproperty
  #def gameButton(self):
  #  ret = QtWidgets.QPushButton(tr_("Game"))
  #  skqss.class_(ret, 'btn btn-info')
  #  ret.setToolTip(tr_("Game"))
  #  #ret.setStatusTip(ret.toolTip())
  #  ret.clicked.connect(self._showGame)
  #  return ret
  #def _showGame(self):
  #  import main
  #  main.manager().showGameView(itemId=self.subjectId)
  @memoizedproperty
  def browseButton(self):
    # Opens the topic in the external web browser.
    ret = QtWidgets.QPushButton(tr_("Browse"))
    skqss.class_(ret, 'btn btn-default')
    ret.setToolTip(tr_("Browse"))
    #ret.setStatusTip(ret.toolTip())
    ret.clicked.connect(lambda:
        osutil.open_url("http://sakuradite.com/topic/%s" % self.topicId))
    return ret
  @memoizedproperty
  def newPostButton(self):
    ret = QtWidgets.QPushButton("+ " + tr_("Reply"))
    skqss.class_(ret, 'btn btn-primary')
    ret.setToolTip(tr_("New"))
    #ret.setStatusTip(ret.toolTip())
    ret.clicked.connect(self._newPost)
    return ret
  def _newPost(self):
    self.postInputManager.newPost(self.topicId)
  # append ;null for better performance
  def addPost(self, data): # unicode json ->
    js = 'if (window.READY) addPost(%s); null' % data
    self.webView.evaljs(js)
  # append ;null for better performance
  def updatePost(self, data): # unicode json ->
    js = 'if (window.READY) updatePost(%s); null' % data
    self.webView.evaljs(js)
  # append ;null for better performance
  #def addTopic(self, data): # unicode json ->
  #  js = 'if (window.READY) addTopic(%s); null' % data
  #  self.webView.evaljs(js)
  # append ;null for better performance
  def updateTopic(self, data): # unicode json ->
    js = 'if (window.READY) updateTopic(%s); null' % data
    self.webView.evaljs(js)
class TopicView(QtWidgets.QMainWindow):
  """Dialog window displaying a single forum topic and its posts."""
  def __init__(self, parent=None):
    WINDOW_FLAGS = Qt.Dialog|Qt.WindowMinMaxButtonsHint
    super(TopicView, self).__init__(parent, WINDOW_FLAGS)
    self.setWindowIcon(rc.icon('window-forum'))
    self.setWindowTitle(tr_("Topic"))
    self.__d = _TopicView(self)
  def refresh(self): self.__d.refresh()
  def clear(self): self.__d.clear()
  # NOTE(review): _TopicView defines neither `subjectId` nor `setSubjectId`
  # (both are commented out there), so the two delegates below would raise
  # AttributeError if called -- confirm they are dead code.
  def subjectId(self): return self.__d.subjectId
  def setSubjectId(self, subjectId): self.__d.setSubjectId(subjectId)
  def topicId(self): return self.__d.topicId
  def setTopicId(self, topicId): self.__d.setTopicId(topicId)
  def setVisible(self, value):
    """@reimp @public"""
    # Refresh the page when the window is about to become visible;
    # release the page and reset state when it is hidden.
    if value and not self.isVisible():
      self.__d.refresh()
    super(TopicView, self).setVisible(value)
    if not value:
      self.__d.webView.clear()
      self.__d.clear()
class _TopicViewManager:
  """Pool of reusable TopicView dialogs (at most one visible per topic)."""
  def __init__(self):
    self.dialogs = [] # [TopicView]
  def _createDialog(self):
    """Construct a brand-new TopicView parented to the main window.

    @return TopicView
    """
    import windows
    dlg = TopicView(parent=windows.normal())
    dlg.resize(550, 580)
    return dlg
  def getDialog(self, topicId=0):
    """Find the visible dialog currently showing topicId.

    @param* topicId long
    @return TopicView or None
    """
    for dlg in self.dialogs:
      if dlg.isVisible() and dlg.topicId() == topicId:
        return dlg
    return None
  def createDialog(self, topicId=0):
    """Return a dialog for topicId, preferring visible then hidden ones.

    @param* topicId long
    @return TopicView
    """
    dlg = self.getDialog(topicId)
    if dlg:
      dlg.refresh()
      return dlg
    # Recycle any hidden dialog before constructing a new one.
    for dlg in self.dialogs:
      if not dlg.isVisible():
        dlg.clear()
        dlg.setTopicId(topicId)
        return dlg
    dlg = self._createDialog()
    dlg.setTopicId(topicId)
    self.dialogs.append(dlg)
    return dlg
class TopicViewManager:
  """Public facade over the pool of TopicView dialogs."""
  def __init__(self):
    self.__d = _TopicViewManager()
  def isViewVisible(self, *args, **kwargs):
    """Return whether a visible dialog already shows the given topic."""
    return self.__d.getDialog(*args, **kwargs) is not None
  def isVisible(self):
    """Return whether any topic dialog is currently visible."""
    return any(w.isVisible() for w in self.__d.dialogs)
  def hide(self):
    """Hide every visible topic dialog."""
    for w in self.__d.dialogs:
      if w.isVisible():
        w.hide()
  def show(self, topicId):
    """Show (and raise) a dialog for the chatroom with the given topic.

    @param topicId long
    """
    w = self.__d.createDialog(topicId)
    w.show()
    w.raise_()
@memoized
def manager():
  """Return the singleton TopicViewManager, initializing web resources first."""
  import webrc
  webrc.init()
  return TopicViewManager()
#@QmlObject
#class TopicViewManagerProxy(QObject):
# def __init__(self, parent=None):
# super(TopicViewManagerProxy, self).__init__(parent)
#
# @Slot(int)
# def showTopic(self, id):
# manager().showTopic(id)
if __name__ == '__main__':
  # Standalone debug entry point.
  a = debug.app()
  #manager().showTopic('global')
  # NOTE(review): TopicViewManager exposes `show`, not `showTopic`; this
  # call would raise AttributeError if executed -- confirm.
  manager().showTopic(config.GLOBAL_TOPIC_ID)
  a.exec_()
# EOF
| Dangetsu/vnr | Frameworks/Sakura/py/apps/reader/views/topicview.py | Python | gpl-3.0 | 12,896 |
import unittest
import numpy as np
from ..emissions import GaussianEmissions, MultinomialEmissions
from ..hsmm import GaussianHSMM, MultinomialHSMM
class TestHSMMWrappers(unittest.TestCase):
    """Smoke tests for the Gaussian and Multinomial HSMM wrapper classes."""

    def setUp(self):
        # The wrappers only forward these matrices; their exact values are
        # irrelevant to what is asserted below.
        self.tmat = np.eye(3)
        self.durations = np.eye(3)

    def test_gaussian_hsmm(self):
        mu = np.array([1.0, 2.0, 3.0])
        sigma = np.array([0.5, 0.4, 0.3])
        model = GaussianHSMM(mu, sigma, self.durations, self.tmat)
        self.assertIsInstance(model.emissions, GaussianEmissions)
        np.testing.assert_array_equal(model.emissions.means, mu)
        np.testing.assert_array_equal(model.emissions.scales, sigma)

    def test_gaussian_hsmm_means_scales(self):
        mu = np.array([1.0, 2.0, 3.0])
        sigma = np.array([0.5, 0.4, 0.3])
        model = GaussianHSMM(mu, sigma, self.durations, self.tmat)
        # The property getters should reflect the construction arguments.
        np.testing.assert_array_equal(model.means, mu)
        np.testing.assert_array_equal(model.scales, sigma)
        # Assigning through the properties must propagate to the emissions.
        mu2 = np.array([5.0, 5.0, 5.0])
        sigma2 = np.array([1.0, 1.0, 1.0])
        model.means = mu2
        model.scales = sigma2
        np.testing.assert_array_equal(model.emissions.means, mu2)
        np.testing.assert_array_equal(model.emissions.scales, sigma2)

    def test_multinomial_hsmm(self):
        probs = np.ones((3, 5))
        model = MultinomialHSMM(probs, self.durations, self.tmat)
        self.assertIsInstance(model.emissions, MultinomialEmissions)
        np.testing.assert_array_equal(model.emissions._probabilities, probs)
| jvkersch/hsmmlearn | hsmmlearn/tests/test_hsmm_wrappers.py | Python | gpl-3.0 | 1,757 |
import base64
import imghdr
import six
import uuid
from django.core.files.base import ContentFile
from rest_framework import serializers
from .models import Scholar, Record
class ScholarSerializer(serializers.ModelSerializer):
    """Flat DRF serializer exposing the listed Scholar model fields."""
    class Meta:
        model = Scholar
        fields = (
            'pk',
            'photo',
            'name',
            'sex',
            'birth_date',
            'school_class',
            'is_studying')
class RecordSerializer(serializers.ModelSerializer):
    """Flat DRF serializer exposing the listed Record model fields
    (attendance entry: scholar, date, arrival/departure times)."""
    class Meta:
        model = Record
        fields = (
            'pk',
            'scholar',
            'date',
            'has_came_with',
            'time_arrived',
            'time_departed'
        )
| mxmaslin/Test-tasks | django_test_tasks/old_django_test_tasks/apps/playschool/serializers.py | Python | gpl-3.0 | 721 |
#!/usr/bin/python2.6
# -*- coding: utf-8 -*-
# This file is a part of Metagam project.
#
# Metagam is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# Metagam is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Metagam. If not, see <http://www.gnu.org/licenses/>.
import unittest
from concurrence import dispatch, Tasklet
import mg.test.testorm
from mg.core.memcached import Memcached
from mg.core.cass import CassandraPool
class TestORM_Storage2(mg.test.testorm.TestORM):
    """Re-runs the inherited TestORM suite with the database switched to
    storage mode 2 and the application name "testapp"."""
    def setUp(self):
        # Reuse the base fixture, then reconfigure the db handle.
        mg.test.testorm.TestORM.setUp(self)
        self.db.storage = 2
        self.db.app = "testapp"
def main():
    """Run mg.test.testorm.cleanup() and then the unittest runner."""
    mg.test.testorm.cleanup()
    unittest.main()
if __name__ == "__main__":
dispatch(main)
| JoyTeam/metagam | mg/test/testorm-2.py | Python | gpl-3.0 | 1,140 |
from marshmallow import (
Schema,
fields,
validates_schema,
ValidationError,
validates_schema,
EXCLUDE,
)
from marshmallow.validate import Regexp
# Default Leaflet basemap definitions (primary and secondary layer choices).
MAP_1 = {
    "name": "OpenStreetMap",
    "layer": "//{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png",
    "attribution": "© OpenStreetMap",
}
MAP_2 = {
    "name": "OpenTopoMap",
    "layer": "//a.tile.opentopomap.org/{z}/{x}/{y}.png",
    "attribution": "© OpenStreetMap-contributors, SRTM | Style: © OpenTopoMap (CC-BY-SA)",
}
# Supported UI languages: display name, flag-icon CSS class, and the twelve
# localized month names (January..December order).
LANGUAGES = {
    "en": {
        "name": "English",
        "flag_icon": "flag-icon-gb",
        "months": [
            "January",
            "February",
            "March",
            "April",
            "May",
            "June",
            "July",
            "August",
            "September",
            "October",
            "November",
            "December",
        ],
    },
    "fr": {
        "name": "Français",
        "flag_icon": "flag-icon-fr",
        "months": [
            "Janvier",
            "Février",
            "Mars",
            "Avril",
            "Mai",
            "Juin",
            "Juillet",
            "Août",
            "Septembre",
            "Octobre",
            "Novembre",
            "Decembre",
        ],
    },
    "it": {
        "name": "Italiano",
        "flag_icon": "flag-icon-it",
        "months": [
            "Gennaio",
            "Febbraio",
            "Marzo",
            "Aprile",
            "Maggio",
            "Giugno",
            "Luglio",
            "Agosto",
            "Settembre",
            "Ottobre",
            "Novembre",
            "Dicembre",
        ],
    },
}
class SecretSchemaConf(Schema):
    """Validates the secret (non-public) part of the configuration."""

    class Meta:
        # Ignore unexpected keys instead of failing validation.
        unknown = EXCLUDE

    # Connection URI, e.g. postgresql://user:pass@host:port/dbname.
    database_connection = fields.String(
        required=True,
        validate=Regexp(
            # Raw string: the original non-raw "\/" escapes trigger a
            # SyntaxWarning on modern Python; "/" needs no escaping in a
            # regex, so the matched language is unchanged.
            r"^postgresql://.*:.*@[^:]+:\w+/\w+$",
            error="Database uri is invalid ex: postgresql://monuser:monpass@server:port/db_name",
        ),
    )
    GUNICORN_PORT = fields.Integer(load_default=8080)  # HTTP port served
    modeDebug = fields.Boolean(load_default=False)     # Flask debug mode
    SECRET_KEY = fields.String(required=True)          # session secret
class MapConfig(Schema):
    """Validates the map section of the configuration, with Leaflet-oriented
    defaults for every setting."""
    # Initial map center as [lat, lon].
    LAT_LONG = fields.List(fields.Float(), load_default=[44.7952, 6.2287])
    MIN_ZOOM = fields.Integer(load_default=1)
    # Panning bounds as [[lon, lat], [lon, lat]] corners.
    MAX_BOUNDS = fields.List(
        fields.List(fields.Float()), load_default=[[-180, -90], [180, 90]]
    )
    # Basemap layer definitions (see MAP_1 / MAP_2 above).
    FIRST_MAP = fields.Dict(load_default=MAP_1)
    SECOND_MAP = fields.Dict(load_default=MAP_2)
    ZOOM = fields.Integer(load_default=10)
    STEP = fields.Integer(load_default=1)
    BORDERS_COLOR = fields.String(load_default="#000000")
    BORDERS_WEIGHT = fields.Integer(load_default=3)
    ENABLE_SLIDER = fields.Boolean(load_default=True)
    ENABLE_SCALE = fields.Boolean(load_default=True)
    # Style applied to the mask layer outside the territory.
    MASK_STYLE = fields.Dict(
        load_default={"fill": False, "fillColor": "#020202", "fillOpacity": 0.3}
    )
class AtlasConfig(Schema):
    """Validates the public part of the atlas configuration, providing a
    default for every optional display/behavior setting."""

    class Meta:
        # Ignore unexpected keys instead of failing validation.
        unknown = EXCLUDE

    STRUCTURE = fields.String(load_default="Nom de la structure")
    NOM_APPLICATION = fields.String(load_default="Nom de l'application")
    CUSTOM_LOGO_LINK = fields.String(load_default="")
    URL_APPLICATION = fields.String(load_default="")
    DEFAULT_LANGUAGE = fields.String(load_default="fr")
    MULTILINGUAL = fields.Boolean(load_default=False)
    ID_GOOGLE_ANALYTICS = fields.String(load_default="UA-xxxxxxx-xx")
    # BUG FIX: was load_default="False", a truthy non-empty *string*; the
    # intended default is the boolean False, matching every other flag here.
    ORGANISM_MODULE = fields.Boolean(load_default=False)
    GLOSSAIRE = fields.Boolean(load_default=False)
    IGNAPIKEY = fields.String(load_default="")
    # Home-page section toggles.
    AFFICHAGE_INTRODUCTION = fields.Boolean(load_default=True)
    AFFICHAGE_LOGOS_HOME = fields.Boolean(load_default=True)
    AFFICHAGE_FOOTER = fields.Boolean(load_default=True)
    AFFICHAGE_STAT_GLOBALES = fields.Boolean(load_default=True)
    AFFICHAGE_DERNIERES_OBS = fields.Boolean(load_default=True)
    AFFICHAGE_EN_CE_MOMENT = fields.Boolean(load_default=True)
    AFFICHAGE_RANG_STAT = fields.Boolean(load_default=True)
    AFFICHAGE_NOUVELLES_ESPECES = fields.Boolean(load_default=True)
    AFFICHAGE_RECHERCHE_AVANCEE = fields.Boolean(load_default=False)
    # Taxonomic groupings used for the home-page statistics, and their
    # displayed labels (parallel lists).
    RANG_STAT = fields.List(
        fields.Dict,
        load_default=[
            {"phylum": ["Arthropoda", "Mollusca"]},
            {"phylum": ["Chordata"]},
            {"regne": ["Plantae"]},
        ],
    )
    RANG_STAT_FR = fields.List(
        fields.String, load_default=["Faune invertébrée", "Faune vertébrée", "Flore"]
    )
    LIMIT_RANG_TAXONOMIQUE_HIERARCHIE = fields.Integer(load_default=13)
    LIMIT_FICHE_LISTE_HIERARCHY = fields.Integer(load_default=28)
    # Remote TaxHub media source and local processing options.
    REMOTE_MEDIAS_URL = fields.String(load_default="http://mondomaine.fr/taxhub/")
    REMOTE_MEDIAS_PATH = fields.String(load_default="static/medias/")
    REDIMENSIONNEMENT_IMAGE = fields.Boolean(load_default=False)
    TAXHUB_URL = fields.String(required=False, load_default=None)
    # TaxHub attribute IDs for descriptions and media types.
    ATTR_DESC = fields.Integer(load_default=100)
    ATTR_COMMENTAIRE = fields.Integer(load_default=101)
    ATTR_MILIEU = fields.Integer(load_default=102)
    ATTR_CHOROLOGIE = fields.Integer(load_default=103)
    ATTR_MAIN_PHOTO = fields.Integer(load_default=1)
    ATTR_OTHER_PHOTO = fields.Integer(load_default=2)
    ATTR_LIEN = fields.Integer(load_default=3)
    ATTR_PDF = fields.Integer(load_default=4)
    ATTR_AUDIO = fields.Integer(load_default=5)
    ATTR_VIDEO_HEBERGEE = fields.Integer(load_default=6)
    ATTR_YOUTUBE = fields.Integer(load_default=7)
    ATTR_DAILYMOTION = fields.Integer(load_default=8)
    ATTR_VIMEO = fields.Integer(load_default=9)
    PROTECTION = fields.Boolean(load_default=False)
    DISPLAY_PATRIMONIALITE = fields.Boolean(load_default=False)
    PATRIMONIALITE = fields.Dict(
        load_default={
            "label": "Patrimonial",
            "config": {
                "oui": {
                    "icon": "custom/images/logo_patrimonial.png",
                    "text": "Ce taxon est patrimonial",
                }
            },
        }
    )
    STATIC_PAGES = fields.Dict(
        load_default={
            "presentation": {
                "title": "Présentation de l'atlas",
                "picto": "fa-question-circle",
                "order": 0,
                "template": "static/custom/templates/presentation.html",
            }
        }
    )
    AFFICHAGE_MAILLE = fields.Boolean(load_default=False)
    ZOOM_LEVEL_POINT = fields.Integer(load_default=11)
    LIMIT_CLUSTER_POINT = fields.Integer(load_default=1000)
    NB_DAY_LAST_OBS = fields.String(load_default="7")
    NB_LAST_OBS = fields.Integer(load_default=100)
    TEXT_LAST_OBS = fields.String(
        load_default="Les observations des agents ces 7 derniers jours |"
    )
    ANONYMIZE = fields.Boolean(load_default=False)
    MAP = fields.Nested(MapConfig, load_default=dict())
    # Cuts nom_vernaculaire at the first comma on species sheets.
    # TODO(review): declared Integer but defaulted to a bool -- confirm
    # whether this was meant to be fields.Boolean.
    SPLIT_NOM_VERN = fields.Integer(load_default=True)
    INTERACTIVE_MAP_LIST = fields.Boolean(load_default=True)
    AVAILABLE_LANGUAGES = fields.Dict(load_default=LANGUAGES)

    @validates_schema
    def validate_url_taxhub(self, data, **kwargs):
        """TAXHUB_URL must be provided when REDIMENSIONNEMENT_IMAGE is True."""
        if data["REDIMENSIONNEMENT_IMAGE"] and data["TAXHUB_URL"] is None:
            # BUG FIX: the message was wrapped in a set literal, which
            # marshmallow renders as "{...}"; pass the plain string.
            raise ValidationError(
                "Le champ TAXHUB_URL doit être rempli si REDIMENSIONNEMENT_IMAGE = True"
            )
| PnEcrins/GeoNature-atlas | atlas/configuration/config_schema.py | Python | gpl-3.0 | 7,525 |
"""
This file is part of py-sonic.
py-sonic is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
py-sonic is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with py-sonic. If not, see <http://www.gnu.org/licenses/>
"""
from base64 import b64encode
from urllib import urlencode
from .errors import *
from pprint import pprint
from cStringIO import StringIO
from netrc import netrc
import json, urllib2, httplib, logging, socket, ssl, sys
API_VERSION = '1.13.0'
logger = logging.getLogger(__name__)
class HTTPSConnectionChain(httplib.HTTPSConnection):
    """HTTPSConnection that tries each ssl.PROTOCOL_* constant in turn
    until a handshake succeeds, then caches the working protocol on the
    class so later connections skip the probing."""
    # Every PROTOCOL_* name the ssl module exposes, reverse-alphabetical
    # (so newer TLS variants are generally attempted first).
    _preferred_ssl_protos = sorted([ p for p in dir(ssl)
        if p.startswith('PROTOCOL_') ], reverse=True)
    # Protocol constant of the last successful handshake; a *class*
    # attribute, shared by all connections in the process.
    _ssl_working_proto = None
    def _create_sock(self):
        # Plain TCP connection; when going through a proxy, establish the
        # CONNECT tunnel before the TLS handshake.
        sock = socket.create_connection((self.host, self.port), self.timeout)
        if self._tunnel_host:
            self.sock = sock
            self._tunnel()
        return sock
    def connect(self):
        if self._ssl_working_proto is not None:
            # If we have a working proto, let's use that straight away
            logger.debug("Using known working proto: '%s'",
                self._ssl_working_proto)
            sock = self._create_sock()
            self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                ssl_version=self._ssl_working_proto)
            return
        # Try connecting via the different SSL protos in preference order
        for proto_name in self._preferred_ssl_protos:
            sock = self._create_sock()
            proto = getattr(ssl, proto_name, None)
            try:
                self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                    ssl_version=proto)
            # NOTE(review): bare except also swallows KeyboardInterrupt and
            # leaves self.sock unset if every protocol fails -- narrowing to
            # ssl.SSLError/socket.error is worth considering.
            except:
                sock.close()
            else:
                # Cache the working ssl version
                HTTPSConnectionChain._ssl_working_proto = proto
                break
class HTTPSHandlerChain(urllib2.HTTPSHandler):
    """urllib2 HTTPS handler that opens connections through
    HTTPSConnectionChain, inheriting its SSL-protocol fallback."""
    def https_open(self, req):
        return self.do_open(HTTPSConnectionChain, req)
# install opener
# Module import side effect: replace urllib2's default opener globally so
# every HTTPS request in this process uses HTTPSHandlerChain.
urllib2.install_opener(urllib2.build_opener(HTTPSHandlerChain()))
class PysHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
    """
    This class is used to override the default behavior of the
    HTTPRedirectHandler, which does *not* redirect POST data
    """
    def redirect_request(self, req, fp, code, msg, headers, newurl):
        m = req.get_method()
        # Same status-code policy as urllib2's default handler, but the
        # request body is carried along for redirected POSTs instead of
        # being dropped.
        if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
            or code in (301, 302, 303) and m == "POST"):
            newurl = newurl.replace(' ', '%20')
            # Strip body-describing headers; they are recomputed for the
            # new request from the re-attached data.
            newheaders = dict((k, v) for k, v in req.headers.items()
                if k.lower() not in ("content-length", "content-type")
            )
            data = None
            if req.has_data():
                data = req.get_data()
            return urllib2.Request(newurl,
                data=data,
                headers=newheaders,
                origin_req_host=req.get_origin_req_host(),
                unverifiable=True)
        else:
            # Non-redirectable combination: surface the HTTP error as-is.
            raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
class Connection(object):
def __init__(self, baseUrl, username=None, password=None, port=4040,
serverPath='/rest', appName='py-sonic', apiVersion=API_VERSION,
insecure=False, useNetrc=None):
"""
This will create a connection to your subsonic server
baseUrl:str The base url for your server. Be sure to use
"https" for SSL connections. If you are using
a port other than the default 4040, be sure to
specify that with the port argument. Do *not*
append it here.
ex: http://subsonic.example.com
If you are running subsonic under a different
path, specify that with the "serverPath" arg,
*not* here. For example, if your subsonic
lives at:
https://mydomain.com:8080/path/to/subsonic/rest
You would set the following:
baseUrl = "https://mydomain.com"
port = 8080
serverPath = "/path/to/subsonic/rest"
username:str The username to use for the connection. This
can be None if `useNetrc' is True (and you
have a valid entry in your netrc file)
password:str The password to use for the connection. This
can be None if `useNetrc' is True (and you
have a valid entry in your netrc file)
port:int The port number to connect on. The default for
unencrypted subsonic connections is 4040
serverPath:str The base resource path for the subsonic views.
This is useful if you have your subsonic server
behind a proxy and the path that you are proxying
is different from the default of '/rest'.
Ex:
serverPath='/path/to/subs'
The full url that would be built then would be
(assuming defaults and using "example.com" and
you are using the "ping" view):
http://example.com:4040/path/to/subs/ping.view
appName:str The name of your application.
apiVersion:str The API version you wish to use for your
application. Subsonic will throw an error if you
try to use/send an api version higher than what
the server supports. See the Subsonic API docs
to find the Subsonic version -> API version table.
This is useful if you are connecting to an older
version of Subsonic.
insecure:bool This will allow you to use self signed
certificates when connecting if set to True.
useNetrc:str|bool You can either specify a specific netrc
formatted file or True to use your default
netrc file ($HOME/.netrc).
"""
self._baseUrl = baseUrl
self._hostname = baseUrl.split('://')[1].strip()
self._username = username
self._rawPass = password
self._netrc = None
if useNetrc is not None:
self._process_netrc(useNetrc)
elif username is None or password is None:
raise CredentialError('You must specify either a username/password '
'combination or "useNetrc" must be either True or a string '
'representing a path to a netrc file')
self._port = int(port)
self._apiVersion = apiVersion
self._appName = appName
self._serverPath = serverPath.strip('/')
self._insecure = insecure
self._opener = self._getOpener(self._username, self._rawPass)
    # Properties
    # Setters that affect authentication (base url, username, password)
    # rebuild the urllib2 opener so later requests use the new values.
    def setBaseUrl(self, url):
        self._baseUrl = url
        self._opener = self._getOpener(self._username, self._rawPass)
    baseUrl = property(lambda s: s._baseUrl, setBaseUrl)
    def setPort(self, port):
        self._port = int(port)
    port = property(lambda s: s._port, setPort)
    def setUsername(self, username):
        self._username = username
        self._opener = self._getOpener(self._username, self._rawPass)
    username = property(lambda s: s._username, setUsername)
    def setPassword(self, password):
        self._rawPass = password
        # Redo the opener with the new creds
        self._opener = self._getOpener(self._username, self._rawPass)
    password = property(lambda s: s._rawPass, setPassword)
    # apiVersion is read-only.
    apiVersion = property(lambda s: s._apiVersion)
    def setAppName(self, appName):
        self._appName = appName
    appName = property(lambda s: s._appName, setAppName)
    def setServerPath(self, path):
        self._serverPath = path.strip('/')
    serverPath = property(lambda s: s._serverPath, setServerPath)
    def setInsecure(self, insecure):
        self._insecure = insecure
    insecure = property(lambda s: s._insecure, setInsecure)
# API methods
def ping(self):
"""
since: 1.0.0
Returns a boolean True if the server is alive, False otherwise
"""
methodName = 'ping'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
try:
res = self._doInfoReq(req)
except:
return False
if res['status'] == 'ok':
return True
elif res['status'] == 'failed':
exc = getExcByCode(res['error']['code'])
raise exc(res['error']['message'])
return False
def getLicense(self):
"""
since: 1.0.0
Gets details related to the software license
Returns a dict like the following:
{u'license': {u'date': u'2010-05-21T11:14:39',
u'email': u'email@example.com',
u'key': u'12345678901234567890123456789012',
u'valid': True},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getLicense'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getMusicFolders(self):
"""
since: 1.0.0
Returns all configured music folders
Returns a dict like the following:
{u'musicFolders': {u'musicFolder': [{u'id': 0, u'name': u'folder1'},
{u'id': 1, u'name': u'folder2'},
{u'id': 2, u'name': u'folder3'}]},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getMusicFolders'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getNowPlaying(self):
"""
since: 1.0.0
Returns what is currently being played by all users
Returns a dict like the following:
{u'nowPlaying': {u'entry': {u'album': u"Jazz 'Round Midnight 12",
u'artist': u'Astrud Gilberto',
u'bitRate': 172,
u'contentType': u'audio/mpeg',
u'coverArt': u'98349284',
u'duration': 325,
u'genre': u'Jazz',
u'id': u'2424324',
u'isDir': False,
u'isVideo': False,
u'minutesAgo': 0,
u'parent': u'542352',
u'path': u"Astrud Gilberto/Jazz 'Round Midnight 12/01 - The Girl From Ipanema.mp3",
u'playerId': 1,
u'size': 7004089,
u'suffix': u'mp3',
u'title': u'The Girl From Ipanema',
u'track': 1,
u'username': u'user1',
u'year': 1996}},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getNowPlaying'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getIndexes(self, musicFolderId=None, ifModifiedSince=0):
"""
since: 1.0.0
Returns an indexed structure of all artists
musicFolderId:int If this is specified, it will only return
artists for the given folder ID from
the getMusicFolders call
ifModifiedSince:int If specified, return a result if the artist
collection has changed since the given
unix timestamp
Returns a dict like the following:
{u'indexes': {u'index': [{u'artist': [{u'id': u'29834728934',
u'name': u'A Perfect Circle'},
{u'id': u'238472893',
u'name': u'A Small Good Thing'},
{u'id': u'9327842983',
u'name': u'A Tribe Called Quest'},
{u'id': u'29348729874',
u'name': u'A-Teens, The'},
{u'id': u'298472938',
u'name': u'ABA STRUCTURE'}],
u'lastModified': 1303318347000L},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getIndexes'
viewName = '%s.view' % methodName
q = self._getQueryDict({'musicFolderId': musicFolderId,
'ifModifiedSince': self._ts2milli(ifModifiedSince)})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
self._fixLastModified(res)
return res
def getMusicDirectory(self, mid):
"""
since: 1.0.0
Returns a listing of all files in a music directory. Typically used
to get a list of albums for an artist or list of songs for an album.
mid:str The string ID value which uniquely identifies the
folder. Obtained via calls to getIndexes or
getMusicDirectory. REQUIRED
Returns a dict like the following:
{u'directory': {u'child': [{u'artist': u'A Tribe Called Quest',
u'coverArt': u'223484',
u'id': u'329084',
u'isDir': True,
u'parent': u'234823940',
u'title': u'Beats, Rhymes And Life'},
{u'artist': u'A Tribe Called Quest',
u'coverArt': u'234823794',
u'id': u'238472893',
u'isDir': True,
u'parent': u'2308472938',
u'title': u'Midnight Marauders'},
{u'artist': u'A Tribe Called Quest',
u'coverArt': u'39284792374',
u'id': u'983274892',
u'isDir': True,
u'parent': u'9823749',
u'title': u"People's Instinctive Travels And The Paths Of Rhythm"},
{u'artist': u'A Tribe Called Quest',
u'coverArt': u'289347293',
u'id': u'3894723934',
u'isDir': True,
u'parent': u'9832942',
u'title': u'The Anthology'},
{u'artist': u'A Tribe Called Quest',
u'coverArt': u'923847923',
u'id': u'29834729',
u'isDir': True,
u'parent': u'2934872893',
u'title': u'The Love Movement'},
{u'artist': u'A Tribe Called Quest',
u'coverArt': u'9238742893',
u'id': u'238947293',
u'isDir': True,
u'parent': u'9432878492',
u'title': u'The Low End Theory'}],
u'id': u'329847293',
u'name': u'A Tribe Called Quest'},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getMusicDirectory'
viewName = '%s.view' % methodName
req = self._getRequest(viewName, {'id': mid})
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def search(self, artist=None, album=None, title=None, any=None,
count=20, offset=0, newerThan=None):
"""
since: 1.0.0
DEPRECATED SINCE API 1.4.0! USE search2() INSTEAD!
Returns a listing of files matching the given search criteria.
Supports paging with offset
artist:str Search for artist
album:str Search for album
title:str Search for title of song
any:str Search all fields
count:int Max number of results to return [default: 20]
offset:int Search result offset. For paging [default: 0]
newerThan:int Return matches newer than this timestamp
"""
if artist == album == title == any == None:
raise ArgumentError('Invalid search. You must supply search '
'criteria')
methodName = 'search'
viewName = '%s.view' % methodName
q = self._getQueryDict({'artist': artist, 'album': album,
'title': title, 'any': any, 'count': count, 'offset': offset,
'newerThan': self._ts2milli(newerThan)})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def search2(self, query, artistCount=20, artistOffset=0, albumCount=20,
albumOffset=0, songCount=20, songOffset=0, musicFolderId=None):
"""
since: 1.4.0
Returns albums, artists and songs matching the given search criteria.
Supports paging through the result.
query:str The search query
artistCount:int Max number of artists to return [default: 20]
artistOffset:int Search offset for artists (for paging) [default: 0]
albumCount:int Max number of albums to return [default: 20]
albumOffset:int Search offset for albums (for paging) [default: 0]
songCount:int Max number of songs to return [default: 20]
songOffset:int Search offset for songs (for paging) [default: 0]
musicFolderId:int Only return results from the music folder
with the given ID. See getMusicFolders
Returns a dict like the following:
{u'searchResult2': {u'album': [{u'artist': u'A Tribe Called Quest',
u'coverArt': u'289347',
u'id': u'32487298',
u'isDir': True,
u'parent': u'98374289',
u'title': u'The Love Movement'}],
u'artist': [{u'id': u'2947839',
u'name': u'A Tribe Called Quest'},
{u'id': u'239847239',
u'name': u'Tribe'}],
u'song': [{u'album': u'Beats, Rhymes And Life',
u'artist': u'A Tribe Called Quest',
u'bitRate': 224,
u'contentType': u'audio/mpeg',
u'coverArt': u'329847',
u'duration': 148,
u'genre': u'default',
u'id': u'3928472893',
u'isDir': False,
u'isVideo': False,
u'parent': u'23984728394',
u'path': u'A Tribe Called Quest/Beats, Rhymes And Life/A Tribe Called Quest - Beats, Rhymes And Life - 03 - Motivators.mp3',
u'size': 4171913,
u'suffix': u'mp3',
u'title': u'Motivators',
u'track': 3}]},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'search2'
viewName = '%s.view' % methodName
q = self._getQueryDict({'query': query, 'artistCount': artistCount,
'artistOffset': artistOffset, 'albumCount': albumCount,
'albumOffset': albumOffset, 'songCount': songCount,
'songOffset': songOffset, 'musicFolderId': musicFolderId})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def search3(self, query, artistCount=20, artistOffset=0, albumCount=20,
albumOffset=0, songCount=20, songOffset=0, musicFolderId=None):
"""
since: 1.8.0
Works the same way as search2, but uses ID3 tags for
organization
query:str The search query
artistCount:int Max number of artists to return [default: 20]
artistOffset:int Search offset for artists (for paging) [default: 0]
albumCount:int Max number of albums to return [default: 20]
albumOffset:int Search offset for albums (for paging) [default: 0]
songCount:int Max number of songs to return [default: 20]
songOffset:int Search offset for songs (for paging) [default: 0]
musicFolderId:int Only return results from the music folder
with the given ID. See getMusicFolders
Returns a dict like the following (search for "Tune Yards":
{u'searchResult3': {u'album': [{u'artist': u'Tune-Yards',
u'artistId': 1,
u'coverArt': u'al-7',
u'created': u'2012-01-30T12:35:33',
u'duration': 3229,
u'id': 7,
u'name': u'Bird-Brains',
u'songCount': 13},
{u'artist': u'Tune-Yards',
u'artistId': 1,
u'coverArt': u'al-8',
u'created': u'2011-03-22T15:08:00',
u'duration': 2531,
u'id': 8,
u'name': u'W H O K I L L',
u'songCount': 10}],
u'artist': {u'albumCount': 2,
u'coverArt': u'ar-1',
u'id': 1,
u'name': u'Tune-Yards'},
u'song': [{u'album': u'Bird-Brains',
u'albumId': 7,
u'artist': u'Tune-Yards',
u'artistId': 1,
u'bitRate': 160,
u'contentType': u'audio/mpeg',
u'coverArt': 105,
u'created': u'2012-01-30T12:35:33',
u'duration': 328,
u'genre': u'Lo-Fi',
u'id': 107,
u'isDir': False,
u'isVideo': False,
u'parent': 105,
u'path': u'Tune Yards/Bird-Brains/10-tune-yards-fiya.mp3',
u'size': 6588498,
u'suffix': u'mp3',
u'title': u'Fiya',
u'track': 10,
u'type': u'music',
u'year': 2009}]},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'search3'
viewName = '%s.view' % methodName
q = self._getQueryDict({'query': query, 'artistCount': artistCount,
'artistOffset': artistOffset, 'albumCount': albumCount,
'albumOffset': albumOffset, 'songCount': songCount,
'songOffset': songOffset, 'musicFolderId': musicFolderId})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getPlaylists(self, username=None):
"""
since: 1.0.0
Returns the ID and name of all saved playlists
The "username" option was added in 1.8.0.
username:str If specified, return playlists for this user
rather than for the authenticated user. The
authenticated user must have admin role
if this parameter is used
Returns a dict like the following:
{u'playlists': {u'playlist': [{u'id': u'62656174732e6d3375',
u'name': u'beats'},
{u'id': u'766172696574792e6d3375',
u'name': u'variety'}]},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getPlaylists'
viewName = '%s.view' % methodName
q = self._getQueryDict({'username': username})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getPlaylist(self, pid):
"""
since: 1.0.0
Returns a listing of files in a saved playlist
id:str The ID of the playlist as returned in getPlaylists()
Returns a dict like the following:
{u'playlist': {u'entry': {u'album': u'The Essential Bob Dylan',
u'artist': u'Bob Dylan',
u'bitRate': 32,
u'contentType': u'audio/mpeg',
u'coverArt': u'2983478293',
u'duration': 984,
u'genre': u'Classic Rock',
u'id': u'982739428',
u'isDir': False,
u'isVideo': False,
u'parent': u'98327428974',
u'path': u"Bob Dylan/Essential Bob Dylan Disc 1/Bob Dylan - The Essential Bob Dylan - 03 - The Times They Are A-Changin'.mp3",
u'size': 3921899,
u'suffix': u'mp3',
u'title': u"The Times They Are A-Changin'",
u'track': 3},
u'id': u'44796c616e2e6d3375',
u'name': u'Dylan'},
u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'getPlaylist'
viewName = '%s.view' % methodName
req = self._getRequest(viewName, {'id': pid})
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def createPlaylist(self, playlistId=None, name=None, songIds=[]):
"""
since: 1.2.0
Creates OR updates a playlist. If updating the list, the
playlistId is required. If creating a list, the name is required.
playlistId:str The ID of the playlist to UPDATE
name:str The name of the playlist to CREATE
songIds:list The list of songIds to populate the list with in
either create or update mode. Note that this
list will replace the existing list if updating
Returns a dict like the following:
{u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'createPlaylist'
viewName = '%s.view' % methodName
if playlistId == name == None:
raise ArgumentError('You must supply either a playlistId or a name')
if playlistId is not None and name is not None:
raise ArgumentError('You can only supply either a playlistId '
'OR a name, not both')
q = self._getQueryDict({'playlistId': playlistId, 'name': name})
req = self._getRequestWithList(viewName, 'songId', songIds, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def deletePlaylist(self, pid):
"""
since: 1.2.0
Deletes a saved playlist
pid:str ID of the playlist to delete, as obtained by getPlaylists
Returns a dict like the following:
"""
methodName = 'deletePlaylist'
viewName = '%s.view' % methodName
req = self._getRequest(viewName, {'id': pid})
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def download(self, sid):
"""
since: 1.0.0
Downloads a given music file.
sid:str The ID of the music file to download.
Returns the file-like object for reading or raises an exception
on error
"""
methodName = 'download'
viewName = '%s.view' % methodName
req = self._getRequest(viewName, {'id': sid})
res = self._doBinReq(req)
if isinstance(res, dict):
self._checkStatus(res)
return res
def stream(self, sid, maxBitRate=0, tformat=None, timeOffset=None,
size=None, estimateContentLength=False):
"""
since: 1.0.0
Downloads a given music file.
sid:str The ID of the music file to download.
maxBitRate:int (since: 1.2.0) If specified, the server will
attempt to limit the bitrate to this value, in
kilobits per second. If set to zero (default), no limit
is imposed. Legal values are: 0, 32, 40, 48, 56, 64,
80, 96, 112, 128, 160, 192, 224, 256 and 320.
tformat:str (since: 1.6.0) Specifies the target format
(e.g. "mp3" or "flv") in case there are multiple
applicable transcodings (since: 1.9.0) You can use
the special value "raw" to disable transcoding
timeOffset:int (since: 1.6.0) Only applicable to video
streaming. Start the stream at the given
offset (in seconds) into the video
size:str (since: 1.6.0) The requested video size in
WxH, for instance 640x480
estimateContentLength:bool (since: 1.8.0) If set to True,
the HTTP Content-Length header
will be set to an estimated
value for trancoded media
Returns the file-like object for reading or raises an exception
on error
"""
methodName = 'stream'
viewName = '%s.view' % methodName
q = self._getQueryDict({'id': sid, 'maxBitRate': maxBitRate,
'format': tformat, 'timeOffset': timeOffset, 'size': size,
'estimateContentLength': estimateContentLength})
req = self._getRequest(viewName, q)
res = self._doBinReq(req)
if isinstance(res, dict):
self._checkStatus(res)
return res
def getCoverArt(self, aid, size=None):
"""
since: 1.0.0
Returns a cover art image
aid:str ID string for the cover art image to download
size:int If specified, scale image to this size
Returns the file-like object for reading or raises an exception
on error
"""
methodName = 'getCoverArt'
viewName = '%s.view' % methodName
q = self._getQueryDict({'id': aid, 'size': size})
req = self._getRequest(viewName, q)
res = self._doBinReq(req)
if isinstance(res, dict):
self._checkStatus(res)
return res
def scrobble(self, sid, submission=True, listenTime=None):
"""
since: 1.5.0
"Scrobbles" a given music file on last.fm. Requires that the user
has set this up.
Since 1.8.0 you may specify multiple id (and optionally time)
parameters to scrobble multiple files.
Since 1.11.0 this method will also update the play count and
last played timestamp for the song and album. It will also make
the song appear in the "Now playing" page in the web app, and
appear in the list of songs returned by getNowPlaying
sid:str The ID of the file to scrobble
submission:bool Whether this is a "submission" or a "now playing"
notification
listenTime:int (Since 1.8.0) The time (unix timestamp) at
which the song was listened to.
Returns a dict like the following:
{u'status': u'ok',
u'version': u'1.5.0',
u'xmlns': u'http://subsonic.org/restapi'}
"""
methodName = 'scrobble'
viewName = '%s.view' % methodName
q = self._getQueryDict({'id': sid, 'submission': submission,
'time': self._ts2milli(listenTime)})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def changePassword(self, username, password):
    """
    since: 1.1.0

    Changes the password of an existing Subsonic user.  Note that the
    user performing this must have admin privileges.

    username:str    the username whose password is being changed
    password:str    the new password of the user

    Returns a dict like the following:
        {u'status': u'ok',
         u'version': u'1.5.0',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    methodName = 'changePassword'
    viewName = '%s.view' % methodName
    # The password would normally be sent hex encoded with an "enc:"
    # prefix, but some Subsonic implementations do not recognize the
    # prefix and store the literal "enc:<hex>" string as the new
    # password.  Send it in plain text instead.
    #q = {'username': username,
    #    'password': ('enc:%s' % self._hexEnc(password)).lower()}
    q = {'username': username, 'password': password}
    req = self._getRequest(viewName, q)
    res = self._doInfoReq(req)
    self._checkStatus(res)
    return res
def getUser(self, username):
    """
    since: 1.3.0

    Get details about a given user, including which auth roles it
    has.  Can be used to enable/disable certain features in the
    client, such as jukebox control.

    username:str    the username to retrieve.  You can only retrieve
                    your own user unless you have admin privs.

    Returns a dict with a 'user' key mapping to the user's role
    flags, e.g.:
        {u'status': u'ok',
         u'user': {u'adminRole': False,
                   u'downloadRole': True,
                   u'playlistRole': True,
                   u'settingsRole': True,
                   u'streamRole': True,
                   u'uploadRole': True,
                   u'username': u'test',
                   ...},
         u'version': u'1.5.0',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    view = 'getUser.view'
    request = self._getRequest(view, {'username': username})
    result = self._doInfoReq(request)
    self._checkStatus(result)
    return result
def getUsers(self):
    """
    since 1.8.0

    Gets a list of users.

    Returns a dict with a 'users' key containing a 'user' list,
    where each entry carries the user's name and role flags, e.g.:
        {u'status': u'ok',
         u'users': {u'user': [{u'adminRole': True,
                               u'downloadRole': True,
                               u'scrobblingEnabled': True,
                               u'streamRole': True,
                               u'username': u'user1',
                               ...},
                              ...]},
         u'version': u'1.10.2',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    view = 'getUsers.view'
    result = self._doInfoReq(self._getRequest(view))
    self._checkStatus(result)
    return result
def createUser(self, username, password, email,
        ldapAuthenticated=False, adminRole=False, settingsRole=True,
        streamRole=True, jukeboxRole=False, downloadRole=False,
        uploadRole=False, playlistRole=False, coverArtRole=False,
        commentRole=False, podcastRole=False, shareRole=False,
        musicFolderId=None):
    """
    since: 1.1.0

    Creates a new Subsonic user with the given roles.  See the
    documentation at http://subsonic.org for more info on all the
    roles.

    username:str        the username of the new user
    password:str        the password for the new user
    email:str           the email of the new user
    musicFolderId:int   these are the only folders the user has
                        access to
    <for info on the boolean roles, see http://subsonic.org>

    Returns a dict like the following:
        {u'status': u'ok',
         u'version': u'1.5.0',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    view = 'createUser.view'
    # The password is always sent hex encoded with an "enc:" prefix.
    encPass = 'enc:%s' % self._hexEnc(password)
    params = self._getQueryDict({
        'username': username,
        'password': encPass,
        'email': email,
        'ldapAuthenticated': ldapAuthenticated,
        'adminRole': adminRole,
        'settingsRole': settingsRole,
        'streamRole': streamRole,
        'jukeboxRole': jukeboxRole,
        'downloadRole': downloadRole,
        'uploadRole': uploadRole,
        'playlistRole': playlistRole,
        'coverArtRole': coverArtRole,
        'commentRole': commentRole,
        'podcastRole': podcastRole,
        'shareRole': shareRole,
        'musicFolderId': musicFolderId,
    })
    result = self._doInfoReq(self._getRequest(view, params))
    self._checkStatus(result)
    return result
def updateUser(self, username, password=None, email=None,
        ldapAuthenticated=False, adminRole=False, settingsRole=True,
        streamRole=True, jukeboxRole=False, downloadRole=False,
        uploadRole=False, playlistRole=False, coverArtRole=False,
        commentRole=False, podcastRole=False, shareRole=False,
        musicFolderId=None, maxBitRate=0):
    """
    since 1.10.1

    Modifies an existing Subsonic user.

    username:str        the username of the user to update
    musicFolderId:int   only return results from the music folder
                        with the given ID.  See getMusicFolders
    maxBitRate:int      the max bitrate for the user.  0 is unlimited

    All other args are the same as createUser() and you can update
    whatever item you wish to update for the given username.

    Returns a dict like the following:
        {u'status': u'ok',
         u'version': u'1.5.0',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    view = 'updateUser.view'
    # Only an explicitly supplied password is (hex) encoded; None is
    # dropped from the query by _getQueryDict.
    if password is not None:
        password = 'enc:%s' % self._hexEnc(password)
    params = self._getQueryDict({
        'username': username,
        'password': password,
        'email': email,
        'ldapAuthenticated': ldapAuthenticated,
        'adminRole': adminRole,
        'settingsRole': settingsRole,
        'streamRole': streamRole,
        'jukeboxRole': jukeboxRole,
        'downloadRole': downloadRole,
        'uploadRole': uploadRole,
        'playlistRole': playlistRole,
        'coverArtRole': coverArtRole,
        'commentRole': commentRole,
        'podcastRole': podcastRole,
        'shareRole': shareRole,
        'musicFolderId': musicFolderId,
        'maxBitRate': maxBitRate,
    })
    result = self._doInfoReq(self._getRequest(view, params))
    self._checkStatus(result)
    return result
def deleteUser(self, username):
    """
    since: 1.3.0

    Deletes an existing Subsonic user.  Of course, you must have
    admin rights for this.

    username:str    the username of the user to delete

    Returns a dict like the following:
        {u'status': u'ok',
         u'version': u'1.5.0',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    view = 'deleteUser.view'
    request = self._getRequest(view, {'username': username})
    result = self._doInfoReq(request)
    self._checkStatus(result)
    return result
def getChatMessages(self, since=1):
    """
    since: 1.2.0

    Returns the current visible (non-expired) chat messages.

    since:int   only return messages newer than this timestamp

    NOTE: all times returned are in MILLISECONDS since the Epoch,
    not seconds!

    Returns a dict like the following:
        {u'chatMessages': {u'chatMessage': {u'message': u'testing 123',
                                            u'time': 1303411919872L,
                                            u'username': u'admin'}},
         u'status': u'ok',
         u'version': u'1.5.0',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    view = 'getChatMessages.view'
    # The API expects the threshold in milliseconds.
    params = {'since': self._ts2milli(since)}
    result = self._doInfoReq(self._getRequest(view, params))
    self._checkStatus(result)
    return result
def addChatMessage(self, message):
    """
    since: 1.2.0

    Adds a message to the chat log.

    message:str     the message to add

    Returns a dict like the following:
        {u'status': u'ok',
         u'version': u'1.5.0',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    view = 'addChatMessage.view'
    request = self._getRequest(view, {'message': message})
    result = self._doInfoReq(request)
    self._checkStatus(result)
    return result
def getAlbumList(self, ltype, size=10, offset=0, fromYear=None,
        toYear=None, genre=None, musicFolderId=None):
    """
    since: 1.2.0

    Returns a list of random, newest, highest rated etc. albums,
    similar to the album lists on the home page of the Subsonic
    web interface.

    ltype:str           the list type.  Must be one of: random,
                        newest, highest, frequent, recent,
                        (since 1.8.0 ->) starred, alphabeticalByName,
                        alphabeticalByArtist.  Since 1.10.1 you can
                        use byYear and byGenre to list albums in a
                        given year range or genre.
    size:int            the number of albums to return.  Max 500
    offset:int          the list offset, for paging.  Max 5000
    fromYear:int        required when ltype is "byYear"
    toYear:int          required when ltype is "byYear"
    genre:str           the name of the genre e.g. "Rock".  Required
                        when ltype is "byGenre"
    musicFolderId:str   only return albums in the music folder with
                        the given ID.  See getMusicFolders()

    Returns a dict with an 'albumList' key holding an 'album' list,
    e.g.:
        {u'albumList': {u'album': [{u'artist': u'Hank Williams',
                                    u'id': u'3264928374',
                                    u'isDir': True,
                                    u'parent': u'9238479283',
                                    u'title': u'The Original Singles Collection...Plus'},
                                   ...]},
         u'status': u'ok',
         u'version': u'1.5.0',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    view = 'getAlbumList.view'
    params = self._getQueryDict({
        'type': ltype,
        'size': size,
        'offset': offset,
        'fromYear': fromYear,
        'toYear': toYear,
        'genre': genre,
        'musicFolderId': musicFolderId,
    })
    result = self._doInfoReq(self._getRequest(view, params))
    self._checkStatus(result)
    return result
def getAlbumList2(self, ltype, size=10, offset=0, fromYear=None,
        toYear=None, genre=None, musicFolderId=None):
    """
    since 1.8.0

    Returns a list of random, newest, highest rated etc. albums.
    This is similar to getAlbumList, but uses ID3 tags for
    organization.

    ltype:str           the list type.  Must be one of: random,
                        newest, highest, frequent, recent,
                        (since 1.8.0 ->) starred, alphabeticalByName,
                        alphabeticalByArtist.  Since 1.10.1 you can
                        use byYear and byGenre to list albums in a
                        given year range or genre.
    size:int            the number of albums to return.  Max 500
    offset:int          the list offset, for paging.  Max 5000
    fromYear:int        required when ltype is "byYear"
    toYear:int          required when ltype is "byYear"
    genre:str           the name of the genre e.g. "Rock".  Required
                        when ltype is "byGenre"
    musicFolderId:str   only return albums in the music folder with
                        the given ID.  See getMusicFolders()

    Returns a dict like the following:
        {u'albumList2': {u'album': [{u'artist': u'Massive Attack',
                                     u'artistId': 0,
                                     u'coverArt': u'al-0',
                                     u'created': u'2009-08-28T10:00:44',
                                     u'duration': 3762,
                                     u'id': 0,
                                     u'name': u'100th Window',
                                     u'songCount': 9},
                                    ...]},
         u'status': u'ok',
         u'version': u'1.8.0',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    methodName = 'getAlbumList2'
    viewName = '%s.view' % methodName
    # musicFolderId is accepted here for parity with getAlbumList();
    # _getQueryDict drops it when it is None, so old callers are
    # unaffected.
    q = self._getQueryDict({'type': ltype, 'size': size,
        'offset': offset, 'fromYear': fromYear, 'toYear': toYear,
        'genre': genre, 'musicFolderId': musicFolderId})
    req = self._getRequest(viewName, q)
    res = self._doInfoReq(req)
    self._checkStatus(res)
    return res
def getRandomSongs(self, size=10, genre=None, fromYear=None,
        toYear=None, musicFolderId=None):
    """
    since 1.2.0

    Returns random songs matching the given criteria.

    size:int            the max number of songs to return.  Max 500
    genre:str           only return songs from this genre
    fromYear:int        only return songs after or in this year
    toYear:int          only return songs before or in this year
    musicFolderId:str   only return songs in the music folder with
                        the given ID.  See getMusicFolders

    Returns a dict with a 'randomSongs' key holding a 'song' list,
    where each song carries fields like album, artist, bitRate,
    contentType, duration, id, isDir, isVideo, parent, path, size,
    suffix, title and track, e.g.:
        {u'randomSongs': {u'song': [{u'artist': u'Radiohead',
                                     u'id': u'9284728934',
                                     u'suffix': u'mp3',
                                     u'title': u'Melatonin',
                                     ...},
                                    ...]},
         u'status': u'ok',
         u'version': u'1.5.0',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    view = 'getRandomSongs.view'
    params = self._getQueryDict({
        'size': size,
        'genre': genre,
        'fromYear': fromYear,
        'toYear': toYear,
        'musicFolderId': musicFolderId,
    })
    result = self._doInfoReq(self._getRequest(view, params))
    self._checkStatus(result)
    return result
def getLyrics(self, artist=None, title=None):
    """
    since: 1.2.0

    Searches for and returns lyrics for a given song.

    artist:str  the artist name
    title:str   the song title

    Returns a dict like the following for
    getLyrics('Bob Dylan', 'Blowin in the wind'):
        {u'lyrics': {u'artist': u'Bob Dylan',
                     u'content': u"How many roads must a man walk down<snip>",
                     u'title': u"Blowin' in the Wind"},
         u'status': u'ok',
         u'version': u'1.5.0',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    view = 'getLyrics.view'
    params = self._getQueryDict({'artist': artist, 'title': title})
    result = self._doInfoReq(self._getRequest(view, params))
    self._checkStatus(result)
    return result
def jukeboxControl(self, action, index=None, sids=None, gain=None,
        offset=None):
    """
    since: 1.2.0

    NOTE: Some options were added as of API version 1.7.0

    Controls the jukebox, i.e., playback directly on the server's
    audio hardware.  Note: the user must be authorized to control
    the jukebox.

    action:str      the operation to perform.  Must be one of: get,
                    start, stop, skip, add, clear, remove, shuffle,
                    setGain, status (added in API 1.7.0),
                    set (added in API 1.7.0)
    index:int       used by skip and remove.  Zero-based index of
                    the song to skip to or remove
    sids:list       used by "add" and "set".  IDs of songs to add to
                    the jukebox playlist.  Whether you are passing
                    one song or many into this, this parameter MUST
                    be a list (or tuple)
    gain:float      used by setGain to control the playback volume.
                    A float value between 0.0 and 1.0
    offset:int      (added in API 1.7.0) used by "skip".  Start
                    playing this many seconds into the track

    Raises ArgumentError when action is 'add' and sids is not a
    list or tuple.
    """
    methodName = 'jukeboxControl'
    viewName = '%s.view' % methodName
    # Avoid a shared mutable default argument; None means "no ids".
    if sids is None:
        sids = []
    q = self._getQueryDict({'action': action, 'index': index,
        'gain': gain, 'offset': offset})
    req = None
    if action == 'add':
        # We have to deal with the sids
        if not isinstance(sids, (list, tuple)):
            raise ArgumentError('If you are adding songs, "sids" must '
                'be a list or tuple!')
        req = self._getRequestWithList(viewName, 'id', sids, q)
    else:
        req = self._getRequest(viewName, q)
    res = self._doInfoReq(req)
    self._checkStatus(res)
    return res
def getPodcasts(self, incEpisodes=True, pid=None):
    """
    since: 1.6.0

    Returns all podcast channels the server subscribes to and their
    episodes.

    incEpisodes:bool    (since: 1.9.0) whether to include Podcast
                        episodes in the returned result
    pid:str             (since: 1.9.0) if specified, only return
                        the Podcast channel with this ID

    Returns a dict with a 'podcasts' key holding the channel(s);
    each channel carries description, id, status, title, url and an
    'episode' list whose entries include fields like album, artist,
    bitRate, contentType, description, duration, id, publishDate,
    size, status, streamId, suffix and title.

    See also:
    http://subsonic.svn.sourceforge.net/viewvc/subsonic/trunk/subsonic-main/src/main/webapp/xsd/podcasts_example_1.xml?view=markup
    """
    view = 'getPodcasts.view'
    params = self._getQueryDict({
        'includeEpisodes': incEpisodes,
        'id': pid,
    })
    result = self._doInfoReq(self._getRequest(view, params))
    self._checkStatus(result)
    return result
def getShares(self):
    """
    since: 1.6.0

    Returns information about shared media this user is allowed to
    manage.  Note that 'entry' can be either a single dict or a
    list of dicts.

    Returns a dict with a 'shares' key holding a 'share' list; each
    share carries created, entry, expires, id, url, username and
    visitCount fields, e.g.:
        {u'status': u'ok',
         u'shares': {u'share': [{u'created': u'2011-08-18T10:01:35',
                                 u'expires': u'2012-08-18T10:01:35',
                                 u'id': 0,
                                 u'url': u'http://crustymonkey.subsonic.org/share/BuLbF',
                                 u'username': u'admin',
                                 u'visitCount': 0,
                                 ...}]},
         u'version': u'1.6.0',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    view = 'getShares.view'
    result = self._doInfoReq(self._getRequest(view))
    self._checkStatus(result)
    return result
def createShare(self, shids=None, description=None, expires=None):
    """
    since: 1.6.0

    Creates a public URL that can be used by anyone to stream music
    or video from the Subsonic server.  The URL is short and
    suitable for posting on Facebook, Twitter etc.  Note: the user
    must be authorized to share (see Settings > Users > User is
    allowed to share files with anyone).

    shids:list[str]     a list of ids of songs, albums or videos
                        to share
    description:str     a description that will be displayed to
                        people visiting the shared media (optional)
    expires:float       a timestamp pertaining to the time at which
                        this should expire (optional)

    This returns a structure like you would get back from
    getShares() containing just your new share.
    """
    methodName = 'createShare'
    viewName = '%s.view' % methodName
    # Avoid a shared mutable default argument; None means "no ids".
    if shids is None:
        shids = []
    q = self._getQueryDict({'description': description,
        'expires': self._ts2milli(expires)})
    req = self._getRequestWithList(viewName, 'id', shids, q)
    res = self._doInfoReq(req)
    self._checkStatus(res)
    return res
def updateShare(self, shid, description=None, expires=None):
    """
    since: 1.6.0

    Updates the description and/or expiration date for an existing
    share.

    shid:str            the id of the share to update
    description:str     the new description for the share (optional)
    expires:float       the new timestamp for the expiration time of
                        this share (optional)
    """
    methodName = 'updateShare'
    viewName = '%s.view' % methodName
    # BUG FIX: the expiration must be sent under the string key
    # 'expires' -- previously the *variable* "expires" was used as
    # the dict key, so the value was never sent under the right name.
    q = self._getQueryDict({'id': shid, 'description': description,
        'expires': self._ts2milli(expires)})
    req = self._getRequest(viewName, q)
    res = self._doInfoReq(req)
    self._checkStatus(res)
    return res
def deleteShare(self, shid):
    """
    since: 1.6.0

    Deletes an existing share.

    shid:str    the id of the share to delete

    Returns a standard response dict.
    """
    view = 'deleteShare.view'
    params = self._getQueryDict({'id': shid})
    result = self._doInfoReq(self._getRequest(view, params))
    self._checkStatus(result)
    return result
def setRating(self, id, rating):
    """
    since: 1.6.0

    Sets the rating for a music file.

    id:str      the id of the item (song/artist/album) to rate
    rating:int  the rating between 1 and 5 (inclusive), or 0 to
                remove the rating

    Returns a standard response dict.

    Raises ArgumentError when "rating" is not convertible to an
    integer in the range 0-5.
    """
    methodName = 'setRating'
    viewName = '%s.view' % methodName
    try:
        rating = int(rating)
    except (TypeError, ValueError):
        # Only catch conversion failures; a bare "except" would also
        # swallow KeyboardInterrupt/SystemExit.
        raise ArgumentError('Rating must be an integer between 0 and 5: '
            '%r' % rating)
    if not 0 <= rating <= 5:
        raise ArgumentError('Rating must be an integer between 0 and 5: '
            '%r' % rating)
    q = self._getQueryDict({'id': id, 'rating': rating})
    req = self._getRequest(viewName, q)
    res = self._doInfoReq(req)
    self._checkStatus(res)
    return res
def getArtists(self):
    """
    since 1.8.0

    Similar to getIndexes(), but this method uses the ID3 tags to
    determine the artist.

    Returns a dict with an 'artists' key holding an 'index' list;
    each index carries a letter 'name' and its 'artist' entries
    (albumCount, coverArt, id, name), e.g.:
        {u'artists': {u'index': [{u'artist': {u'albumCount': 7,
                                              u'coverArt': u'ar-0',
                                              u'id': 0,
                                              u'name': u'Massive Attack'},
                                  u'name': u'M'},
                                 ...]},
         u'status': u'ok',
         u'version': u'1.8.0',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    view = 'getArtists.view'
    result = self._doInfoReq(self._getRequest(view))
    self._checkStatus(result)
    return result
def getArtist(self, id):
    """
    since 1.8.0

    Returns the info (albums) for an artist.  This method uses the
    ID3 tags for organization.

    id:str  the artist ID

    Returns a dict with an 'artist' key holding the artist's
    albumCount, coverArt, id, name and an 'album' list whose
    entries carry artist, artistId, coverArt, created, duration,
    id, name and songCount, e.g.:
        {u'artist': {u'album': [{u'artist': u'Tune-Yards',
                                 u'artistId': 1,
                                 u'id': 7,
                                 u'name': u'Bird-Brains',
                                 u'songCount': 13},
                                ...],
                     u'albumCount': 2,
                     u'id': 1,
                     u'name': u'Tune-Yards'},
         u'status': u'ok',
         u'version': u'1.8.0',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    view = 'getArtist.view'
    params = self._getQueryDict({'id': id})
    result = self._doInfoReq(self._getRequest(view, params))
    self._checkStatus(result)
    return result
def getAlbum(self, id):
    """
    since 1.8.0

    Returns the info and songs for an album.  This method uses the
    ID3 tags for organization.

    id:str  the album ID

    Returns a dict with an 'album' key holding the album's artist,
    artistId, coverArt, created, duration, id, name, songCount and
    a 'song' list whose entries carry fields like album, albumId,
    artist, bitRate, contentType, duration, genre, id, path, size,
    suffix, title, track, type and year, e.g.:
        {u'album': {u'artist': u'Massive Attack',
                    u'id': 0,
                    u'name': u'100th Window',
                    u'song': [{u'id': 14,
                               u'title': u'Future Proof',
                               u'track': 1,
                               ...}],
                    u'songCount': 9},
         u'status': u'ok',
         u'version': u'1.8.0',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    view = 'getAlbum.view'
    params = self._getQueryDict({'id': id})
    result = self._doInfoReq(self._getRequest(view, params))
    self._checkStatus(result)
    return result
def getSong(self, id):
    """
    since 1.8.0

    Returns the info for a song.  This method uses the ID3 tags for
    organization.

    id:str  the song ID

    Returns a dict with a 'song' key carrying fields like album,
    albumId, artist, artistId, bitRate, contentType, coverArt,
    created, discNumber, duration, genre, id, isDir, isVideo,
    parent, path, size, suffix, title, track, type and year, e.g.:
        {u'song': {u'album': u'W H O K I L L',
                   u'artist': u'Tune-Yards',
                   u'id': 120,
                   u'suffix': u'mp3',
                   u'title': u'Killa',
                   u'track': 10,
                   ...},
         u'status': u'ok',
         u'version': u'1.8.0',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    view = 'getSong.view'
    params = self._getQueryDict({'id': id})
    result = self._doInfoReq(self._getRequest(view, params))
    self._checkStatus(result)
    return result
def getVideos(self):
    """
    since 1.8.0

    Returns all video files.

    Returns a dict with a 'videos' key holding a 'video' entry (or
    list); each video carries fields like bitRate, contentType,
    created, duration, id, isDir, isVideo, path, size, suffix,
    title, transcodedContentType and transcodedSuffix, e.g.:
        {u'status': u'ok',
         u'version': u'1.8.0',
         u'videos': {u'video': {u'contentType': u'video/x-matroska',
                                u'id': 130,
                                u'suffix': u'mkv',
                                u'title': u'South Park - 16x07 - Cartman Finds Love',
                                ...}},
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    view = 'getVideos.view'
    result = self._doInfoReq(self._getRequest(view))
    self._checkStatus(result)
    return result
def getStarred(self, musicFolderId=None):
    """
    since 1.8.0

    Returns starred songs, albums and artists.

    musicFolderId:int   only return results from the music folder
                        with the given ID.  See getMusicFolders

    Returns a dict with a 'starred' key holding 'album', 'song'
    (and 'artist') entries; each entry carries the usual media
    fields plus a 'starred' timestamp, e.g.:
        {u'starred': {u'album': {u'album': u'Bird-Brains',
                                 u'artist': u'Tune-Yards',
                                 u'id': 105,
                                 u'starred': u'2012-08-26T13:18:34',
                                 ...},
                      u'song': [{u'album': u'Mezzanine',
                                 u'artist': u'Massive Attack',
                                 u'id': 72,
                                 u'starred': u'2012-08-26T13:19:26',
                                 u'title': u'Risingson',
                                 ...},
                                ...]},
         u'status': u'ok',
         u'version': u'1.8.0',
         u'xmlns': u'http://subsonic.org/restapi'}
    """
    view = 'getStarred.view'
    params = {}
    # Only send the folder filter when one was actually given.
    if musicFolderId:
        params['musicFolderId'] = musicFolderId
    result = self._doInfoReq(self._getRequest(view, params))
    self._checkStatus(result)
    return result
def getStarred2(self, musicFolderId=None):
    """
    since 1.8.0

    Returns starred songs, albums and artists like getStarred(),
    but this uses ID3 tags for organization.

    musicFolderId:int   only return results from the music folder
                        with the given ID.  See getMusicFolders

    Returns a dict shaped like the output of getStarred().
    """
    view = 'getStarred2.view'
    params = {}
    # Only send the folder filter when one was actually given.
    if musicFolderId:
        params['musicFolderId'] = musicFolderId
    result = self._doInfoReq(self._getRequest(view, params))
    self._checkStatus(result)
    return result
def updatePlaylist(self, lid, name=None, comment=None, songIdsToAdd=None,
        songIndexesToRemove=None):
    """
    since 1.8.0

    Updates a playlist.  Only the owner of a playlist is allowed to
    update it.

    lid:str                     the playlist id
    name:str                    the human readable name of the playlist
    comment:str                 the playlist comment
    songIdsToAdd:list           a list of song IDs to add to the
                                playlist
    songIndexesToRemove:list    remove the songs at the 0 BASED
                                INDEXED POSITIONS in the playlist,
                                NOT the song ids.  Note that this is
                                always a list

    Returns a normal status response dict.
    """
    methodName = 'updatePlaylist'
    viewName = '%s.view' % methodName
    # Avoid shared mutable default arguments.
    if songIdsToAdd is None:
        songIdsToAdd = []
    if songIndexesToRemove is None:
        songIndexesToRemove = []
    q = self._getQueryDict({'playlistId': lid, 'name': name,
        'comment': comment})
    # BUG FIX: the old test "not isinstance(x, list) or
    # isinstance(x, tuple)" wrapped tuples in a list due to operator
    # precedence; accept scalars, lists and tuples correctly.
    if not isinstance(songIdsToAdd, (list, tuple)):
        songIdsToAdd = [songIdsToAdd]
    if not isinstance(songIndexesToRemove, (list, tuple)):
        songIndexesToRemove = [songIndexesToRemove]
    listMap = {'songIdToAdd': songIdsToAdd,
        'songIndexToRemove': songIndexesToRemove}
    req = self._getRequestWithLists(viewName, listMap, q)
    res = self._doInfoReq(req)
    self._checkStatus(res)
    return res
def getAvatar(self, username):
    """
    since 1.8.0

    Returns the avatar for a user or None if the avatar does not
    exist.

    username:str    the user to retrieve the avatar for

    Returns the file-like object for reading, None when no avatar
    is set, or raises an exception on error.
    """
    view = 'getAvatar.view'
    request = self._getRequest(view, {'username': username})
    try:
        result = self._doBinReq(request)
    except urllib2.HTTPError:
        # The server answers with an HTTP error when the avatar is
        # not set/does not exist; report that as None.
        return None
    # A dict result means the server reported an API-level error.
    if isinstance(result, dict):
        self._checkStatus(result)
    return result
def star(self, sids=None, albumIds=None, artistIds=None):
    """
    since 1.8.0

    Attaches a star to songs, albums or artists.

    sids:list       a list of song IDs to star
    albumIds:list   a list of album IDs to star.  Use this rather
                    than "sids" if the client accesses the media
                    collection according to ID3 tags rather than
                    file structure
    artistIds:list  the IDs of artists to star.  Use this rather
                    than sids if the client accesses the media
                    collection according to ID3 tags rather than
                    file structure

    Returns a normal status response dict.
    """
    methodName = 'star'
    viewName = '%s.view' % methodName
    # Avoid shared mutable default arguments.
    sids = [] if sids is None else sids
    albumIds = [] if albumIds is None else albumIds
    artistIds = [] if artistIds is None else artistIds
    # BUG FIX: the old test "not isinstance(x, list) or
    # isinstance(x, tuple)" nested tuples inside a list due to
    # operator precedence; accept scalars, lists and tuples.
    if not isinstance(sids, (list, tuple)):
        sids = [sids]
    if not isinstance(albumIds, (list, tuple)):
        albumIds = [albumIds]
    if not isinstance(artistIds, (list, tuple)):
        artistIds = [artistIds]
    listMap = {'id': sids,
        'albumId': albumIds,
        'artistId': artistIds}
    req = self._getRequestWithLists(viewName, listMap)
    res = self._doInfoReq(req)
    self._checkStatus(res)
    return res
def unstar(self, sids=None, albumIds=None, artistIds=None):
    """
    since 1.8.0

    Removes a star from songs, albums or artists.  Basically, the
    same as star() in reverse.

    sids:list       a list of song IDs to unstar
    albumIds:list   a list of album IDs to unstar.  Use this rather
                    than "sids" if the client accesses the media
                    collection according to ID3 tags rather than
                    file structure
    artistIds:list  the IDs of artists to unstar.  Use this rather
                    than sids if the client accesses the media
                    collection according to ID3 tags rather than
                    file structure

    Returns a normal status response dict.
    """
    methodName = 'unstar'
    viewName = '%s.view' % methodName
    # Avoid shared mutable default arguments.
    sids = [] if sids is None else sids
    albumIds = [] if albumIds is None else albumIds
    artistIds = [] if artistIds is None else artistIds
    # BUG FIX: the old test "not isinstance(x, list) or
    # isinstance(x, tuple)" nested tuples inside a list due to
    # operator precedence; accept scalars, lists and tuples.
    if not isinstance(sids, (list, tuple)):
        sids = [sids]
    if not isinstance(albumIds, (list, tuple)):
        albumIds = [albumIds]
    if not isinstance(artistIds, (list, tuple)):
        artistIds = [artistIds]
    listMap = {'id': sids,
        'albumId': albumIds,
        'artistId': artistIds}
    req = self._getRequestWithLists(viewName, listMap)
    res = self._doInfoReq(req)
    self._checkStatus(res)
    return res
def getGenres(self):
    """
    since 1.9.0

    Returns all genres.
    """
    view = 'getGenres.view'
    result = self._doInfoReq(self._getRequest(view))
    self._checkStatus(result)
    return result
def getSongsByGenre(self, genre, count=10, offset=0, musicFolderId=None):
    """
    since 1.9.0

    Returns songs in a given genre.

    genre:str           the genre, as returned by getGenres()
    count:int           the maximum number of songs to return.
                        Max is 500.  default: 10
    offset:int          the offset if you are paging.  default: 0
    musicFolderId:int   only return results from the music folder
                        with the given ID.  See getMusicFolders
    """
    # BUG FIX: this previously called the 'getGenres' endpoint by
    # mistake, returning the genre list instead of songs.
    methodName = 'getSongsByGenre'
    viewName = '%s.view' % methodName
    q = self._getQueryDict({'genre': genre,
        'count': count,
        'offset': offset,
        'musicFolderId': musicFolderId,
    })
    req = self._getRequest(viewName, q)
    res = self._doInfoReq(req)
    self._checkStatus(res)
    return res
def hls (self, mid, bitrate=None):
"""
since 1.8.0
Creates an HTTP live streaming playlist for streaming video or
audio HLS is a streaming protocol implemented by Apple and
works by breaking the overall stream into a sequence of small
HTTP-based file downloads. It's supported by iOS and newer
versions of Android. This method also supports adaptive
bitrate streaming, see the bitRate parameter.
mid:str The ID of the media to stream
bitrate:str If specified, the server will attempt to limit the
bitrate to this value, in kilobits per second. If
this parameter is specified more than once, the
server will create a variant playlist, suitable
for adaptive bitrate streaming. The playlist will
support streaming at all the specified bitrates.
The server will automatically choose video dimensions
that are suitable for the given bitrates.
(since: 1.9.0) you may explicitly request a certain
width (480) and height (360) like so:
bitRate=1000@480x360
Returns the raw m3u8 file as a string
"""
methodName = 'hls'
viewName = '%s.view' % methodName
q = self._getQueryDict({'id': mid, 'bitrate': bitrate})
req = self._getRequest(viewName, q)
try:
res = self._doBinReq(req)
except urllib2.HTTPError:
# Avatar is not set/does not exist, return None
return None
if isinstance(res, dict):
self._checkStatus(res)
return res.read()
def refreshPodcasts(self):
"""
since: 1.9.0
Tells the server to check for new Podcast episodes. Note: The user
must be authorized for Podcast administration
"""
methodName = 'refreshPodcasts'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def createPodcastChannel(self, url):
"""
since: 1.9.0
Adds a new Podcast channel. Note: The user must be authorized
for Podcast administration
url:str The URL of the Podcast to add
"""
methodName = 'createPodcastChannel'
viewName = '%s.view' % methodName
q = {'url': url}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def deletePodcastChannel(self, pid):
"""
since: 1.9.0
Deletes a Podcast channel. Note: The user must be authorized
for Podcast administration
pid:str The ID of the Podcast channel to delete
"""
methodName = 'deletePodcastChannel'
viewName = '%s.view' % methodName
q = {'id': pid}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def deletePodcastEpisode(self, pid):
"""
since: 1.9.0
Deletes a Podcast episode. Note: The user must be authorized
for Podcast administration
pid:str The ID of the Podcast episode to delete
"""
methodName = 'deletePodcastEpisode'
viewName = '%s.view' % methodName
q = {'id': pid}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def downloadPodcastEpisode(self, pid):
"""
since: 1.9.0
Tells the server to start downloading a given Podcast episode.
Note: The user must be authorized for Podcast administration
pid:str The ID of the Podcast episode to download
"""
methodName = 'downloadPodcastEpisode'
viewName = '%s.view' % methodName
q = {'id': pid}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getInternetRadioStations(self):
"""
since: 1.9.0
Returns all internet radio stations
"""
methodName = 'getInternetRadioStations'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getBookmarks(self):
"""
since: 1.9.0
Returns all bookmarks for this user. A bookmark is a position
within a media file
"""
methodName = 'getBookmarks'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def createBookmark(self, mid, position, comment=None):
"""
since: 1.9.0
Creates or updates a bookmark (position within a media file).
Bookmarks are personal and not visible to other users
mid:str The ID of the media file to bookmark. If a bookmark
already exists for this file, it will be overwritten
position:int The position (in milliseconds) within the media file
comment:str A user-defined comment
"""
methodName = 'createBookmark'
viewName = '%s.view' % methodName
q = self._getQueryDict({'id': mid, 'position': position,
'comment': comment})
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def deleteBookmark(self, mid):
"""
since: 1.9.0
Deletes the bookmark for a given file
mid:str The ID of the media file to delete the bookmark from.
Other users' bookmarks are not affected
"""
methodName = 'deleteBookmark'
viewName = '%s.view' % methodName
q = {'id': mid}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getArtistInfo(self, aid, count=20, includeNotPresent=False):
"""
since: 1.11.0
Returns artist info with biography, image URLS and similar artists
using data from last.fm
aid:str The ID of the artist, album or song
count:int The max number of similar artists to return
includeNotPresent:bool Whether to return artists that are not
present in the media library
"""
methodName = 'getArtistInfo'
viewName = '%s.view' % methodName
q = {'id': aid, 'count': count,
'includeNotPresent': includeNotPresent}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getArtistInfo2(self, aid, count=20, includeNotPresent=False):
"""
since: 1.11.0
Similar to getArtistInfo(), but organizes music according to ID3 tags
aid:str The ID of the artist, album or song
count:int The max number of similar artists to return
includeNotPresent:bool Whether to return artists that are not
present in the media library
"""
methodName = 'getArtistInfo2'
viewName = '%s.view' % methodName
q = {'id': aid, 'count': count,
'includeNotPresent': includeNotPresent}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getSimilarSongs(self, iid, count=50):
"""
since 1.11.0
Returns a random collection of songs from the given artist and
similar artists, using data from last.fm. Typically used for
artist radio features.
iid:str The artist, album, or song ID
count:int Max number of songs to return
"""
methodName = 'getSimilarSongs'
viewName = '%s.view' % methodName
q = {'id': iid, 'count': count}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getSimilarSongs2(self, iid, count=50):
"""
since 1.11.0
Similar to getSimilarSongs(), but organizes music according to
ID3 tags
iid:str The artist, album, or song ID
count:int Max number of songs to return
"""
methodName = 'getSimilarSongs2'
viewName = '%s.view' % methodName
q = {'id': iid, 'count': count}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def savePlayQueue(self, qids, current=None, position=None):
"""
since 1.12.0
qid:list[int] The list of song ids in the play queue
current:int The id of the current playing song
position:int The position, in milliseconds, within the current
playing song
Saves the state of the play queue for this user. This includes
the tracks in the play queue, the currently playing track, and
the position within this track. Typically used to allow a user to
move between different clients/apps while retaining the same play
queue (for instance when listening to an audio book).
"""
methodName = 'savePlayQueue'
viewName = '%s.view' % methodName
if not isinstance(qids, (tuple, list)):
qids = [qids]
q = self._getQueryDict({'current': current, 'position': position})
req = self._getRequestWithLists(viewName, {'id': qids}, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getPlayQueue(self):
"""
since 1.12.0
Returns the state of the play queue for this user (as set by
savePlayQueue). This includes the tracks in the play queue,
the currently playing track, and the position within this track.
Typically used to allow a user to move between different
clients/apps while retaining the same play queue (for instance
when listening to an audio book).
"""
methodName = 'getPlayQueue'
viewName = '%s.view' % methodName
req = self._getRequest(viewName)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getTopSongs(self, artist, count=50):
"""
since 1.13.0
Returns the top songs for a given artist
artist:str The artist to get songs for
count:int The number of songs to return
"""
methodName = 'getTopSongs'
viewName = '%s.view' % methodName
q = {'artist': artist, 'count': count}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def getNewestPodcasts(self, count=20):
"""
since 1.13.0
Returns the most recently published Podcast episodes
count:int The number of episodes to return
"""
methodName = 'getNewestPodcasts'
viewName = '%s.view' % methodName
q = {'count': count}
req = self._getRequest(viewName, q)
res = self._doInfoReq(req)
self._checkStatus(res)
return res
def scanMediaFolders(self):
"""
This is not an officially supported method of the API
Same as selecting 'Settings' > 'Scan media folders now' with
Subsonic web GUI
Returns True if refresh successful, False otherwise
"""
methodName = 'scanNow'
return self._unsupportedAPIFunction(methodName)
def cleanupDatabase(self):
"""
This is not an officially supported method of the API
Same as selecting 'Settings' > 'Clean-up Database' with Subsonic
web GUI
Returns True if cleanup initiated successfully, False otherwise
Subsonic stores information about all media files ever encountered.
By cleaning up the database, information about files that are
no longer in your media collection is permanently removed.
"""
methodName = 'expunge'
return self._unsupportedAPIFunction(methodName)
def _unsupportedAPIFunction(self, methodName):
"""
base function to call unsupported API methods
Returns True if refresh successful, False otherwise
:rtype : boolean
"""
baseMethod = 'musicFolderSettings'
viewName = '%s.view' % baseMethod
url = '%s:%d/%s/%s?%s' % (self._baseUrl, self._port,
self._separateServerPath(), viewName, methodName)
req = urllib2.Request(url)
res = self._opener.open(req)
res_msg = res.msg.lower()
return res_msg == 'ok'
#
# Private internal methods
#
def _getOpener(self, username, passwd):
creds = b64encode('%s:%s' % (username, passwd))
# Context is only relevent in >= python 2.7.9
https_chain = HTTPSHandlerChain()
if sys.version_info[:3] >= (2, 7, 9) and self._insecure:
https_chain = HTTPSHandlerChain(
context=ssl._create_unverified_context())
opener = urllib2.build_opener(PysHTTPRedirectHandler, https_chain)
opener.addheaders = [('Authorization', 'Basic %s' % creds)]
return opener
def _getQueryDict(self, d):
"""
Given a dictionary, it cleans out all the values set to None
"""
for k, v in d.items():
if v is None:
del d[k]
return d
def _getRequest(self, viewName, query={}):
qstring = {'f': 'json', 'v': self._apiVersion, 'c': self._appName}
qstring.update(query)
url = '%s:%d/%s/%s' % (self._baseUrl, self._port, self._serverPath,
viewName)
req = urllib2.Request(url, urlencode(qstring))
return req
def _getRequestWithList(self, viewName, listName, alist, query={}):
"""
Like _getRequest, but allows appending a number of items with the
same key (listName). This bypasses the limitation of urlencode()
"""
qstring = {'f': 'json', 'v': self._apiVersion, 'c': self._appName}
qstring.update(query)
url = '%s:%d/%s/%s' % (self._baseUrl, self._port, self._serverPath,
viewName)
data = StringIO()
data.write(urlencode(qstring))
for i in alist:
data.write('&%s' % urlencode({listName: i}))
req = urllib2.Request(url, data.getvalue())
return req
def _getRequestWithLists(self, viewName, listMap, query={}):
"""
Like _getRequestWithList(), but you must pass a dictionary
that maps the listName to the list. This allows for multiple
list parameters to be used, like in updatePlaylist()
viewName:str The name of the view
listMap:dict A mapping of listName to a list of entries
query:dict The normal query dict
"""
qstring = {'f': 'json', 'v': self._apiVersion, 'c': self._appName}
qstring.update(query)
url = '%s:%d/%s/%s' % (self._baseUrl, self._port, self._serverPath,
viewName)
data = StringIO()
data.write(urlencode(qstring))
for k, l in listMap.iteritems():
for i in l:
data.write('&%s' % urlencode({k: i}))
req = urllib2.Request(url, data.getvalue())
return req
def _doInfoReq(self, req):
# Returns a parsed dictionary version of the result
res = self._opener.open(req)
dres = json.loads(res.read())
return dres['subsonic-response']
def _doBinReq(self, req):
res = self._opener.open(req)
contType = res.info().getheader('Content-Type')
if contType:
if contType.startswith('text/html') or \
contType.startswith('application/json'):
dres = json.loads(res.read())
return dres['subsonic-response']
return res
def _checkStatus(self, result):
if result['status'] == 'ok':
return True
elif result['status'] == 'failed':
exc = getExcByCode(result['error']['code'])
raise exc(result['error']['message'])
def _hexEnc(self, raw):
"""
Returns a "hex encoded" string per the Subsonic api docs
raw:str The string to hex encode
"""
ret = ''
for c in raw:
ret += '%02X' % ord(c)
return ret
def _ts2milli(self, ts):
"""
For whatever reason, Subsonic uses timestamps in milliseconds since
the unix epoch. I have no idea what need there is of this precision,
but this will just multiply the timestamp times 1000 and return the int
"""
if ts is None:
return None
return int(ts * 1000)
def _separateServerPath(self):
"""
separate REST portion of URL from base server path.
"""
return urllib2.splithost(self._serverPath)[1].split('/')[0]
def _fixLastModified(self, data):
"""
This will recursively walk through a data structure and look for
a dict key/value pair where the key is "lastModified" and change
the shitty java millisecond timestamp to a real unix timestamp
of SECONDS since the unix epoch. JAVA SUCKS!
"""
if isinstance(data, dict):
for k, v in data.items():
if k == 'lastModified':
data[k] = long(v) / 1000.0
return
elif isinstance(v, (tuple, list, dict)):
return self._fixLastModified(v)
elif isinstance(data, (list, tuple)):
for item in data:
if isinstance(item, (list, tuple, dict)):
return self._fixLastModified(item)
def _process_netrc(self, use_netrc):
"""
The use_netrc var is either a boolean, which means we should use
the user's default netrc, or a string specifying a path to a
netrc formatted file
use_netrc:bool|str Either set to True to use the user's default
netrc file or a string specifying a specific
netrc file to use
"""
if not use_netrc:
raise CredentialError('useNetrc must be either a boolean "True" '
'or a string representing a path to a netrc file, '
'not {0}'.format(repr(use_netrc)))
if isinstance(use_netrc, bool) and use_netrc:
self._netrc = netrc()
else:
# This should be a string specifying a path to a netrc file
self._netrc = netrc(os.path.expanduser(use_netrc))
auth = self._netrc.authenticators(self._hostname)
if not auth:
raise CredentialError('No machine entry found for {0} in '
'your netrc file'.format(self._hostname))
# If we get here, we have credentials
self._username = auth[0]
self._rawPass = auth[2]
| ties/py-sonic | libsonic/connection.py | Python | gpl-3.0 | 100,484 |
# Message name -> type value table; these appear to be GTPv2-C message
# types (cf. 3GPP TS 29.274) -- TODO confirm against the spec.  The list
# preserves the insertion order of the original one-per-line assignments,
# and each msg_list entry gets its own fresh dict.
gtp_msg_types = [
    ("Echo Request", "1"),
    ("Echo Response", "2"),
    ("Version Not Supported Indication", "3"),
    ("Create Session Request", "32"),
    ("Create Session Response", "33"),
    ("Delete Session Request", "36"),
    ("Delete Session Response", "37"),
    ("Modify Bearer Request", "34"),
    ("Modify Bearer Response", "35"),
    ("Remote UE Report Notification", "40"),
    ("Remote UE Report Acknowledge", "41"),
    ("Change Notification Request", "38"),
    ("Change Notification Response", "39"),
    ("Resume Notification", "164"),
    ("Resume Acknowledge", "165"),
    ("Modify Bearer Command", "64"),
    ("Modify Bearer Failure Indication", "65"),
    ("Delete Bearer Command", "66"),
    ("Delete Bearer Failure Indication", "67"),
    ("Bearer Resource Command", "68"),
    ("Bearer Resource Failure Indication", "69"),
    ("Downlink Data Notification Failure Indication", "70"),
    ("Trace Session Activation", "71"),
    ("Trace Session Deactivation", "72"),
    ("Stop Paging Indication", "73"),
    ("Create Bearer Request", "95"),
    ("Create Bearer Response", "96"),
    ("Update Bearer Request", "97"),
    ("Update Bearer Response", "98"),
    ("Delete Bearer Request", "99"),
    ("Delete Bearer Response", "100"),
    ("Delete PDN Connection Set Request", "101"),
    ("Delete PDN Connection Set Response", "102"),
    ("PGW Downlink Triggering Notification", "103"),
    ("PGW Downlink Triggering Acknowledge", "104"),
    ("Suspend Notification", "162"),
    ("Suspend Acknowledge", "163"),
    ("Create Forwarding Tunnel Request", "160"),
    ("Create Forwarding Tunnel Response", "161"),
    ("Create Indirect Data Forwarding Tunnel Request", "166"),
    ("Create Indirect Data Forwarding Tunnel Response", "167"),
    ("Delete Indirect Data Forwarding Tunnel Request", "168"),
    ("Delete Indirect Data Forwarding Tunnel Response", "169"),
    ("Release Access Bearers Request", "170"),
    ("Release Access Bearers Response", "171"),
    ("Downlink Data Notification", "176"),
    ("Downlink Data Notification Acknowledge", "177"),
    ("PGW Restart Notification", "179"),
    ("PGW Restart Notification Acknowledge", "180"),
    ("Update PDN Connection Set Request", "200"),
    ("Update PDN Connection Set Response", "201"),
    ("Modify Access Bearers Request", "211"),
    ("Modify Access Bearers Response", "212"),
]
for gtp_msg_name, gtp_msg_type in gtp_msg_types:
    msg_list[gtp_msg_name] = {"type": gtp_msg_type}
| acetcom/cellwire | lib/gtp/support/cache/tlv-msg-list.py | Python | gpl-3.0 | 3,258 |
import unittest
from chat.commands.commandlist import CommandList
from chat.command import Command
from tests.structs.dummychat import DummyChat
class TestCommands(unittest.TestCase):
    """Tests for CommandList lookup and validation."""

    # names that validate() must accept / reject
    VALID = ('help', '!help', 'song', '!song', 'restart', '!restart')
    INVALID = ('not a function', '!not a function')

    def setUp(self):
        self.chat = DummyChat()

    def test_get(self):
        cmd = CommandList.get('help', self.chat, 'message')
        self.assertTrue(cmd and isinstance(cmd, Command),
                        'Command get failed')

    def test_validate(self):
        fail_msg = 'Command validate failed'
        for name in self.VALID:
            self.assertTrue(CommandList.validate(name), fail_msg)
        for name in self.INVALID:
            self.assertFalse(CommandList.validate(name), fail_msg)
# -*- coding: utf-8 -*-
# Copyright (c) 2010 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the central widget showing the web pages.
"""
from __future__ import unicode_literals
import os
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QUrl
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QMenu, QToolButton, QDialog
from PyQt5.QtPrintSupport import QPrinter, QPrintDialog
from PyQt5.QtNetwork import QNetworkAccessManager, QNetworkRequest
from E5Gui.E5TabWidget import E5TabWidget
from E5Gui import E5MessageBox
from E5Gui.E5Application import e5App
from .HelpBrowserWV import HelpBrowser
import UI.PixmapCache
import Utilities
import Preferences
from eric6config import getConfig
class HelpTabWidget(E5TabWidget):
"""
Class implementing the central widget showing the web pages.
@signal sourceChanged(HelpBrowser, QUrl) emitted after the URL of a browser
has changed
@signal titleChanged(HelpBrowser, str) emitted after the title of a browser
has changed
@signal showMessage(str) emitted to show a message in the main window
status bar
@signal browserClosed(QWidget) emitted after a browser was closed
@signal browserZoomValueChanged(int) emitted to signal a change of the
current browser's zoom level
"""
sourceChanged = pyqtSignal(HelpBrowser, QUrl)
titleChanged = pyqtSignal(HelpBrowser, str)
showMessage = pyqtSignal(str)
browserClosed = pyqtSignal(QWidget)
browserZoomValueChanged = pyqtSignal(int)
    def __init__(self, parent):
        """
        Constructor
        @param parent reference to the parent widget (QWidget)
        """
        E5TabWidget.__init__(self, parent, dnd=True)
        # install the specialised tab bar (imported lazily to avoid a
        # circular import)
        from .HelpTabBar import HelpTabBar
        self.__tabBar = HelpTabBar(self)
        self.setCustomTabBar(True, self.__tabBar)
        # the parent is the help main window; kept for later callbacks
        self.__mainWindow = parent
        self.setUsesScrollButtons(True)
        self.setDocumentMode(True)
        self.setElideMode(Qt.ElideNone)
        # manager remembering closed tabs so they can be restored later
        from .ClosedTabsManager import ClosedTabsManager
        self.__closedTabsManager = ClosedTabsManager(self)
        self.__closedTabsManager.closedTabAvailable.connect(
            self.__closedTabAvailable)
        # one url bar per tab, stacked and kept in sync with tab moves
        from .UrlBar.StackedUrlBar import StackedUrlBar
        self.__stackedUrlBar = StackedUrlBar(self)
        self.__tabBar.tabMoved.connect(self.__stackedUrlBar.moveBar)
        # index of the tab a context menu was last requested for
        self.__tabContextMenuIndex = -1
        self.currentChanged[int].connect(self.__currentChanged)
        self.setTabContextMenuPolicy(Qt.CustomContextMenu)
        self.customTabContextMenuRequested.connect(self.__showContextMenu)
        # right corner widget holding the navigation menu button, the
        # closed-tabs menu button and an optional single close button
        self.__rightCornerWidget = QWidget(self)
        self.__rightCornerWidgetLayout = QHBoxLayout(self.__rightCornerWidget)
        self.__rightCornerWidgetLayout.setContentsMargins(0, 0, 0, 0)
        self.__rightCornerWidgetLayout.setSpacing(0)
        # drop-down menu listing all open tabs (repopulated on show)
        self.__navigationMenu = QMenu(self)
        self.__navigationMenu.aboutToShow.connect(self.__showNavigationMenu)
        self.__navigationMenu.triggered.connect(self.__navigationMenuTriggered)
        self.__navigationButton = QToolButton(self)
        self.__navigationButton.setIcon(
            UI.PixmapCache.getIcon("1downarrow.png"))
        self.__navigationButton.setToolTip(
            self.tr("Show a navigation menu"))
        self.__navigationButton.setPopupMode(QToolButton.InstantPopup)
        self.__navigationButton.setMenu(self.__navigationMenu)
        self.__navigationButton.setEnabled(False)
        self.__rightCornerWidgetLayout.addWidget(self.__navigationButton)
        # drop-down menu listing the recently closed tabs
        self.__closedTabsMenu = QMenu(self)
        self.__closedTabsMenu.aboutToShow.connect(
            self.__aboutToShowClosedTabsMenu)
        self.__closedTabsButton = QToolButton(self)
        self.__closedTabsButton.setIcon(UI.PixmapCache.getIcon("trash.png"))
        self.__closedTabsButton.setToolTip(
            self.tr("Show a navigation menu for closed tabs"))
        self.__closedTabsButton.setPopupMode(QToolButton.InstantPopup)
        self.__closedTabsButton.setMenu(self.__closedTabsMenu)
        self.__closedTabsButton.setEnabled(False)
        self.__rightCornerWidgetLayout.addWidget(self.__closedTabsButton)
        # single close button, shown only in "SingleCloseButton" mode
        self.__closeButton = QToolButton(self)
        self.__closeButton.setIcon(UI.PixmapCache.getIcon("close.png"))
        self.__closeButton.setToolTip(
            self.tr("Close the current help window"))
        self.__closeButton.setEnabled(False)
        self.__closeButton.clicked[bool].connect(self.closeBrowser)
        self.__rightCornerWidgetLayout.addWidget(self.__closeButton)
        # either one global close button or per-tab close buttons,
        # depending on the user preference and Qt capabilities
        if Preferences.getUI("SingleCloseButton") or \
                not hasattr(self, 'setTabsClosable'):
            self.__closeButton.show()
        else:
            self.setTabsClosable(True)
            self.tabCloseRequested.connect(self.closeBrowserAt)
            self.__closeButton.hide()
        self.setCornerWidget(self.__rightCornerWidget, Qt.TopRightCorner)
        # left corner widget: the "new tab" button
        self.__newTabButton = QToolButton(self)
        self.__newTabButton.setIcon(UI.PixmapCache.getIcon("plus.png"))
        self.__newTabButton.setToolTip(
            self.tr("Open a new help window tab"))
        self.setCornerWidget(self.__newTabButton, Qt.TopLeftCorner)
        self.__newTabButton.clicked[bool].connect(self.newBrowser)
        self.__initTabContextMenu()
        # history completer is created lazily on first tab creation
        self.__historyCompleter = None
    def __initTabContextMenu(self):
        """
        Private method to create the tab context menu and the menu shown
        when clicking the tab bar background.
        """
        # menu for a right click on an individual tab
        self.__tabContextMenu = QMenu(self)
        self.tabContextNewAct = self.__tabContextMenu.addAction(
            UI.PixmapCache.getIcon("tabNew.png"),
            self.tr('New Tab'), self.newBrowser)
        self.__tabContextMenu.addSeparator()
        # move actions; enabled/disabled per tab in __showContextMenu()
        self.leftMenuAct = self.__tabContextMenu.addAction(
            UI.PixmapCache.getIcon("1leftarrow.png"),
            self.tr('Move Left'), self.__tabContextMenuMoveLeft)
        self.rightMenuAct = self.__tabContextMenu.addAction(
            UI.PixmapCache.getIcon("1rightarrow.png"),
            self.tr('Move Right'), self.__tabContextMenuMoveRight)
        self.__tabContextMenu.addSeparator()
        self.tabContextCloneAct = self.__tabContextMenu.addAction(
            self.tr("Duplicate Page"), self.__tabContextMenuClone)
        self.__tabContextMenu.addSeparator()
        self.tabContextCloseAct = self.__tabContextMenu.addAction(
            UI.PixmapCache.getIcon("tabClose.png"),
            self.tr('Close'), self.__tabContextMenuClose)
        self.tabContextCloseOthersAct = self.__tabContextMenu.addAction(
            UI.PixmapCache.getIcon("tabCloseOther.png"),
            self.tr("Close Others"), self.__tabContextMenuCloseOthers)
        self.__tabContextMenu.addAction(
            self.tr('Close All'), self.closeAllBrowsers)
        self.__tabContextMenu.addSeparator()
        # printing actions for the selected tab
        self.__tabContextMenu.addAction(
            UI.PixmapCache.getIcon("printPreview.png"),
            self.tr('Print Preview'), self.__tabContextMenuPrintPreview)
        self.__tabContextMenu.addAction(
            UI.PixmapCache.getIcon("print.png"),
            self.tr('Print'), self.__tabContextMenuPrint)
        self.__tabContextMenu.addAction(
            UI.PixmapCache.getIcon("printPdf.png"),
            self.tr('Print as PDF'), self.__tabContextMenuPrintPdf)
        self.__tabContextMenu.addSeparator()
        self.__tabContextMenu.addAction(
            UI.PixmapCache.getIcon("reload.png"),
            self.tr('Reload All'), self.reloadAllBrowsers)
        self.__tabContextMenu.addSeparator()
        self.__tabContextMenu.addAction(
            UI.PixmapCache.getIcon("addBookmark.png"),
            self.tr('Bookmark All Tabs'), self.__mainWindow.bookmarkAll)
        # menu for a right click on the empty part of the tab bar
        self.__tabBackContextMenu = QMenu(self)
        self.__tabBackContextMenu.addAction(
            self.tr('Close All'), self.closeAllBrowsers)
        self.__tabBackContextMenu.addAction(
            UI.PixmapCache.getIcon("reload.png"),
            self.tr('Reload All'), self.reloadAllBrowsers)
        self.__tabBackContextMenu.addAction(
            UI.PixmapCache.getIcon("addBookmark.png"),
            self.tr('Bookmark All Tabs'), self.__mainWindow.bookmarkAll)
        self.__tabBackContextMenu.addSeparator()
        # the action's data holds the index of the closed tab to restore
        self.__restoreClosedTabAct = self.__tabBackContextMenu.addAction(
            UI.PixmapCache.getIcon("trash.png"),
            self.tr('Restore Closed Tab'), self.restoreClosedTab)
        self.__restoreClosedTabAct.setEnabled(False)
        self.__restoreClosedTabAct.setData(0)
def __showContextMenu(self, coord, index):
"""
Private slot to show the tab context menu.
@param coord the position of the mouse pointer (QPoint)
@param index index of the tab the menu is requested for (integer)
"""
coord = self.mapToGlobal(coord)
if index == -1:
self.__tabBackContextMenu.popup(coord)
else:
self.__tabContextMenuIndex = index
self.leftMenuAct.setEnabled(index > 0)
self.rightMenuAct.setEnabled(index < self.count() - 1)
self.tabContextCloseOthersAct.setEnabled(self.count() > 1)
self.__tabContextMenu.popup(coord)
def __tabContextMenuMoveLeft(self):
"""
Private method to move a tab one position to the left.
"""
self.moveTab(self.__tabContextMenuIndex,
self.__tabContextMenuIndex - 1)
def __tabContextMenuMoveRight(self):
"""
Private method to move a tab one position to the right.
"""
self.moveTab(self.__tabContextMenuIndex,
self.__tabContextMenuIndex + 1)
def __tabContextMenuClone(self):
"""
Private method to clone the selected tab.
"""
idx = self.__tabContextMenuIndex
if idx < 0:
idx = self.currentIndex()
if idx < 0 or idx > self.count():
return
req = QNetworkRequest(self.widget(idx).url())
req.setRawHeader(b"X-Eric6-UserLoadAction", b"1")
self.newBrowser(None, (req, QNetworkAccessManager.GetOperation, b""))
def __tabContextMenuClose(self):
"""
Private method to close the selected tab.
"""
self.closeBrowserAt(self.__tabContextMenuIndex)
def __tabContextMenuCloseOthers(self):
"""
Private slot to close all other tabs.
"""
index = self.__tabContextMenuIndex
for i in list(range(self.count() - 1, index, -1)) + \
list(range(index - 1, -1, -1)):
self.closeBrowserAt(i)
def __tabContextMenuPrint(self):
"""
Private method to print the selected tab.
"""
browser = self.widget(self.__tabContextMenuIndex)
self.printBrowser(browser)
def __tabContextMenuPrintPdf(self):
"""
Private method to print the selected tab as PDF.
"""
browser = self.widget(self.__tabContextMenuIndex)
self.printBrowserPdf(browser)
def __tabContextMenuPrintPreview(self):
"""
Private method to show a print preview of the selected tab.
"""
browser = self.widget(self.__tabContextMenuIndex)
self.printPreviewBrowser(browser)
    def newBrowser(self, link=None, requestData=None, position=-1):
        """
        Public method to create a new web browser tab.
        @param link link to be shown (string or QUrl)
        @param requestData tuple containing the request data (QNetworkRequest,
            QNetworkAccessManager.Operation, QByteArray)
        @keyparam position position to create the new tab at or -1 to add it
            to the end (integer)
        """
        # normalize the link argument to a plain string
        if link is None:
            linkName = ""
        elif isinstance(link, QUrl):
            linkName = link.toString()
        else:
            linkName = link
        # create the url bar belonging to the new tab
        from .UrlBar.UrlBar import UrlBar
        urlbar = UrlBar(self.__mainWindow, self)
        # lazily create the shared history completer on first use
        if self.__historyCompleter is None:
            import Helpviewer.HelpWindow
            from .History.HistoryCompleter import HistoryCompletionModel, \
                HistoryCompleter
            self.__historyCompletionModel = HistoryCompletionModel(self)
            self.__historyCompletionModel.setSourceModel(
                Helpviewer.HelpWindow.HelpWindow.historyManager()
                .historyFilterModel())
            self.__historyCompleter = HistoryCompleter(
                self.__historyCompletionModel, self)
            self.__historyCompleter.activated[str].connect(self.__pathSelected)
        urlbar.setCompleter(self.__historyCompleter)
        urlbar.returnPressed.connect(self.__lineEditReturnPressed)
        # keep url bar stack and tab positions in sync
        if position == -1:
            self.__stackedUrlBar.addWidget(urlbar)
        else:
            self.__stackedUrlBar.insertWidget(position, urlbar)
        # create the browser widget and wire up all of its signals
        browser = HelpBrowser(self.__mainWindow, self)
        urlbar.setBrowser(browser)
        browser.sourceChanged.connect(self.__sourceChanged)
        browser.titleChanged.connect(self.__titleChanged)
        browser.highlighted.connect(self.showMessage)
        browser.backwardAvailable.connect(
            self.__mainWindow.setBackwardAvailable)
        browser.forwardAvailable.connect(self.__mainWindow.setForwardAvailable)
        browser.loadStarted.connect(self.__loadStarted)
        browser.loadFinished.connect(self.__loadFinished)
        browser.iconChanged.connect(self.__iconChanged)
        browser.search.connect(self.newBrowser)
        browser.page().windowCloseRequested.connect(
            self.__windowCloseRequested)
        browser.page().printRequested.connect(self.__printRequested)
        browser.zoomValueChanged.connect(self.browserZoomValueChanged)
        # add the tab with a placeholder title and make it current
        if position == -1:
            index = self.addTab(browser, self.tr("..."))
        else:
            index = self.insertTab(position, browser, self.tr("..."))
        self.setCurrentIndex(index)
        # with at least one tab open the close actions can be enabled
        self.__mainWindow.closeAct.setEnabled(True)
        self.__mainWindow.closeAllAct.setEnabled(True)
        self.__closeButton.setEnabled(True)
        self.__navigationButton.setEnabled(True)
        # without an explicit link or request, honour the configured
        # startup behavior (0 = home page, 1 = speed dial)
        if not linkName and not requestData:
            if Preferences.getHelp("StartupBehavior") == 0:
                linkName = Preferences.getHelp("HomePage")
            elif Preferences.getHelp("StartupBehavior") == 1:
                linkName = "eric:speeddial"
        if linkName:
            browser.setSource(QUrl(linkName))
            # fall back to the (elided) URL while no title is available
            if not browser.documentTitle():
                self.setTabText(index, self.__elide(linkName, Qt.ElideMiddle))
                self.setTabToolTip(index, linkName)
            else:
                self.setTabText(
                    index,
                    self.__elide(browser.documentTitle().replace("&", "&&")))
                self.setTabToolTip(index, browser.documentTitle())
        elif requestData:
            browser.load(*requestData)
def newBrowserAfter(self, browser, link=None, requestData=None):
"""
Public method to create a new web browser tab after a given one.
@param browser reference to the browser to add after (HelpBrowser)
@param link link to be shown (string or QUrl)
@param requestData tuple containing the request data (QNetworkRequest,
QNetworkAccessManager.Operation, QByteArray)
"""
if browser:
position = self.indexOf(browser) + 1
else:
position = -1
self.newBrowser(link, requestData, position)
def __showNavigationMenu(self):
"""
Private slot to show the navigation button menu.
"""
self.__navigationMenu.clear()
for index in range(self.count()):
act = self.__navigationMenu.addAction(
self.tabIcon(index), self.tabText(index))
act.setData(index)
def __navigationMenuTriggered(self, act):
"""
Private slot called to handle the navigation button menu selection.
@param act reference to the selected action (QAction)
"""
index = act.data()
if index is not None:
self.setCurrentIndex(index)
def __windowCloseRequested(self):
"""
Private slot to handle the windowCloseRequested signal of a browser.
"""
page = self.sender()
if page is None:
return
browser = page.view()
if browser is None:
return
index = self.indexOf(browser)
self.closeBrowserAt(index)
def reloadAllBrowsers(self):
"""
Public slot to reload all browsers.
"""
for index in range(self.count()):
browser = self.widget(index)
browser and browser.reload()
    def closeBrowser(self):
        """
        Public slot called to handle the close action.
        
        Closes the currently selected browser tab.
        """
        self.closeBrowserAt(self.currentIndex())
def closeAllBrowsers(self):
"""
Public slot called to handle the close all action.
"""
for index in range(self.count() - 1, -1, -1):
self.closeBrowserAt(index)
def closeBrowserAt(self, index):
"""
Public slot to close a browser based on its index.
@param index index of browser to close (integer)
"""
urlbar = self.__stackedUrlBar.widget(index)
self.__stackedUrlBar.removeWidget(urlbar)
del urlbar
browser = self.widget(index)
if browser is None:
return
self.__closedTabsManager.recordBrowser(browser, index)
browser.home()
self.removeTab(index)
self.browserClosed.emit(browser)
del browser
if self.count() == 0:
self.newBrowser()
else:
self.currentChanged[int].emit(self.currentIndex())
    def currentBrowser(self):
        """
        Public method to get a reference to the current browser.
        
        @return reference to the current browser (HelpBrowser)
        """
        return self.currentWidget()
    def browserAt(self, index):
        """
        Public method to get a reference to the browser with the given index.
        
        @param index index of the browser to get (integer)
        @return reference to the indexed browser (HelpBrowser)
        """
        return self.widget(index)
def browsers(self):
"""
Public method to get a list of references to all browsers.
@return list of references to browsers (list of HelpBrowser)
"""
li = []
for index in range(self.count()):
li.append(self.widget(index))
return li
@pyqtSlot()
def printBrowser(self, browser=None):
"""
Public slot called to print the displayed page.
@param browser reference to the browser to be printed (HelpBrowser)
"""
if browser is None:
browser = self.currentBrowser()
self.__printRequested(browser.page().mainFrame())
def __printRequested(self, frame):
"""
Private slot to handle a print request.
@param frame reference to the frame to be printed (QWebFrame)
"""
printer = QPrinter(mode=QPrinter.HighResolution)
if Preferences.getPrinter("ColorMode"):
printer.setColorMode(QPrinter.Color)
else:
printer.setColorMode(QPrinter.GrayScale)
if Preferences.getPrinter("FirstPageFirst"):
printer.setPageOrder(QPrinter.FirstPageFirst)
else:
printer.setPageOrder(QPrinter.LastPageFirst)
printer.setPageMargins(
Preferences.getPrinter("LeftMargin") * 10,
Preferences.getPrinter("TopMargin") * 10,
Preferences.getPrinter("RightMargin") * 10,
Preferences.getPrinter("BottomMargin") * 10,
QPrinter.Millimeter
)
printerName = Preferences.getPrinter("PrinterName")
if printerName:
printer.setPrinterName(printerName)
printDialog = QPrintDialog(printer, self)
if printDialog.exec_() == QDialog.Accepted:
try:
frame.print_(printer)
except AttributeError:
E5MessageBox.critical(
self,
self.tr("eric6 Web Browser"),
self.tr(
"""<p>Printing is not available due to a bug in"""
""" PyQt5. Please upgrade.</p>"""))
return
@pyqtSlot()
def printBrowserPdf(self, browser=None):
"""
Public slot called to print the displayed page to PDF.
@param browser reference to the browser to be printed (HelpBrowser)
"""
if browser is None:
browser = self.currentBrowser()
self.__printPdfRequested(browser.page().mainFrame())
def __printPdfRequested(self, frame):
"""
Private slot to handle a print to PDF request.
@param frame reference to the frame to be printed (QWebFrame)
"""
printer = QPrinter(mode=QPrinter.HighResolution)
if Preferences.getPrinter("ColorMode"):
printer.setColorMode(QPrinter.Color)
else:
printer.setColorMode(QPrinter.GrayScale)
printerName = Preferences.getPrinter("PrinterName")
if printerName:
printer.setPrinterName(printerName)
printer.setOutputFormat(QPrinter.PdfFormat)
name = frame.url().path().rsplit('/', 1)[-1]
if name:
name = name.rsplit('.', 1)[0]
name += '.pdf'
printer.setOutputFileName(name)
printDialog = QPrintDialog(printer, self)
if printDialog.exec_() == QDialog.Accepted:
try:
frame.print_(printer)
except AttributeError:
E5MessageBox.critical(
self,
self.tr("eric6 Web Browser"),
self.tr(
"""<p>Printing is not available due to a bug in"""
""" PyQt5. Please upgrade.</p>"""))
return
@pyqtSlot()
def printPreviewBrowser(self, browser=None):
"""
Public slot called to show a print preview of the displayed file.
@param browser reference to the browser to be printed (HelpBrowserWV)
"""
from PyQt5.QtPrintSupport import QPrintPreviewDialog
if browser is None:
browser = self.currentBrowser()
printer = QPrinter(mode=QPrinter.HighResolution)
if Preferences.getPrinter("ColorMode"):
printer.setColorMode(QPrinter.Color)
else:
printer.setColorMode(QPrinter.GrayScale)
if Preferences.getPrinter("FirstPageFirst"):
printer.setPageOrder(QPrinter.FirstPageFirst)
else:
printer.setPageOrder(QPrinter.LastPageFirst)
printer.setPageMargins(
Preferences.getPrinter("LeftMargin") * 10,
Preferences.getPrinter("TopMargin") * 10,
Preferences.getPrinter("RightMargin") * 10,
Preferences.getPrinter("BottomMargin") * 10,
QPrinter.Millimeter
)
printerName = Preferences.getPrinter("PrinterName")
if printerName:
printer.setPrinterName(printerName)
self.__printPreviewBrowser = browser
preview = QPrintPreviewDialog(printer, self)
preview.paintRequested.connect(self.__printPreview)
preview.exec_()
def __printPreview(self, printer):
"""
Private slot to generate a print preview.
@param printer reference to the printer object (QPrinter)
"""
try:
self.__printPreviewBrowser.print_(printer)
except AttributeError:
E5MessageBox.critical(
self,
self.tr("eric6 Web Browser"),
self.tr(
"""<p>Printing is not available due to a bug in PyQt5."""
"""Please upgrade.</p>"""))
return
def __sourceChanged(self, url):
"""
Private slot to handle a change of a browsers source.
@param url URL of the new site (QUrl)
"""
browser = self.sender()
if browser is not None:
self.sourceChanged.emit(browser, url)
def __titleChanged(self, title):
"""
Private slot to handle a change of a browsers title.
@param title new title (string)
"""
browser = self.sender()
if browser is not None:
index = self.indexOf(browser)
if title == "":
title = browser.url().toString()
self.setTabText(index, self.__elide(title.replace("&", "&&")))
self.setTabToolTip(index, title)
self.titleChanged.emit(browser, title)
def __elide(self, txt, mode=Qt.ElideRight, length=40):
"""
Private method to elide some text.
@param txt text to be elided (string)
@keyparam mode elide mode (Qt.TextElideMode)
@keyparam length amount of characters to be used (integer)
@return the elided text (string)
"""
if mode == Qt.ElideNone or len(txt) < length:
return txt
elif mode == Qt.ElideLeft:
return "...{0}".format(txt[-length:])
elif mode == Qt.ElideMiddle:
return "{0}...{1}".format(txt[:length // 2], txt[-(length // 2):])
elif mode == Qt.ElideRight:
return "{0}...".format(txt[:length])
else:
# just in case
return txt
    def preferencesChanged(self):
        """
        Public slot to handle a change of preferences.
        
        Propagates the change to all browsers and url bars and switches
        between per-tab close buttons and the single close button.
        """
        for browser in self.browsers():
            browser.preferencesChanged()
        for urlbar in self.__stackedUrlBar.urlBars():
            urlbar.preferencesChanged()
        # Use the single close button when configured to do so, or when the
        # tab widget implementation has no per-tab close button support.
        if Preferences.getUI("SingleCloseButton") or \
           not hasattr(self, 'setTabsClosable'):
            if hasattr(self, 'setTabsClosable'):
                self.setTabsClosable(False)
                try:
                    self.tabCloseRequested.disconnect(self.closeBrowserAt)
                except TypeError:
                    # Signal was not connected; nothing to disconnect.
                    pass
            self.__closeButton.show()
        else:
            self.setTabsClosable(True)
            self.tabCloseRequested.connect(self.closeBrowserAt)
            self.__closeButton.hide()
    def __loadStarted(self):
        """
        Private method to handle the loadStarted signal.
        
        Shows a loading indicator on the tab and enables the main
        window's loading actions.
        """
        browser = self.sender()
        if browser is not None:
            index = self.indexOf(browser)
            anim = self.animationLabel(
                index, os.path.join(getConfig("ericPixDir"), "loading.gif"),
                100)
            if not anim:
                # No animation label available; fall back to a static icon.
                loading = QIcon(os.path.join(getConfig("ericPixDir"),
                                "loading.gif"))
                self.setTabIcon(index, loading)
            else:
                self.setTabIcon(index, QIcon())
            self.setTabText(index, self.tr("Loading..."))
            self.setTabToolTip(index, self.tr("Loading..."))
            self.showMessage.emit(self.tr("Loading..."))
            self.__mainWindow.setLoadingActions(True)
def __loadFinished(self, ok):
"""
Private method to handle the loadFinished signal.
@param ok flag indicating the result (boolean)
"""
browser = self.sender()
if not isinstance(browser, HelpBrowser):
return
if browser is not None:
import Helpviewer.HelpWindow
index = self.indexOf(browser)
self.resetAnimation(index)
self.setTabIcon(
index, Helpviewer.HelpWindow.HelpWindow.icon(browser.url()))
if ok:
self.showMessage.emit(self.tr("Finished loading"))
else:
self.showMessage.emit(self.tr("Failed to load"))
self.__mainWindow.setLoadingActions(False)
def __iconChanged(self):
"""
Private slot to handle the icon change.
"""
browser = self.sender()
if browser is not None:
import Helpviewer.HelpWindow
self.setTabIcon(
self.indexOf(browser),
Helpviewer.HelpWindow.HelpWindow.icon(browser.url()))
Helpviewer.HelpWindow.HelpWindow.bookmarksManager()\
.iconChanged(browser.url())
def getSourceFileList(self):
"""
Public method to get a list of all opened source files.
@return dictionary with tab id as key and host/namespace as value
"""
sourceList = {}
for i in range(self.count()):
browser = self.widget(i)
if browser is not None and \
browser.source().isValid():
sourceList[i] = browser.source().host()
return sourceList
    def shallShutDown(self):
        """
        Public method to check, if the application should be shut down.
        
        With more than one tab open (and the corresponding warning
        enabled) the user is asked to confirm; "Close Current Tab"
        closes only the active tab and cancels the shut down.
        
        @return flag indicating a shut down (boolean)
        """
        if self.count() > 1 and Preferences.getHelp("WarnOnMultipleClose"):
            mb = E5MessageBox.E5MessageBox(
                E5MessageBox.Information,
                self.tr("Are you sure you want to close the window?"),
                self.tr("""Are you sure you want to close the window?\n"""
                        """You have %n tab(s) open.""", "", self.count()),
                modal=True,
                parent=self)
            if self.__mainWindow.fromEric:
                # Running embedded in eric: offer "Close" instead of "Quit".
                quitButton = mb.addButton(
                    self.tr("&Close"), E5MessageBox.AcceptRole)
                quitButton.setIcon(UI.PixmapCache.getIcon("close.png"))
            else:
                quitButton = mb.addButton(
                    self.tr("&Quit"), E5MessageBox.AcceptRole)
                quitButton.setIcon(UI.PixmapCache.getIcon("exit.png"))
            closeTabButton = mb.addButton(
                self.tr("C&lose Current Tab"), E5MessageBox.AcceptRole)
            closeTabButton.setIcon(UI.PixmapCache.getIcon("tabClose.png"))
            mb.addButton(E5MessageBox.Cancel)
            mb.exec_()
            if mb.clickedButton() == quitButton:
                return True
            else:
                if mb.clickedButton() == closeTabButton:
                    self.closeBrowser()
                return False
        return True
    def stackedUrlBar(self):
        """
        Public method to get a reference to the stacked url bar.
        
        @return reference to the stacked url bar (StackedUrlBar)
        """
        return self.__stackedUrlBar
    def currentUrlBar(self):
        """
        Public method to get a reference to the current url bar.
        
        @return reference to the current url bar (UrlBar)
        """
        return self.__stackedUrlBar.currentWidget()
    def __lineEditReturnPressed(self):
        """
        Private slot to handle the entering of an URL.
        
        Loads the URL in a new tab when the Alt modifier is held,
        otherwise in the current browser.
        """
        edit = self.sender()
        url = self.__guessUrlFromPath(edit.text())
        request = QNetworkRequest(url)
        # Mark the request as a user initiated load action.
        request.setRawHeader(b"X-Eric6-UserLoadAction", b"1")
        if e5App().keyboardModifiers() == Qt.AltModifier:
            self.newBrowser(
                None, (request, QNetworkAccessManager.GetOperation, b""))
        else:
            # NOTE(review): setSource() is called with a None URL and the
            # request data tuple; presumably HelpBrowser.setSource accepts
            # (url, requestData) -- confirm against its definition.
            self.currentBrowser().setSource(
                None, (request, QNetworkAccessManager.GetOperation, b""))
            self.currentBrowser().setFocus()
def __pathSelected(self, path):
"""
Private slot called when a URL is selected from the completer.
@param path path to be shown (string)
"""
url = self.__guessUrlFromPath(path)
self.currentBrowser().setSource(url)
    def __guessUrlFromPath(self, path):
        """
        Private method to guess an URL given a path string.
        
        Tries, in order: a keyword search via the open search manager, a
        direct URL interpretation, the "about:home" alias, the search
        shortcut schemes ("s", "search") and finally the configured
        default scheme prepended to the path.
        
        @param path path string to guess an URL for (string)
        @return guessed URL (QUrl)
        """
        manager = self.__mainWindow.openSearchManager()
        path = Utilities.fromNativeSeparators(path)
        url = manager.convertKeywordSearchToUrl(path)
        if url.isValid():
            return url
        try:
            url = QUrl.fromUserInput(path)
        except AttributeError:
            # Qt version without QUrl.fromUserInput().
            url = QUrl(path)
        if url.scheme() == "about" and \
           url.path() == "home":
            url = QUrl("eric:home")
        if url.scheme() in ["s", "search"]:
            url = manager.currentEngine().searchUrl(url.path().strip())
        if url.scheme() != "" and \
           (url.host() != "" or url.path() != ""):
            return url
        # Nothing matched: fall back to the configured default scheme.
        urlString = Preferences.getHelp("DefaultScheme") + path.strip()
        url = QUrl.fromEncoded(urlString.encode("utf-8"), QUrl.TolerantMode)
        return url
def __currentChanged(self, index):
"""
Private slot to handle an index change.
@param index new index (integer)
"""
self.__stackedUrlBar.setCurrentIndex(index)
browser = self.browserAt(index)
if browser is not None:
if browser.url() == "" and browser.hasFocus():
self.__stackedUrlBar.currentWidget.setFocus()
elif browser.url() != "":
browser.setFocus()
def restoreClosedTab(self):
"""
Public slot to restore the most recently closed tab.
"""
if not self.canRestoreClosedTab():
return
act = self.sender()
tab = self.__closedTabsManager.getClosedTabAt(act.data())
self.newBrowser(tab.url.toString(), position=tab.position)
    def canRestoreClosedTab(self):
        """
        Public method to check, if closed tabs can be restored.
        
        @return flag indicating that closed tabs can be restored (boolean)
        """
        return self.__closedTabsManager.isClosedTabAvailable()
def restoreAllClosedTabs(self):
"""
Public slot to restore all closed tabs.
"""
if not self.canRestoreClosedTab():
return
for tab in self.__closedTabsManager.allClosedTabs():
self.newBrowser(tab.url.toString(), position=tab.position)
self.__closedTabsManager.clearList()
    def clearClosedTabsList(self):
        """
        Public slot to clear the list of closed tabs.
        """
        self.__closedTabsManager.clearList()
def __aboutToShowClosedTabsMenu(self):
"""
Private slot to populate the closed tabs menu.
"""
fm = self.__closedTabsMenu.fontMetrics()
maxWidth = fm.width('m') * 40
self.__closedTabsMenu.clear()
index = 0
for tab in self.__closedTabsManager.allClosedTabs():
title = fm.elidedText(tab.title, Qt.ElideRight, maxWidth)
self.__closedTabsMenu.addAction(
self.__mainWindow.icon(tab.url), title,
self.restoreClosedTab).setData(index)
index += 1
self.__closedTabsMenu.addSeparator()
self.__closedTabsMenu.addAction(
self.tr("Restore All Closed Tabs"), self.restoreAllClosedTabs)
self.__closedTabsMenu.addAction(
self.tr("Clear List"), self.clearClosedTabsList)
    def closedTabsManager(self):
        """
        Public slot to get a reference to the closed tabs manager.
        
        @return reference to the closed tabs manager (ClosedTabsManager)
        """
        return self.__closedTabsManager
    def __closedTabAvailable(self, avail):
        """
        Private slot to handle changes of the availability of closed tabs.
        
        @param avail flag indicating the availability of closed tabs (boolean)
        """
        # Keep the toolbar button and the menu action in sync.
        self.__closedTabsButton.setEnabled(avail)
        self.__restoreClosedTabAct.setEnabled(avail)
| paulmadore/Eric-IDE | 6-6.0.9/eric/Helpviewer/HelpTabWidget.py | Python | gpl-3.0 | 37,071 |
"""
Track channel ops for permissions checks
Requires:
server_info.py
"""
import gc
import json
import logging
import weakref
from collections import Mapping, Iterable, namedtuple
from contextlib import suppress
from numbers import Number
from operator import attrgetter
from irclib.parser import Prefix
import cloudbot.bot
from cloudbot import hook
from cloudbot.clients.irc import IrcClient
from cloudbot.util import web
from cloudbot.util.mapping import KeyFoldDict, KeyFoldMixin
logger = logging.getLogger("cloudbot")
class WeakDict(dict):
    """A dict subclass that can be the target of a weak reference.

    Plain ``dict`` instances do not support weak references; instances
    of this subclass do.
    """
class MemberNotFoundException(KeyError):
    """Raised when a nick cannot be found among a channel's members.

    A snapshot of the channel's member list is captured for debugging.
    """

    def __init__(self, name, chan):
        super().__init__(
            "No such member '{}' in channel '{}'".format(name, chan.name)
        )
        self.name = name
        self.chan = chan
        # Snapshot the membership at the time of the error.
        self.members = list(chan.users.values())
        self.nicks = [member.user.nick for member in self.members]
        self.masks = [member.user.mask.mask for member in self.members]
class ChannelMembersDict(KeyFoldDict):
    """Case-insensitive nick -> member mapping for a single channel.

    Failed lookups raise MemberNotFoundException so the offending
    channel can be identified from the error alone.
    """

    def __init__(self, chan):
        super().__init__()
        # Weak reference: the channel owns this dict, not vice versa.
        self.chan = weakref.ref(chan)

    def __getitem__(self, item):
        try:
            return super().__getitem__(item)
        except KeyError as err:
            raise MemberNotFoundException(item, self.chan()) from err

    def __delitem__(self, item):
        try:
            super().__delitem__(item)
        except KeyError as err:
            raise MemberNotFoundException(item, self.chan()) from err

    def pop(self, key, *args, **kwargs):
        try:
            return super().pop(key, *args, **kwargs)
        except KeyError as err:
            raise MemberNotFoundException(key, self.chan()) from err
class KeyFoldWeakValueDict(KeyFoldMixin, weakref.WeakValueDictionary):
    """A case-insensitive WeakValueDictionary.

    Keys are folded by KeyFoldMixin; values are held weakly and vanish
    once no strong reference remains.
    """
class ChanDict(KeyFoldDict):
    """Case-insensitive channel-name -> Channel mapping for one network."""

    def __init__(self, conn):
        """
        :type conn: cloudbot.client.Client
        """
        super().__init__()
        # Weak reference back to the owning client.
        self.conn = weakref.ref(conn)

    def getchan(self, name):
        """Return the named channel, creating and caching it if missing.

        :type name: str
        """
        try:
            return self[name]
        except KeyError:
            chan = self[name] = Channel(name, self.conn())
            return chan
class UsersDict(KeyFoldWeakValueDict):
    """Case-insensitive nick -> User mapping for one network.

    Values are weakly held, so a User disappears when nothing else
    references it.
    """

    def __init__(self, conn):
        """
        :type conn: cloudbot.client.Client
        """
        super().__init__()
        self.conn = weakref.ref(conn)

    def getuser(self, nick):
        """Return the user for *nick*, creating and caching it if missing.

        :type nick: str
        """
        try:
            return self[nick]
        except KeyError:
            user = self[nick] = User(nick, self.conn())
            return user
class MappingAttributeAdapter:
    """Adapter exposing attribute access through item syntax.

    ``obj["x"]`` returns ``obj.x`` when such an attribute exists,
    otherwise it falls back to the internal ``data`` dict; item
    assignment mirrors the same rule.
    """

    def __init__(self):
        self.data = {}

    def __getitem__(self, item):
        try:
            return getattr(self, item)
        except AttributeError:
            return self.data[item]

    def __setitem__(self, key, value):
        if hasattr(self, key):
            setattr(self, key, value)
        else:
            self.data[key] = value
class Channel(MappingAttributeAdapter):
    """
    Represents a channel and relevant data
    """

    class Member(MappingAttributeAdapter):
        """
        Store a user's membership with the channel
        """

        def __init__(self, user, channel):
            self.user = user
            self.channel = channel
            self.conn = user.conn
            # Status prefixes (e.g. op/voice) this user holds here,
            # kept sorted by descending privilege level.
            self.status = []
            super().__init__()

        def add_status(self, status, sort=True):
            """
            Add a status to this membership

            Duplicate adds indicate a tracking desync and are only
            logged, not applied.

            :type status: plugins.core.server_info.Status
            :type sort: bool  # sort=False lets bulk updates defer sorting
            """
            if status in self.status:
                logger.warning(
                    "[%s|chantrack] Attempted to add existing status "
                    "to channel member: %s %s",
                    self.conn.name, self, status
                )
            else:
                self.status.append(status)
                if sort:
                    self.sort_status()

        def remove_status(self, status):
            """
            Remove a status from this membership; unknown statuses are
            logged and ignored.

            :type status: plugins.core.server_info.Status
            """
            if status not in self.status:
                logger.warning(
                    "[%s|chantrack] Attempted to remove status not set "
                    "on member: %s %s",
                    self.conn.name, self, status
                )
            else:
                self.status.remove(status)

        def sort_status(self):
            """
            Ensure the status list is properly sorted
            """
            # De-duplicate, then order highest privilege level first.
            status = list(set(self.status))
            status.sort(key=attrgetter("level"), reverse=True)
            self.status = status

    def __init__(self, name, conn):
        """
        :type name: str
        :type conn: cloudbot.client.Client
        """
        super().__init__()
        self.name = name
        # Weak proxy: the client owns the channel data, not vice versa.
        self.conn = weakref.proxy(conn)
        self.users = ChannelMembersDict(self)
        # True while a /NAMES burst for this channel is being collected.
        self.receiving_names = False

    def get_member(self, user, create=False):
        """
        Return the user's membership; optionally create it on demand.

        :type user: User
        :type create: bool
        :rtype: Channel.Member
        """
        try:
            data = self.users[user.nick]
        except KeyError:
            if not create:
                raise

            self.users[user.nick] = data = self.Member(user, self)

        return data
class User(MappingAttributeAdapter):
    """
    Represent a user on a network
    """

    def __init__(self, name, conn):
        """
        :type name: str
        :type conn: cloudbot.client.Client
        """
        # nick!ident@host container; the nick/ident/host properties
        # below delegate to it.
        self.mask = Prefix(name)
        self.conn = weakref.proxy(conn)
        self.realname = None
        # Services account name; None while logged out ('*' maps to None).
        self._account = None
        self.server = None
        self.is_away = False
        self.away_message = None
        self.is_oper = False
        # channel name -> Channel.Member; values are weakly held.
        self.channels = KeyFoldWeakValueDict()
        super().__init__()

    def join_channel(self, channel):
        """
        Register this user's membership with the given channel.

        :type channel: Channel
        """
        self.channels[channel.name] = memb = channel.get_member(
            self, create=True
        )
        return memb

    @property
    def account(self):
        """
        The user's nickserv account
        """
        return self._account

    @account.setter
    def account(self, value):
        # IRC uses '*' to mean "not logged in".
        if value == '*':
            value = None

        self._account = value

    @property
    def nick(self):
        """
        The user's nickname
        """
        return self.mask.nick

    @nick.setter
    def nick(self, value):
        # The Prefix is treated as immutable; rebuild it on change.
        self.mask = Prefix(value, self.ident, self.host)

    @property
    def ident(self):
        """
        The user's ident/username
        """
        return self.mask.user

    @ident.setter
    def ident(self, value):
        self.mask = Prefix(self.nick, value, self.host)

    @property
    def host(self):
        """
        The user's host/address
        """
        return self.mask.host

    @host.setter
    def host(self, value):
        self.mask = Prefix(self.nick, self.ident, value)
# region util functions
def get_users(conn):
    """Return the per-connection user registry, creating it if needed.

    :type conn: cloudbot.client.Client
    :rtype: UsersDict
    """
    try:
        return conn.memory["users"]
    except KeyError:
        return conn.memory.setdefault("users", UsersDict(conn))
def get_chans(conn):
    """Return the per-connection channel registry, creating it if needed.

    :type conn: cloudbot.client.Client
    :rtype: ChanDict
    """
    try:
        return conn.memory["chan_data"]
    except KeyError:
        return conn.memory.setdefault("chan_data", ChanDict(conn))
# endregion util functions
def update_chan_data(conn, chan):
    # type: (IrcClient, str) -> None
    """
    Start the process of updating channel data from /NAMES

    :param conn: The current connection
    :param chan: The channel to update
    """
    channel = get_chans(conn).getchan(chan)
    # Reset the collection flag; the 353 handler starts a fresh burst.
    channel.receiving_names = False
    conn.cmd("NAMES", chan)
def update_conn_data(conn):
    # type: (IrcClient) -> None
    """
    Update all channel data for this connection

    :param conn: The connection to update
    """
    # Iterate over a snapshot so changes to conn.channels are safe.
    for channel in set(conn.channels):
        update_chan_data(conn, channel)
# IRCv3 capabilities this plugin knows how to handle.
SUPPORTED_CAPS = frozenset({
    "account-notify",
    "away-notify",
    "chghost",
    "extended-join",
    "multi-prefix",
    "userhost-in-names",
})
@hook.on_cap_available(*SUPPORTED_CAPS)
def do_caps():
    """
    Request all available CAPs we support

    Returning True tells the CAP negotiation to request the capability.
    """
    return True
def is_cap_available(conn, cap):
    """Return True when the server has acknowledged the given capability.

    :type conn: cloudbot.client.Client
    :type cap: str
    """
    server_caps = conn.memory.get("server_caps", {})
    return bool(server_caps.get(cap, False))
@hook.on_start
def get_chan_data(bot: cloudbot.bot.CloudBot):
    """Populate channel data for every connected IRC network.

    :type bot: cloudbot.bot.CloudBot
    """
    for conn in bot.connections.values():
        if not (conn.connected and conn.type == 'irc'):
            continue
        assert isinstance(conn, IrcClient)
        init_chan_data(conn, False)
        update_conn_data(conn)
def clean_user_data(user):
    """Re-sort the cached status prefixes of each membership.

    :type user: User
    """
    for membership in user.channels.values():
        membership.sort_status()
def clean_chan_data(chan):
    """Drop transient /NAMES bookkeeping from a channel.

    :type chan: Channel
    """
    try:
        del chan.data["new_users"]
    except KeyError:
        pass
def clean_conn_data(conn):
    """Run the user and channel cleanups for one connection.

    :type conn: cloudbot.client.Client
    """
    for user in get_users(conn).values():
        clean_user_data(user)

    for chan in get_chans(conn).values():
        clean_chan_data(chan)
def clean_data(bot):
    """Run the per-connection cleanups for every network.

    :type bot: cloudbot.bot.CloudBot
    """
    for connection in bot.connections.values():
        clean_conn_data(connection)
@hook.connect
def init_chan_data(conn, _clear=True):
    """Ensure the connection's tracking dicts exist (and optionally clear).

    :type conn: cloudbot.client.Client
    :type _clear: bool
    """
    chan_data = get_chans(conn)
    users = get_users(conn)
    if isinstance(chan_data, ChanDict) and isinstance(users, UsersDict):
        if _clear:
            chan_data.clear()
            users.clear()
        return None

    # Stale objects left by an older plugin version: rebuild them.
    del conn.memory["chan_data"]
    del conn.memory["users"]
    return init_chan_data(conn, _clear)
def parse_names_item(item, statuses, has_multi_prefix, has_userhost):
    """
    Parse an entry from /NAMES

    :param item: The entry to parse
    :param statuses: Status prefixes on this network
    :param has_multi_prefix: Whether multi-prefix CAP is enabled
    :param has_userhost: Whether userhost-in-names CAP is enabled
    :return: The parsed data
    """
    user_status = []
    while item[:1] in statuses:
        prefix_char = item[:1]
        item = item[1:]
        user_status.append(statuses[prefix_char])
        if not has_multi_prefix:
            # Without multi-prefix the server sends at most one prefix,
            # so strip only a single character.
            break

    # Highest privilege level first.
    user_status.sort(key=attrgetter('level'), reverse=True)

    prefix = Prefix.parse(item) if has_userhost else Prefix(item)
    return prefix.nick, prefix.user, prefix.host, user_status
def replace_user_data(conn, chan_data):
    """Rebuild a channel's member list from freshly collected /NAMES data.

    :type conn: cloudbot.client.Client
    :type chan_data: Channel
    """
    statuses = {
        status.prefix: status
        for status in set(conn.memory["server_info"]["statuses"].values())
    }
    new_data = chan_data.data.pop("new_users", [])
    has_uh_i_n = is_cap_available(conn, "userhost-in-names")
    has_multi_pfx = is_cap_available(conn, "multi-prefix")
    old_data = chan_data.data.pop('old_users', {})
    new_names = set()

    for name in new_data:
        nick, ident, host, status = parse_names_item(
            name, statuses, has_multi_pfx, has_uh_i_n
        )
        # Bug fix: set.update(str) added the *characters* of the nick;
        # add() records the whole casefolded nick instead, so the
        # stale-member diff below actually works.
        new_names.add(nick.casefold())
        user_data = get_users(conn).getuser(nick)
        user_data.nick = nick
        if ident:
            user_data.ident = ident

        if host:
            user_data.host = host

        memb_data = user_data.join_channel(chan_data)
        memb_data.status = status

    # Drop members that disappeared between the two /NAMES snapshots.
    for old_nick in old_data:
        if old_nick.casefold() not in new_names:
            del chan_data.users[old_nick]
@hook.irc_raw(['353', '366'], singlethread=True)
def on_names(conn, irc_paramlist, irc_command):
    """Collect RPL_NAMREPLY lines and commit them on RPL_ENDOFNAMES.

    :type conn: cloudbot.client.Client
    :type irc_paramlist: cloudbot.util.parsers.irc.ParamList
    :type irc_command: str
    """
    chan = irc_paramlist[2] if irc_command == '353' else irc_paramlist[1]
    chan_data = get_chans(conn).getchan(chan)
    if irc_command == '366':
        # End of /NAMES: swap the collected data in.
        chan_data.receiving_names = False
        replace_user_data(conn, chan_data)
        return

    users = chan_data.data.setdefault("new_users", [])
    if not chan_data.receiving_names:
        # First 353 of a burst: snapshot current members for diffing later.
        old = ChannelMembersDict(chan_data)
        old.update(chan_data.users)
        chan_data.data['old_users'] = old
        chan_data.receiving_names = True
        users.clear()

    users.extend(irc_paramlist[-1].strip().split())
class MappingSerializer:
    """
    Serialize generic mappings to json
    """

    def __init__(self):
        # ids of containers already serialized, used to break cycles
        self._seen_objects = []

    def _serialize(self, obj):
        """Recursively convert *obj* to JSON-compatible primitives.

        Containers seen before are replaced by a placeholder string so
        cyclic structures cannot recurse forever; other unknown objects
        are represented by their repr().
        """
        # Bug fix: import the ABCs from collections.abc locally; the
        # module-level "from collections import Mapping, Iterable" was
        # removed in Python 3.10.
        from collections.abc import Iterable, Mapping

        if isinstance(obj, (str, Number, bool)) or obj is None:
            return obj

        if isinstance(obj, Mapping):
            if id(obj) in self._seen_objects:
                return '<{} with id {}>'.format(type(obj).__name__, id(obj))

            self._seen_objects.append(id(obj))

            return {
                self._serialize(k): self._serialize(v)
                for k, v in obj.items()
            }

        if isinstance(obj, Iterable):
            if id(obj) in self._seen_objects:
                return '<{} with id {}>'.format(type(obj).__name__, id(obj))

            self._seen_objects.append(id(obj))

            return [
                self._serialize(item)
                for item in obj
            ]

        return repr(obj)

    def serialize(self, mapping, **kwargs):
        """
        Serialize mapping to JSON
        """
        return json.dumps(self._serialize(mapping), **kwargs)
@hook.permission("chanop")
def perm_check(chan, conn, nick):
"""
:type chan: str
:type conn: cloudbot.client.Client
:type nick: str
"""
if not (chan and conn):
return False
chans = get_chans(conn)
try:
chan_data = chans[chan]
except KeyError:
return False
try:
memb = chan_data.users[nick]
except KeyError:
return False
status = memb.status
if status and status[0].level > 1:
return True
return False
@hook.command(permissions=["botcontrol"], autohelp=False)
def dumpchans(conn):
"""- Dumps all stored channel data for this connection to the console
:type conn: cloudbot.client.Client
"""
data = get_chans(conn)
return web.paste(MappingSerializer().serialize(data, indent=2))
@hook.command(permissions=["botcontrol"], autohelp=False)
def dumpusers(conn):
"""- Dumps all stored user data for this connection to the console
:type conn: cloudbot.client.Client
"""
data = get_users(conn)
return web.paste(MappingSerializer().serialize(data, indent=2))
@hook.command(permissions=["botcontrol"], autohelp=False)
def updateusers(bot):
"""- Forces an update of all /NAMES data for all channels
:type bot: cloudbot.bot.CloudBot
"""
get_chan_data(bot)
return "Updating all channel data"
@hook.command(permissions=["botcontrol"], autohelp=False)
def cleanusers(bot):
"""- Clean user data
:type bot: cloudbot.bot.CloudBot
"""
clean_data(bot)
gc.collect()
return "Data cleaned."
@hook.command(permissions=["botcontrol"], autohelp=False)
def clearusers(bot):
"""- Clear all user data
:type bot: cloudbot.bot.CloudBot
"""
for conn in bot.connections.values():
init_chan_data(conn)
gc.collect()
return "Data cleared."
@hook.command("getdata", permissions=["botcontrol"], autohelp=False)
def getdata_cmd(conn, chan, nick):
"""- Get data for current user"""
chan_data = get_chans(conn).getchan(chan)
user_data = get_users(conn).getuser(nick)
memb = chan_data.get_member(user_data)
return web.paste(MappingSerializer().serialize(memb, indent=2))
@hook.irc_raw('JOIN')
def on_join(nick, user, host, conn, irc_paramlist):
    """Track a user joining a channel.

    :type nick: str
    :type user: str
    :type host: str
    :type conn: cloudbot.client.Client
    :type irc_paramlist: cloudbot.util.parsers.irc.ParamList
    """
    chan, *other_data = irc_paramlist

    user_data = get_users(conn).getuser(nick)

    user_data.ident = user
    user_data.host = host

    # With extended-join the JOIN line also carries account and realname.
    if other_data and is_cap_available(conn, "extended-join"):
        acct, realname = other_data
        user_data.account = acct
        user_data.realname = realname

    user_data.join_channel(get_chans(conn).getchan(chan))
ModeChange = namedtuple('ModeChange', 'mode adding param is_status')
def _parse_mode_string(modes, params, status_modes, mode_types):
new_modes = []
adding = True
for c in modes:
if c == '+':
adding = True
elif c == '-':
adding = False
else:
is_status = c in status_modes
mode_type = mode_types.get(c)
if mode_type:
mode_type = mode_type.type
else:
mode_type = 'B' if is_status else None
if mode_type in "AB" or (mode_type == 'C' and adding):
param = params.pop(0)
else:
param = None
new_modes.append(ModeChange(c, adding, param, is_status))
return new_modes
@hook.irc_raw('MODE')
def on_mode(chan, irc_paramlist, conn):
    """
    Track status-mode (prefix) changes on a channel.

    :type chan: str
    :type conn: cloudbot.client.Client
    :type irc_paramlist: cloudbot.util.parsers.irc.ParamList
    """
    if irc_paramlist[0].casefold() == conn.nick.casefold():
        # this is a user mode line
        return

    serv_info = conn.memory["server_info"]
    statuses = serv_info["statuses"]
    status_modes = {status.mode for status in statuses.values()}
    mode_types = serv_info["channel_modes"]

    chan_data = get_chans(conn).getchan(chan)

    modes = irc_paramlist[1]
    mode_params = list(irc_paramlist[2:]).copy()
    new_modes = _parse_mode_string(modes, mode_params, status_modes, mode_types)
    # Only status (prefix) changes affect member tracking.
    new_statuses = [change for change in new_modes if change.is_status]
    # Defer sorting so each member is sorted at most once per MODE line.
    to_sort = {}
    for change in new_statuses:
        status_char = change.mode
        nick = change.param
        user = get_users(conn).getuser(nick)
        memb = chan_data.get_member(user, create=True)
        status = statuses[status_char]
        if change.adding:
            memb.add_status(status, sort=False)
            to_sort[user.nick] = memb
        else:
            memb.remove_status(status)

    for member in to_sort.values():
        member.sort_status()
@hook.irc_raw('PART')
def on_part(chan, nick, conn):
    """Track a user leaving a channel.

    :type chan: str
    :type nick: str
    :type conn: cloudbot.client.Client
    """
    channels = get_chans(conn)
    if nick.casefold() != conn.nick.casefold():
        # Someone else left: forget only their membership.
        del channels[chan].users[nick]
    else:
        # We left: drop the whole channel record.
        del channels[chan]
@hook.irc_raw('KICK')
def on_kick(chan, target, conn):
    """
    Handle a KICK; bookkeeping is identical to the target PARTing.

    :type chan: str
    :type target: str
    :type conn: cloudbot.client.Client
    """
    on_part(chan, target, conn)
@hook.irc_raw('QUIT')
def on_quit(nick, conn):
    """Remove a quitting user from all tracked channels.

    :type nick: str
    :type conn: cloudbot.client.Client
    """
    users = get_users(conn)
    if nick not in users:
        return

    user = users.pop(nick)
    for membership in user.channels.values():
        del membership.channel.users[nick]
@hook.irc_raw('NICK')
def on_nick(nick, irc_paramlist, conn):
    """Re-key all tracking data when a user changes nick.

    :type nick: str
    :type irc_paramlist: cloudbot.util.parsers.irc.ParamList
    :type conn: cloudbot.client.Client
    """
    new_nick = irc_paramlist[0]
    users = get_users(conn)

    user = users.pop(nick)
    users[new_nick] = user
    user.nick = new_nick

    # Update every channel member list the user appears in.
    for membership in user.channels.values():
        members = membership.channel.users
        members[new_nick] = members.pop(nick)
@hook.irc_raw('ACCOUNT')
def on_account(conn, nick, irc_paramlist):
    """
    Track account-notify updates; the User.account setter maps the
    '*' (logged out) parameter to None.

    :type nick: str
    :type irc_paramlist: cloudbot.util.parsers.irc.ParamList
    :type conn: cloudbot.client.Client
    """
    get_users(conn).getuser(nick).account = irc_paramlist[0]
@hook.irc_raw('CHGHOST')
def on_chghost(conn, nick, irc_paramlist):
    """Handle CHGHOST: apply the user's new ident and hostname.

    :type nick: str
    :type irc_paramlist: cloudbot.util.parsers.irc.ParamList
    :type conn: cloudbot.client.Client
    """
    new_ident, new_host = irc_paramlist
    target = get_users(conn).getuser(nick)
    target.ident = new_ident
    target.host = new_host
@hook.irc_raw('AWAY')
def on_away(conn, nick, irc_paramlist):
    """Handle AWAY: a message marks the user away, no message marks them back.

    :type nick: str
    :type irc_paramlist: cloudbot.util.parsers.irc.ParamList
    :type conn: cloudbot.client.Client
    """
    message = irc_paramlist[0] if irc_paramlist else None
    target = get_users(conn).getuser(nick)
    target.is_away = message is not None
    target.away_message = message
@hook.irc_raw('352')
def on_who(conn, irc_paramlist):
    """Handle a WHO reply (RPL_WHOREPLY): refresh the user's connection info.

    :type irc_paramlist: cloudbot.util.parsers.irc.ParamList
    :type conn: cloudbot.client.Client
    """
    _, _, ident, host, server, nick, status, realname = irc_paramlist
    # The trailing parameter is "<hopcount> <realname>"; drop the hop count.
    realname = realname.split(None, 1)[1]

    user = get_users(conn).getuser(nick)

    status = list(status)
    # The first flag character is H (here) or G (gone/away).
    is_away = status.pop(0) == "G"
    # BUG FIX: the old check `status[:1] == "*"` compared a *list* slice to a
    # string, which is always False, so operators were never detected. The
    # oper flag '*' immediately follows the H/G character when present.
    is_oper = bool(status) and status[0] == "*"

    user.ident = ident
    user.host = host
    user.server = server
    user.realname = realname
    user.is_away = is_away
    user.is_oper = is_oper
@hook.irc_raw('311')
def on_whois_name(conn, irc_paramlist):
    """Handle RPL_WHOISUSER: store the user's ident, host and real name.

    :type irc_paramlist: cloudbot.util.parsers.irc.ParamList
    :type conn: cloudbot.client.Client
    """
    _, nick, ident, host, _, realname = irc_paramlist
    target = get_users(conn).getuser(nick)
    target.ident = ident
    target.host = host
    target.realname = realname
@hook.irc_raw('330')
def on_whois_acct(conn, irc_paramlist):
    """Handle RPL_WHOISACCOUNT: record the account the user is logged in as.

    :type irc_paramlist: cloudbot.util.parsers.irc.ParamList
    :type conn: cloudbot.client.Client
    """
    # BUG FIX: the slice must take three elements; `irc_paramlist[:2]` yields
    # only two values, so the three-name unpack raised ValueError on every
    # 330 numeric received.
    _, nick, acct = irc_paramlist[:3]
    get_users(conn).getuser(nick).account = acct
@hook.irc_raw('301')
def on_whois_away(conn, irc_paramlist):
    """Handle RPL_AWAY: mark the user away and remember their away message.

    :type irc_paramlist: cloudbot.util.parsers.irc.ParamList
    :type conn: cloudbot.client.Client
    """
    _, nick, msg = irc_paramlist
    target = get_users(conn).getuser(nick)
    target.is_away = True
    target.away_message = msg
@hook.irc_raw('312')
def on_whois_server(conn, irc_paramlist):
    """Handle RPL_WHOISSERVER: record which server the user is attached to.

    :type irc_paramlist: cloudbot.util.parsers.irc.ParamList
    :type conn: cloudbot.client.Client
    """
    _, nick, server, _ = irc_paramlist
    target = get_users(conn).getuser(nick)
    target.server = server
@hook.irc_raw('313')
def on_whois_oper(conn, irc_paramlist):
    """Handle RPL_WHOISOPERATOR: flag the user as an IRC operator.

    :type irc_paramlist: cloudbot.util.parsers.irc.ParamList
    :type conn: cloudbot.client.Client
    """
    oper_nick = irc_paramlist[1]
    get_users(conn).getuser(oper_nick).is_oper = True
| tiredtyrant/CloudBot | plugins/chan_track.py | Python | gpl-3.0 | 23,414 |
import random


def main(case_count=10, n=10 ** 6):
    """Emit random test cases for the judge.

    For each of *case_count* cases: print n on its own line, then two lines
    of n space-separated random integers — the first drawn from [0, 1000],
    the second from [1, 1000]. Each number line keeps the trailing space the
    original generator produced, in case a strict checker relies on it.

    Improvements over the original flat script:
    - one print per line instead of one per number (10^6 print calls per
      line was the dominant cost);
    - guarded by __main__ so importing this module emits nothing;
    - counts are parameters with the original values as defaults.
    """
    for _ in range(case_count):
        print(n)
        # First line: values may be zero.
        row = ' '.join(str(random.randint(0, 1000)) for _ in range(n))
        print(row, end=' \n')
        # Second line: strictly positive values.
        row = ' '.join(str(random.randint(1, 1000)) for _ in range(n))
        print(row, end=' \n')


if __name__ == '__main__':
    main()
| kzoacn/Grimoire | Training/9.10(ShenyangOnline)/L_gen.py | Python | gpl-3.0 | 214 |
from app.db import db
from task import Task
class User(db.Document):
    """Account document: credentials plus references to the user's tasks."""

    name = db.StringField(required=True)
    password = db.StringField(required=True)
    email = db.StringField(required=True)
    tasks = db.ListField(db.ReferenceField(Task))

    def serialize(self):
        """Return a plain-dict view of this user with its tasks serialized.

        NOTE(review): the payload includes the stored password — confirm
        callers really need it before returning this to clients.
        """
        serialized_tasks = [task.serialize() for task in self.tasks]
        return {
            'id': str(self.id),
            'name': self.name,
            'email': self.email,
            'password': self.password,
            'tasks': serialized_tasks
        }
| janol77/rest-api | app/api/models/user.py | Python | gpl-3.0 | 494 |
# -*- coding: utf-8 -*-
###############################################################################
#
# GetAllGroups
# Retrieve data for all groups in an account.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetAllGroups(Choreography):

    # Choreo wrapper for the Google Contacts "retrieve all groups" operation.
    # Generated Temboo code: thin delegation to the Choreography base class.

    def __init__(self, temboo_session):
        """
        Create a new instance of the GetAllGroups Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetAllGroups, self).__init__(temboo_session, '/Library/Google/Contacts/GetAllGroups')

    def new_input_set(self):
        # Return an empty input set matching this Choreo.
        return GetAllGroupsInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in this Choreo's result-set type.
        return GetAllGroupsResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Create an execution handle for a running instance of this Choreo.
        return GetAllGroupsChoreographyExecution(session, exec_id, path)
class GetAllGroupsInputSet(InputSet):
    """
    Holds the inputs for the GetAllGroups Choreo; populate values with the
    set_* methods before executing the Choreo.
    """

    def _store(self, name, value):
        # Single funnel into the base class' input table.
        super(GetAllGroupsInputSet, self)._set_input(name, value)

    def set_AccessToken(self, value):
        """((optional, string) OAuth access token from the last step of the OAuth process. Expired tokens are refreshed and returned in the Choreo output.)"""
        self._store('AccessToken', value)

    def set_ClientID(self, value):
        """((required, string) OAuth client ID provided by Google when the application was registered.)"""
        self._store('ClientID', value)

    def set_ClientSecret(self, value):
        """((required, string) OAuth client secret provided by Google when the application was registered.)"""
        self._store('ClientSecret', value)

    def set_MaxResults(self, value):
        """((optional, integer) Maximum number of entries to return.)"""
        self._store('MaxResults', value)

    def set_RefreshToken(self, value):
        """((required, string) OAuth refresh token, used when the access token is expired or not provided.)"""
        self._store('RefreshToken', value)

    def set_StartIndex(self, value):
        """((optional, integer) Index of the first result to retrieve, for paging.)"""
        self._store('StartIndex', value)
class GetAllGroupsResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetAllGroups Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, json_str):
        """Parse *json_str* and return the resulting Python object.

        FIX: the parameter was previously named ``str``, shadowing the
        builtin; generated callers invoke this positionally, so the rename
        is safe.
        """
        return json.loads(json_str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from Google.)
        """
        return self._output.get('Response', None)

    def get_AccessToken(self):
        """
        Retrieve the value for the "AccessToken" output from this Choreo execution. ((optional, string) The access token retrieved in the last step of the OAuth process. Access tokens that are expired will be refreshed and returned in the Choreo output.)
        """
        return self._output.get('AccessToken', None)
class GetAllGroupsChoreographyExecution(ChoreographyExecution):
    # Execution handle for GetAllGroups; wraps raw responses in the
    # matching result-set type.
    def _make_result_set(self, response, path):
        return GetAllGroupsResultSet(response, path)
| MrNuggles/HeyBoet-Telegram-Bot | temboo/Library/Google/Contacts/GetAllGroups.py | Python | gpl-3.0 | 4,869 |
# FIX: `entry_points` is a setuptools-only option — distutils.core.setup
# silently ignores it, so the console scripts were never installed. Prefer
# setuptools (distutils was also removed from the stdlib in Python 3.12)
# and keep distutils only as a legacy fallback.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name='pilfer',
    version='2.0',
    description='pilfer command line tool to record audio and video from Kodi',
    url='https://github.com/NapoleonWils0n/pilfer',
    author='NapoleonWils0n',
    maintainer='NapoleonWils0n',
    license='GPL',
    keywords='ffmpeg rtmpdump kodi',
    packages=['pilfer'],
    entry_points={
        'console_scripts': [
            'pilfer = pilfer.pilfer:entry',
            'pilferplay = pilfer.pilferplay:entryplay',
        ],
    }
)
| NapoleonWils0n/pilfer | setup.py | Python | gpl-3.0 | 605 |
from .waresponseparser import ResponseParser
from yowsup.env import YowsupEnv
import sys
import logging
from axolotl.ecc.curve import Curve
from axolotl.ecc.ec import ECPublicKey
from yowsup.common.tools import WATools
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from yowsup.config.v1.config import Config
from yowsup.profile.profile import YowProfile
import struct
import random
import base64
if sys.version_info < (3, 0):
import httplib
from urllib import quote as urllib_quote
if sys.version_info >= (2, 7, 9):
# see https://github.com/tgalal/yowsup/issues/677
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
else:
from http import client as httplib
from urllib.parse import quote as urllib_quote
logger = logging.getLogger(__name__)
class WARequest(object):
    """Base class for WhatsApp registration/verification HTTP(S) requests.

    Builds the common request parameters from a profile/config, optionally
    encrypts them with the server's public key, and sends them via GET or
    POST, feeding the response through a ResponseParser.
    """

    OK = 200

    # Registration server's Curve25519 public key, used to encrypt the
    # request parameters in encryptParams().
    ENC_PUBKEY = Curve.decodePoint(
        bytearray([
            5, 142, 140, 15, 116, 195, 235, 197, 215, 166, 134, 92, 108,
            60, 132, 56, 86, 176, 97, 33, 204, 232, 234, 119, 77, 34, 251,
            111, 18, 37, 18, 48, 45
        ])
    )

    def __init__(self, config_or_profile):
        """
        :param config_or_profile: profile (or, deprecated, config) holding the
            phone/identity data used to build the default request parameters
        :type config_or_profile: yowsup.config.v1.config.Config | YowProfile
        """
        self.pvars = []
        self.port = 443
        self.type = "GET"
        self.parser = None
        self.params = []
        self.headers = {}

        self.sent = False
        self.response = None

        if isinstance(config_or_profile, Config):
            logger.warning("Passing Config to WARequest is deprecated, pass a YowProfile instead")
            profile = YowProfile(config_or_profile.phone, config_or_profile)
        else:
            assert isinstance(config_or_profile, YowProfile)
            profile = config_or_profile

        self._config = profile.config
        config = self._config
        # National number: full phone with the country code stripped.
        self._p_in = str(config.phone)[len(str(config.cc)):]
        self._axolotlmanager = profile.axolotl_manager

        # Lazily generate any missing identifiers/keys on first use:
        if config.expid is None:
            config.expid = WATools.generateDeviceId()

        if config.fdid is None:
            config.fdid = WATools.generatePhoneId()

        if config.client_static_keypair is None:
            config.client_static_keypair = WATools.generateKeyPair()

        self.addParam("cc", config.cc)
        self.addParam("in", self._p_in)
        self.addParam("lg", "en")
        self.addParam("lc", "GB")
        self.addParam("mistyped", "6")
        self.addParam("authkey", self.b64encode(config.client_static_keypair.public.data))
        self.addParam("e_regid", self.b64encode(struct.pack('>I', self._axolotlmanager.registration_id)))
        self.addParam("e_keytype", self.b64encode(b"\x05"))
        self.addParam("e_ident", self.b64encode(self._axolotlmanager.identity.publicKey.serialize()[1:]))

        signedprekey = self._axolotlmanager.load_latest_signed_prekey(generate=True)
        self.addParam("e_skey_id", self.b64encode(struct.pack('>I', signedprekey.getId())[1:]))
        self.addParam("e_skey_val", self.b64encode(signedprekey.getKeyPair().publicKey.serialize()[1:]))
        self.addParam("e_skey_sig", self.b64encode(signedprekey.getSignature()))

        self.addParam("fdid", config.fdid)
        self.addParam("expid", self.b64encode(config.expid))

        self.addParam("network_radio_type", "1")
        self.addParam("simnum", "1")
        self.addParam("hasinrc", "1")
        self.addParam("pid", int(random.uniform(100, 9999)))
        self.addParam("rc", 0)

        if self._config.id:
            self.addParam("id", self._config.id)

    def setParsableVariables(self, pvars):
        self.pvars = pvars

    def onResponse(self, name, value):
        if name == "status":
            self.status = value
        elif name == "result":
            self.result = value

    def addParam(self, name, value):
        self.params.append((name, value))

    def removeParam(self, name):
        # FIX: the previous implementation deleted from self.params while
        # iterating over range(len(self.params)) computed up front; after a
        # deletion the list shrank and the stale indices raised IndexError
        # whenever the removed entry was not the last one. Rebuilding the
        # list removes every entry with the given name safely.
        self.params = [param for param in self.params if param[0] != name]

    def addHeaderField(self, name, value):
        self.headers[name] = value

    def clearParams(self):
        self.params = []

    def getUserAgent(self):
        return YowsupEnv.getCurrent().getUserAgent()

    def send(self, parser=None, encrypt=True, preview=False):
        """Dispatch the request via GET or POST depending on self.type."""
        logger.debug("send(parser=%s, encrypt=%s, preview=%s)" % (
            None if parser is None else "[omitted]",
            encrypt, preview
        ))
        if self.type == "POST":
            return self.sendPostRequest(parser)

        return self.sendGetRequest(parser, encrypt, preview=preview)

    def setParser(self, parser):
        if isinstance(parser, ResponseParser):
            self.parser = parser
        else:
            logger.error("Invalid parser")

    def getConnectionParameters(self):
        """Split self.url into (host, port, path)."""
        if not self.url:
            return "", "", self.port

        try:
            url = self.url.split("://", 1)
            url = url[0] if len(url) == 1 else url[1]
            host, path = url.split('/', 1)
        except ValueError:
            # No path component after the host.
            host = url
            path = ""

        path = "/" + path
        return host, self.port, path

    def encryptParams(self, params, key):
        """Encrypt the urlencoded *params* with an ephemeral ECDH/AES-GCM layer.

        :param params: (name, value) pairs to encrypt
        :type params: list
        :param key: server public key to agree against
        :type key: ECPublicKey
        :return: single-parameter list carrying the encrypted payload
        :rtype: list
        """
        keypair = Curve.generateKeyPair()
        encodedparams = self.urlencodeParams(params)
        cipher = AESGCM(Curve.calculateAgreement(key, keypair.privateKey))
        # Zero nonce is safe here because the AES key is derived from a
        # fresh ephemeral keypair for every request.
        ciphertext = cipher.encrypt(b'\x00\x00\x00\x00' + struct.pack('>Q', 0), encodedparams.encode(), b'')
        payload = base64.b64encode(keypair.publicKey.serialize()[1:] + ciphertext)

        return [('ENC', payload)]

    def sendGetRequest(self, parser=None, encrypt_params=True, preview=False):
        logger.debug("sendGetRequest(parser=%s, encrypt_params=%s, preview=%s)" % (
            None if parser is None else "[omitted]",
            encrypt_params, preview
        ))
        self.response = None
        if encrypt_params:
            logger.debug("Encrypting parameters")
            if logger.level <= logging.DEBUG:
                logger.debug("pre-encrypt (encoded) parameters = \n%s", (self.urlencodeParams(self.params)))
            params = self.encryptParams(self.params, self.ENC_PUBKEY)
        else:
            ## params will be logged right before sending
            params = self.params

        parser = parser or self.parser or ResponseParser()

        headers = dict(
            list(
                {
                    "User-Agent": self.getUserAgent(),
                    "Accept": parser.getMeta()
                }.items()
            ) + list(self.headers.items()))

        host, port, path = self.getConnectionParameters()
        self.response = WARequest.sendRequest(host, port, path, headers, params, "GET", preview=preview)

        if preview:
            logger.info("Preview request, skip response handling and return None")
            return None

        if not self.response.status == WARequest.OK:
            logger.error("Request not success, status was %s" % self.response.status)
            return {}

        data = self.response.read()

        logger.info(data)

        self.sent = True
        return parser.parse(data.decode(), self.pvars)

    def sendPostRequest(self, parser=None):
        self.response = None
        params = self.params  # [param.items()[0] for param in self.params];

        parser = parser or self.parser or ResponseParser()

        headers = dict(list({"User-Agent": self.getUserAgent(),
                             "Accept": parser.getMeta(),
                             "Content-Type": "application/x-www-form-urlencoded"
                             }.items()) + list(self.headers.items()))

        host, port, path = self.getConnectionParameters()
        self.response = WARequest.sendRequest(host, port, path, headers, params, "POST")

        if not self.response.status == WARequest.OK:
            logger.error("Request not success, status was %s" % self.response.status)
            return {}

        data = self.response.read()

        logger.info(data)

        self.sent = True
        return parser.parse(data.decode(), self.pvars)

    def b64encode(self, value):
        # URL-safe base64 with the '=' padding stripped, as the server expects.
        return base64.urlsafe_b64encode(value).replace(b'=', b'')

    @classmethod
    def urlencode(cls, value):
        """Percent-encode *value* with lowercase hex digits and a custom
        safe-character set (-, _, ~ are escaped too)."""
        if type(value) not in (str, bytes):
            value = str(value)
        out = ""
        for char in value:
            if type(char) is int:
                char = bytearray([char])
            quoted = urllib_quote(char, safe='')
            out += quoted if quoted[0] != '%' else quoted.lower()
        return out \
            .replace('-', '%2d') \
            .replace('_', '%5f') \
            .replace('~', '%7e')

    @classmethod
    def urlencodeParams(cls, params):
        merged = []
        for k, v in params:
            merged.append(
                "%s=%s" % (k, cls.urlencode(v))
            )

        return "&".join(merged)

    @classmethod
    def sendRequest(cls, host, port, path, headers, params, reqType="GET", preview=False):
        logger.debug("sendRequest(host=%s, port=%s, path=%s, headers=%s, params=%s, reqType=%s, preview=%s)" % (
            host, port, path, headers, params, reqType, preview
        ))

        params = cls.urlencodeParams(params)
        path = path + "?" + params if reqType == "GET" and params else path

        if not preview:
            logger.debug("Opening connection to %s" % host)
            conn = httplib.HTTPSConnection(host, port) if port == 443 else httplib.HTTPConnection(host, port)
        else:
            logger.debug("Should open connection to %s, but this is a preview" % host)
            conn = None

        if not preview:
            logger.debug("Sending %s request to %s" % (reqType, path))
            conn.request(reqType, path, params, headers)
        else:
            logger.debug("Should send %s request to %s, but this is a preview" % (reqType, path))
            return None

        response = conn.getresponse()

        return response
| tgalal/yowsup | yowsup/common/http/warequest.py | Python | gpl-3.0 | 10,328 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This is the module where the main solver object for the
nonlinear solver of bolt is defined. This solver object
stores the details of the system defined under physical_system,
and is evolved using the methods of this module.
The solver has the option of using 2 different methods:
- A semi-lagrangian scheme based on Cheng-Knorr(1978) which
uses advective interpolation.(non-conservative)
- The interpolation schemes available are
linear and cubic spline.
- Finite volume scheme(conservative):
- Riemann solvers available are the local Lax-Friedrichs and 1st order
upwind scheme.
- The reconstruction schemes available are minmod, PPM, and WENO5
"""
# Importing dependencies:
import arrayfire as af
import numpy as np
import petsc4py, sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import socket
# Importing solver libraries:
from . import communicate
from . import boundaries
from . import timestep
from .file_io import dump
from .file_io import load
from .utils.bandwidth_test import bandwidth_test
from .utils.print_with_indent import indent
from .utils.performance_timings import print_table
from .utils.broadcasted_primitive_operations import multiply
from .compute_moments import compute_moments as compute_moments_imported
from .fields.fields import fields_solver
class nonlinear_solver(object):
"""
An instance of this class' attributes contains methods which are used
in evolving the system declared under physical system nonlinearly. The
state of the system then may be determined from the attributes of the
system such as the distribution function and electromagnetic fields.
Relevant physical information is obtained by coarse graining this system
by taking moments of the distribution function. This is performed by the
compute_moments() method.
"""
    def __init__(self, physical_system, performance_test_flag = False):
        """
        Constructor for the nonlinear_solver object. It takes the physical
        system object as an argument and uses it in intialization and
        evolution of the system in consideration.

        Additionally, a performance test flag is also passed which when true,
        stores time which is consumed by each of the major solver routines.
        This proves particularly useful in analyzing performance bottlenecks
        and obtaining benchmarks.

        Parameters:
        -----------

        physical_system: The defined physical system object which holds
                         all the simulation information such as the initial
                         conditions, and the domain info is passed as an
                         argument in defining an instance of the
                         nonlinear_solver. This system is then evolved, and
                         monitored using the various methods under the
                         nonlinear_solver class.

        performance_test_flag: bool
                               When True, per-routine timing accumulators
                               (self.time_*) are initialized and updated
                               by the solver routines.
        """
        self.physical_system = physical_system

        # Holding Domain Info:
        self.q1_start, self.q1_end = physical_system.q1_start,\
                                     physical_system.q1_end
        self.q2_start, self.q2_end = physical_system.q2_start,\
                                     physical_system.q2_end
        self.p1_start, self.p1_end = physical_system.p1_start,\
                                     physical_system.p1_end
        self.p2_start, self.p2_end = physical_system.p2_start,\
                                     physical_system.p2_end
        self.p3_start, self.p3_end = physical_system.p3_start,\
                                     physical_system.p3_end

        # Holding Domain Resolution:
        self.N_q1, self.dq1 = physical_system.N_q1, physical_system.dq1
        self.N_q2, self.dq2 = physical_system.N_q2, physical_system.dq2
        self.N_p1, self.dp1 = physical_system.N_p1, physical_system.dp1
        self.N_p2, self.dp2 = physical_system.N_p2, physical_system.dp2
        self.N_p3, self.dp3 = physical_system.N_p3, physical_system.dp3

        # Getting number of ghost zones, and the boundary
        # conditions that are utilized:
        N_g_q = self.N_ghost_q = physical_system.N_ghost_q
        N_g_p = self.N_ghost_p = physical_system.N_ghost_p

        self.boundary_conditions = physical_system.boundary_conditions

        # Declaring the communicator:
        self._comm = PETSc.COMM_WORLD.tompi4py()
        # Round-robin assignment of MPI ranks to available compute devices:
        if(self.physical_system.params.num_devices>1):
            af.set_device(self._comm.rank%self.physical_system.params.num_devices)

        # Getting number of species:
        self.N_species = len(physical_system.params.mass)

        # Having the mass and charge along axis 1:
        self.physical_system.params.mass = \
            af.cast(af.moddims(af.to_array(physical_system.params.mass),
                               1, self.N_species
                              ),
                    af.Dtype.f64
                   )

        self.physical_system.params.charge = \
            af.cast(af.moddims(af.to_array(physical_system.params.charge),
                               1, self.N_species
                              ),
                    af.Dtype.f64
                   )

        PETSc.Sys.Print('\nBackend Details for Nonlinear Solver:')

        # Printing the backend details for each rank/device/node:
        PETSc.Sys.syncPrint(indent('Rank ' + str(self._comm.rank) + ' of ' + str(self._comm.size-1)))
        PETSc.Sys.syncPrint(indent('On Node: '+ socket.gethostname()))
        PETSc.Sys.syncPrint(indent('Device Details:'))
        PETSc.Sys.syncPrint(indent(af.info_str(), 2))
        PETSc.Sys.syncPrint(indent('Device Bandwidth = ' + str(bandwidth_test(100)) + ' GB / sec'))
        PETSc.Sys.syncPrint()
        PETSc.Sys.syncFlush()

        self.performance_test_flag = performance_test_flag

        # Initializing variables which are used to time the components of the solver:
        if(performance_test_flag == True):
            self.time_ts = 0

            self.time_interp2  = 0
            self.time_sourcets = 0

            self.time_fvm_solver  = 0
            self.time_reconstruct = 0
            self.time_riemann     = 0

            self.time_fieldstep = 0
            self.time_interp3   = 0

            self.time_apply_bcs_f   = 0
            self.time_communicate_f = 0

        petsc_bc_in_q1 = 'ghosted'
        petsc_bc_in_q2 = 'ghosted'

        # Only for periodic boundary conditions or shearing-box boundary conditions
        # do the boundary conditions passed to the DA need to be changed. PETSc
        # automatically handles the application of periodic boundary conditions when
        # running in parallel. For shearing box boundary conditions, an interpolation
        # operation needs to be applied on top of the periodic boundary conditions.
        # In all other cases, ghosted boundaries are used.

        if(   self.boundary_conditions.in_q1_left == 'periodic'
           or self.boundary_conditions.in_q1_left == 'shearing-box'
          ):
            petsc_bc_in_q1 = 'periodic'

        if(   self.boundary_conditions.in_q2_bottom == 'periodic'
           or self.boundary_conditions.in_q2_bottom == 'shearing-box'
          ):
            petsc_bc_in_q2 = 'periodic'

        if(self.boundary_conditions.in_q1_left == 'periodic'):
            try:
                assert(self.boundary_conditions.in_q1_right == 'periodic')
            except:
                raise Exception('Periodic boundary conditions need to be applied to \
                                 both the boundaries of a particular axis'
                               )

        if(self.boundary_conditions.in_q1_left == 'shearing-box'):
            try:
                assert(self.boundary_conditions.in_q1_right == 'shearing-box')
            except:
                raise Exception('Shearing box boundary conditions need to be applied to \
                                 both the boundaries of a particular axis'
                               )

        if(self.boundary_conditions.in_q2_bottom == 'periodic'):
            try:
                assert(self.boundary_conditions.in_q2_top == 'periodic')
            except:
                raise Exception('Periodic boundary conditions need to be applied to \
                                 both the boundaries of a particular axis'
                               )

        if(self.boundary_conditions.in_q2_bottom == 'shearing-box'):
            try:
                assert(self.boundary_conditions.in_q2_top == 'shearing-box')
            except:
                raise Exception('Shearing box boundary conditions need to be applied to \
                                 both the boundaries of a particular axis'
                               )

        nproc_in_q1 = PETSc.DECIDE
        nproc_in_q2 = PETSc.DECIDE

        # Since shearing boundary conditions require interpolations which are non-local:
        if(self.boundary_conditions.in_q2_bottom == 'shearing-box'):
            nproc_in_q1 = 1

        if(self.boundary_conditions.in_q1_left == 'shearing-box'):
            nproc_in_q2 = 1

        # DMDA is a data structure to handle a distributed structure
        # grid and its related core algorithms. It stores metadata of
        # how the grid is partitioned when run in parallel which is
        # utilized by the various methods of the solver.
        self._da_f = PETSc.DMDA().create([self.N_q1, self.N_q2],
                                         dof = (  self.N_species
                                                * (self.N_p1 + 2 * N_g_p)
                                                * (self.N_p2 + 2 * N_g_p)
                                                * (self.N_p3 + 2 * N_g_p)
                                               ),
                                         stencil_width = N_g_q,
                                         boundary_type = (petsc_bc_in_q1,
                                                          petsc_bc_in_q2
                                                         ),
                                         proc_sizes = (nproc_in_q1,
                                                       nproc_in_q2
                                                      ),
                                         stencil_type = 1,
                                         comm = self._comm
                                        )

        # This DA is used by the FileIO routine dump_distribution_function():
        self._da_dump_f = PETSc.DMDA().create([self.N_q1, self.N_q2],
                                              dof = (  self.N_species
                                                     * self.N_p1
                                                     * self.N_p2
                                                     * self.N_p3
                                                    ),
                                              stencil_width = N_g_q,
                                              boundary_type = (petsc_bc_in_q1,
                                                               petsc_bc_in_q2
                                                              ),
                                              proc_sizes = (nproc_in_q1,
                                                            nproc_in_q2
                                                           ),
                                              stencil_type = 1,
                                              comm = self._comm
                                             )

        # This DA is used by the FileIO routine dump_moments():
        # Finding the number of definitions for the moments:
        attributes = [a for a in dir(self.physical_system.moments) if not a.startswith('_')]

        # Removing utility functions:
        if('integral_over_v' in attributes):
            attributes.remove('integral_over_v')

        self._da_dump_moments = PETSc.DMDA().create([self.N_q1, self.N_q2],
                                                    dof =   self.N_species
                                                          * len(attributes),
                                                    proc_sizes = (nproc_in_q1,
                                                                  nproc_in_q2
                                                                 ),
                                                    comm = self._comm
                                                   )

        # Creation of the local and global vectors from the DA:
        # This is for the distribution function
        self._glob_f = self._da_f.createGlobalVec()
        self._local_f = self._da_f.createLocalVec()

        # The following vector is used to dump the data to file:
        self._glob_dump_f = self._da_dump_f.createGlobalVec()
        self._glob_moments = self._da_dump_moments.createGlobalVec()

        # Getting the arrays for the above vectors:
        self._glob_f_array = self._glob_f.getArray()
        self._local_f_array = self._local_f.getArray()

        self._glob_moments_array = self._glob_moments.getArray()
        self._glob_dump_f_array = self._glob_dump_f.getArray()

        # Setting names for the objects which will then be
        # used as the key identifiers for the HDF5 files:
        PETSc.Object.setName(self._glob_dump_f, 'distribution_function')
        PETSc.Object.setName(self._glob_moments, 'moments')

        # Obtaining the array values of the cannonical variables:
        self.q1_center, self.q2_center = self._calculate_q_center()
        self.p1_center, self.p2_center, self.p3_center = self._calculate_p_center()

        # Initialize according to initial condition provided by user:
        # NOTE(review): _initialize is expected to set self.f (used below);
        # it is defined elsewhere in the solver — confirm.
        self._initialize(physical_system.params)

        # Obtaining start coordinates for the local zone
        # Additionally, we also obtain the size of the local zone
        ((i_q1_start, i_q2_start), (N_q1_local, N_q2_local)) = self._da_f.getCorners()
        (i_q1_end, i_q2_end) = (i_q1_start + N_q1_local - 1, i_q2_start + N_q2_local - 1)

        # Applying dirichlet boundary conditions:
        # (only on ranks whose local zone touches the physical boundary)
        if(self.physical_system.boundary_conditions.in_q1_left == 'dirichlet'):
            # If local zone includes the left physical boundary:
            if(i_q1_start == 0):
                self.f[:, :N_g_q] = self.boundary_conditions.\
                                    f_left(self.f, self.q1_center, self.q2_center,
                                           self.p1_center, self.p2_center, self.p3_center,
                                           self.physical_system.params
                                          )[:, :N_g_q]

        if(self.physical_system.boundary_conditions.in_q1_right == 'dirichlet'):
            # If local zone includes the right physical boundary:
            if(i_q1_end == self.N_q1 - 1):
                self.f[:, -N_g_q:] = self.boundary_conditions.\
                                     f_right(self.f, self.q1_center, self.q2_center,
                                             self.p1_center, self.p2_center, self.p3_center,
                                             self.physical_system.params
                                            )[:, -N_g_q:]

        if(self.physical_system.boundary_conditions.in_q2_bottom == 'dirichlet'):
            # If local zone includes the bottom physical boundary:
            if(i_q2_start == 0):
                self.f[:, :, :N_g_q] = self.boundary_conditions.\
                                       f_bot(self.f, self.q1_center, self.q2_center,
                                             self.p1_center, self.p2_center, self.p3_center,
                                             self.physical_system.params
                                            )[:, :, :N_g_q]

        if(self.physical_system.boundary_conditions.in_q2_top == 'dirichlet'):
            # If local zone includes the top physical boundary:
            if(i_q2_end == self.N_q2 - 1):
                self.f[:, :, -N_g_q:] = self.boundary_conditions.\
                                        f_top(self.f, self.q1_center, self.q2_center,
                                              self.p1_center, self.p2_center, self.p3_center,
                                              self.physical_system.params
                                             )[:, :, -N_g_q:]

        # Assigning the value to the PETSc Vecs(for dump at t = 0):
        (af.flat(self.f)).to_ndarray(self._local_f_array)
        (af.flat(self.f[:, :, N_g_q:-N_g_q, N_g_q:-N_g_q])).to_ndarray(self._glob_f_array)

        # Assigning the function objects to methods of the solver:
        self._A_q = physical_system.A_q
        self._C_q = physical_system.C_q
        self._A_p = physical_system.A_p
        self._C_p = physical_system.C_p

        # Source/Sink term:
        self._source = physical_system.source

        # Initializing a variable to track time-elapsed:
        self.time_elapsed = 0
def _convert_to_q_expanded(self, array):
"""
Since we are limited to use 4D arrays due to
the bound from ArrayFire, we define 2 forms
which can be used such that the computations may
carried out along all dimensions necessary:
q_expanded form:(N_p1 * N_p2 * N_p3, N_s, N_q1, N_q2)
p_expanded form:(N_p1, N_p2, N_p3, N_s * N_q1 * N_q2)
This function converts the input array from
p_expanded to q_expanded form.
"""
# Obtaining start coordinates for the local zone
# Additionally, we also obtain the size of the local zone
((i_q1_start, i_q2_start), (N_q1_local, N_q2_local)) = self._da_f.getCorners()
array = af.moddims(array,
(self.N_p1 + 2 * self.N_ghost_p)
* (self.N_p2 + 2 * self.N_ghost_p)
* (self.N_p3 + 2 * self.N_ghost_p),
self.N_species,
(N_q1_local + 2 * self.N_ghost_q),
(N_q2_local + 2 * self.N_ghost_q)
)
af.eval(array)
return (array)
def _convert_to_p_expanded(self, array):
"""
Since we are limited to use 4D arrays due to
the bound from ArrayFire, we define 2 forms
which can be used such that the computations may
carried out along all dimensions necessary:
q_expanded form:(N_p1 * N_p2 * N_p3, N_s, N_q1, N_q2)
p_expanded form:(N_p1, N_p2, N_p3, N_s * N_q1 * N_q2)
This function converts the input array from
q_expanded to p_expanded form.
"""
# Obtaining start coordinates for the local zone
# Additionally, we also obtain the size of the local zone
((i_q1_start, i_q2_start), (N_q1_local, N_q2_local)) = self._da_f.getCorners()
array = af.moddims(array,
self.N_p1 + 2 * self.N_ghost_p,
self.N_p2 + 2 * self.N_ghost_p,
self.N_p3 + 2 * self.N_ghost_p,
self.N_species
* (N_q1_local + 2 * self.N_ghost_q)
* (N_q2_local + 2 * self.N_ghost_q)
)
af.eval(array)
return (array)
def _calculate_q_center(self):
"""
Initializes the cannonical variables q1, q2 using a centered
formulation. The size, and resolution are the same as declared
under domain of the physical system object.
Returns in q_expanded form.
"""
# Obtaining start coordinates for the local zone
# Additionally, we also obtain the size of the local zone
((i_q1_start, i_q2_start), (N_q1_local, N_q2_local)) = self._da_f.getCorners()
i_q1_center = i_q1_start + 0.5
i_q2_center = i_q2_start + 0.5
i_q1 = ( i_q1_center
+ np.arange(-self.N_ghost_q, N_q1_local + self.N_ghost_q)
)
i_q2 = ( i_q2_center
+ np.arange(-self.N_ghost_q, N_q2_local + self.N_ghost_q)
)
q1_center = self.q1_start + i_q1 * self.dq1
q2_center = self.q2_start + i_q2 * self.dq2
q2_center, q1_center = np.meshgrid(q2_center, q1_center)
q1_center, q2_center = af.to_array(q1_center), af.to_array(q2_center)
# To bring the data structure to the default form:(N_p, N_s, N_q1, N_q2)
q1_center = af.reorder(q1_center, 3, 2, 0, 1)
q2_center = af.reorder(q2_center, 3, 2, 0, 1)
af.eval(q1_center, q2_center)
return (q1_center, q2_center)
def _calculate_p_center(self):
"""
Initializes the cannonical variables p1, p2 and p3 using a centered
formulation. The size, and resolution are the same as declared
under domain of the physical system object.
"""
p1_center = self.p1_start + (0.5 + np.arange(-self.N_ghost_p,
self.N_p1 + self.N_ghost_p
)
) * self.dp1
p2_center = self.p2_start + (0.5 + np.arange(-self.N_ghost_p,
self.N_p2 + self.N_ghost_p
)
) * self.dp2
p3_center = self.p3_start + (0.5 + np.arange(-self.N_ghost_p,
self.N_p3 + self.N_ghost_p
)
) * self.dp3
p2_center, p1_center, p3_center = np.meshgrid(p2_center,
p1_center,
p3_center
)
# Flattening the arrays:
p1_center = af.flat(af.to_array(p1_center))
p2_center = af.flat(af.to_array(p2_center))
p3_center = af.flat(af.to_array(p3_center))
if(self.N_species > 1):
p1_center = af.tile(p1_center, 1, self.N_species)
p2_center = af.tile(p2_center, 1, self.N_species)
p3_center = af.tile(p3_center, 1, self.N_species)
af.eval(p1_center, p2_center, p3_center)
return (p1_center, p2_center, p3_center)
def _calculate_p_left(self):
    """Return (p1, p2, p3) with p1 at the left cell edges (no half-cell
    offset) and p2, p3 cell-centered, ghost zones included."""
    def cell_edges(start, N, dp):
        return start + np.arange(-self.N_ghost_p, N + self.N_ghost_p) * dp

    def cell_centers(start, N, dp):
        return start + (0.5 + np.arange(-self.N_ghost_p, N + self.N_ghost_p)) * dp

    p1_left = cell_edges(self.p1_start, self.N_p1, self.dp1)
    p2_mid  = cell_centers(self.p2_start, self.N_p2, self.dp2)
    p3_mid  = cell_centers(self.p3_start, self.N_p3, self.dp3)

    # np.meshgrid's default 'xy' indexing swaps the first two axes
    p2_left, p1_left, p3_left = np.meshgrid(p2_mid, p1_left, p3_mid)

    # Flattening the arrays:
    p1_left = af.flat(af.to_array(p1_left))
    p2_left = af.flat(af.to_array(p2_left))
    p3_left = af.flat(af.to_array(p3_left))

    if self.N_species > 1:
        # One copy of the momentum grid per species
        p1_left = af.tile(p1_left, 1, self.N_species)
        p2_left = af.tile(p2_left, 1, self.N_species)
        p3_left = af.tile(p3_left, 1, self.N_species)

    af.eval(p1_left, p2_left, p3_left)
    return (p1_left, p2_left, p3_left)
def _calculate_p_bottom(self):
    """Return (p1, p2, p3) with p2 at the bottom cell edges (no half-cell
    offset) and p1, p3 cell-centered, ghost zones included."""
    def cell_edges(start, N, dp):
        return start + np.arange(-self.N_ghost_p, N + self.N_ghost_p) * dp

    def cell_centers(start, N, dp):
        return start + (0.5 + np.arange(-self.N_ghost_p, N + self.N_ghost_p)) * dp

    p1_mid    = cell_centers(self.p1_start, self.N_p1, self.dp1)
    p2_bottom = cell_edges(self.p2_start, self.N_p2, self.dp2)
    p3_mid    = cell_centers(self.p3_start, self.N_p3, self.dp3)

    # np.meshgrid's default 'xy' indexing swaps the first two axes
    p2_bottom, p1_bottom, p3_bottom = np.meshgrid(p2_bottom, p1_mid, p3_mid)

    # Flattening the arrays:
    p1_bottom = af.flat(af.to_array(p1_bottom))
    p2_bottom = af.flat(af.to_array(p2_bottom))
    p3_bottom = af.flat(af.to_array(p3_bottom))

    if self.N_species > 1:
        # One copy of the momentum grid per species
        p1_bottom = af.tile(p1_bottom, 1, self.N_species)
        p2_bottom = af.tile(p2_bottom, 1, self.N_species)
        p3_bottom = af.tile(p3_bottom, 1, self.N_species)

    af.eval(p1_bottom, p2_bottom, p3_bottom)
    return (p1_bottom, p2_bottom, p3_bottom)
def _calculate_p_back(self):
    """Return (p1, p2, p3) with p3 on the cell edges (no half-cell offset)
    and p1, p2 cell-centered, ghost zones included — the p3 analogue of
    _calculate_p_left / _calculate_p_bottom."""
    p1_center = self.p1_start + (0.5 + np.arange(-self.N_ghost_p,
                                                 self.N_p1 + self.N_ghost_p
                                                )
                                ) * self.dp1
    p2_center = self.p2_start + (0.5 + np.arange(-self.N_ghost_p,
                                                 self.N_p2 + self.N_ghost_p
                                                )
                                ) * self.dp2
    p3_back   = self.p3_start + np.arange(-self.N_ghost_p,
                                          self.N_p3 + self.N_ghost_p
                                         ) * self.dp3

    # BUGFIX: the meshgrid call previously referenced the undefined name
    # p3_center (raising a NameError); the edge-valued p3_back is intended.
    # np.meshgrid's default 'xy' indexing swaps the first two axes.
    p2_back, p1_back, p3_back = np.meshgrid(p2_center,
                                            p1_center,
                                            p3_back
                                           )

    # Flattening the arrays:
    p1_back = af.flat(af.to_array(p1_back))
    p2_back = af.flat(af.to_array(p2_back))
    p3_back = af.flat(af.to_array(p3_back))

    if(self.N_species > 1):
        # One copy of the momentum grid per species
        p1_back = af.tile(p1_back, 1, self.N_species)
        p2_back = af.tile(p2_back, 1, self.N_species)
        p3_back = af.tile(p3_back, 1, self.N_species)

    af.eval(p1_back, p2_back, p3_back)
    return (p1_back, p2_back, p3_back)
def _initialize(self, params):
    """
    Called when the solver object is declared. This function is
    used to initialize the distribution function, using the options
    as provided by the user.

    Parameters
    ----------
    params : file/object
        params contains all details of which methods to use
        in addition to useful physical constant. Additionally,
        it can also be used to inject methods which need to be
        used inside some solver routine
    """
    # Initializing with the provided I.C's:
    # af.broadcast, allows us to perform batched operations
    # when operating on arrays of different sizes
    # af.broadcast(function, *args) performs batched
    # operations on function(*args)
    self.f = af.broadcast(self.physical_system.initial_conditions.\
                          initialize_f, self.q1_center, self.q2_center,
                          self.p1_center, self.p2_center, self.p3_center, params
                         )

    # Snapshot of the initial distribution (presumably kept for
    # diagnostics / comparison against later states -- confirm usage)
    self.f_initial = self.f

    if(self.physical_system.params.EM_fields_enabled):
        # Initial charge density rho = charge * density moment, used to
        # initialize the electromagnetic fields solver
        rho_initial = multiply(self.physical_system.params.charge,
                               self.compute_moments('density')
                              )

        self.fields_solver = fields_solver(self.N_q1, self.N_q2, self.N_ghost_q,
                                           self.q1_center, self.q2_center,
                                           self.dq1, self.dq2, self._comm,
                                           self.boundary_conditions,
                                           self.physical_system.params,
                                           rho_initial, self.performance_test_flag
                                          )
# Injection of solver functions into class as methods:
# (implementations live in sibling modules; binding them here lets
# instances call them like regular methods)
_communicate_f = communicate.\
                 communicate_f
_apply_bcs_f   = boundaries.apply_bcs_f

# Operator-splitting time-steppers of various orders:
strang_timestep = timestep.strang_step
lie_timestep    = timestep.lie_step
swss_timestep   = timestep.swss_step
jia_timestep    = timestep.jia_step

# Moment computation:
compute_moments = compute_moments_imported

# Dump / load (checkpointing) routines:
dump_distribution_function = dump.dump_distribution_function
dump_moments               = dump.dump_moments
dump_EM_fields             = dump.dump_EM_fields

load_distribution_function = load.load_distribution_function
load_EM_fields             = load.load_EM_fields

# Performance-profiling report:
print_performance_timings  = print_table
| ShyamSS-95/Bolt | bolt/lib/nonlinear/nonlinear_solver.py | Python | gpl-3.0 | 29,921 |
#!/usr/bin/env python3
import copy
import matplotlib.pyplot as plt
import numpy as np
import pytest
from pysisyphus.plotters.AnimPlot import AnimPlot
from pysisyphus.calculators.MullerBrownPot import MullerBrownPot
#from pysisyphus.calculators.MullerBrownSympyPot import MullerBrownPot
from pysisyphus.cos.NEB import NEB
from pysisyphus.cos.SimpleZTS import SimpleZTS
from pysisyphus.optimizers.FIRE import FIRE
from pysisyphus.optimizers.BFGS import BFGS
from pysisyphus.optimizers.LBFGS import LBFGS
from pysisyphus.Geometry import Geometry
from pysisyphus.optimizers.SteepestDescent import SteepestDescent
# Default settings shared by all chain-of-states optimizations below;
# individual tests copy this dict and override entries as needed.
KWARGS = {
    "images": 4,        # number of interpolated images
    "max_cycles": 100,  # optimizer cycle limit
    "max_step": 0.02,   # maximum step length per image
    "convergence": {
        "max_force_thresh": 0.1,
        "rms_force_thresh": 0.02,
        "max_step_thresh": 0.005,
        "rms_step_thresh": 0.001,
    },
    "dump": False,      # do not write optimizer dump files
}
def get_geoms(keys=("B","C","TSA","A")):
    """Return Geometry objects for the requested Muller-Brown points.

    Parameters
    ----------
    keys : iterable of str
        Labels of the stationary points to return, in order.
    """
    coords_dict = {
        "A": (-0.558, 1.442, 0),    # Minimum A
        "B": (0.6215, 0.02838, 0),  # Minimum B
        "C": (-0.05, 0.467, 0),     # Minimum C
        "AC": (-0.57, 0.8, 0),      # Between A and C
        "TSA": (-0.822, 0.624, 0),  # Saddle point A
    }
    coords = [np.array(coords_dict[k]) for k in keys]
    # BUGFIX: ("H") is just the string "H"; a one-element tuple needs
    # a trailing comma.
    atoms = ("H",)
    geoms = [Geometry(atoms, c) for c in coords]
    return geoms
def run_cos_opt(cos, Opt, images, **kwargs):
    """Interpolate the COS, attach calculators, run the optimizer and
    return the finished optimizer object."""
    cos.interpolate(images)
    opt = Opt(cos, **kwargs)
    for image in cos.images:
        image.set_calculator(MullerBrownPot())
    opt.run()

    return opt
def animate(opt):
    """Animate the optimization history on a Muller-Brown contour plot."""
    xlim = (-1.75, 1.25)
    ylim = (-0.5, 2.25)
    levels = (-150, -15, 40)
    anim_plot = AnimPlot(MullerBrownPot(), opt,
                         xlim=xlim, ylim=ylim, levels=levels)
    anim_plot.animate()
@pytest.mark.sd
def test_steepest_descent_neb():
    """SD-NEB over four images must converge in 56 cycles."""
    kwargs = KWARGS.copy()
    kwargs["images"] = 4
    neb = NEB(get_geoms())
    opt = run_cos_opt(neb, SteepestDescent, **kwargs)

    assert opt.is_converged
    assert opt.cur_cycle == 56

    return opt
@pytest.mark.sd
def test_steepest_descent_straight_neb():
    """Something is really really wrong here."""
    kwargs = KWARGS.copy()
    kwargs.update(
        images=10,
        max_cycles=100,
        convergence={
            "max_force_thresh": 1.16,
            "rms_force_thresh": 0.27,
            "max_step_thresh": 0.021,
            "rms_step_thresh": 0.005,
        },
    )
    neb = NEB(get_geoms(("A", "B")))
    opt = run_cos_opt(neb, SteepestDescent, **kwargs)

    assert opt.is_converged
    assert opt.cur_cycle == 62

    return opt
@pytest.mark.bfgs
def test_bfgs_straight_neb():
    """Something is really really wrong here."""
    kwargs = KWARGS.copy()
    kwargs.update(
        images=10,
        convergence={
            "max_force_thresh": 5.0,
            "rms_force_thresh": 1,
            "max_step_thresh": 0.002,
            "rms_step_thresh": 0.0006,
        },
    )
    neb = NEB(get_geoms(("A", "B")))
    opt = run_cos_opt(neb, BFGS, **kwargs)

    assert opt.is_converged
    assert opt.cur_cycle == 45

    return opt
@pytest.mark.lbfgs
def test_lbfgs_neb():
    """L-BFGS on a two-point NEB with very stiff, fixed-end springs."""
    kwargs = KWARGS.copy()
    kwargs["images"] = 3
    kwargs["fix_ends"] = True
    k_min = 1000
    k_max = k_min + 10
    neb = NEB(get_geoms(("A", "B")), k_min=k_min, k_max=k_max, fix_ends=True)
    # Removed an unused in-function import (ConjugateGradient) and stale
    # commented-out code; neither was referenced in this test.
    opt = run_cos_opt(neb, LBFGS, **kwargs)
    # NOTE: no convergence assertions yet -- the expected cycle count has
    # not been pinned down for this optimizer.

    return opt
@pytest.mark.sd
def test_steepest_descent_neb_more_images():
    """SD-NEB over seven images with loosened thresholds."""
    kwargs = KWARGS.copy()
    kwargs.update(
        images=7,
        convergence={
            "max_force_thresh": 0.6,
            "rms_force_thresh": 0.13,
            "max_step_thresh": 0.015,
            "rms_step_thresh": 0.0033,
        },
    )
    neb = NEB(get_geoms())
    opt = run_cos_opt(neb, SteepestDescent, **kwargs)

    assert opt.is_converged
    assert opt.cur_cycle == 41

    return opt
@pytest.mark.fire
def test_fire_neb():
    """FIRE with a small initial timestep must converge in 76 cycles."""
    kwargs = KWARGS.copy()
    kwargs["dt"] = 0.01
    kwargs["dt_max"] = 0.1
    neb = NEB(get_geoms())
    opt = run_cos_opt(neb, FIRE, **kwargs)

    assert opt.is_converged
    assert opt.cur_cycle == 76

    return opt
def test_equal_szts():
    """Equal-spacing string method; only the rms force is checked."""
    kwargs = KWARGS.copy()
    kwargs["convergence"] = {"rms_force_thresh": 2.4}
    szts = SimpleZTS(get_geoms(), param="equal")
    opt = run_cos_opt(szts, SteepestDescent, **kwargs)

    assert opt.is_converged
    assert opt.cur_cycle == 17

    return opt
def test_equal_szts_straight():
    """Equal-spacing string between the A and B minima (no assertions)."""
    kwargs = KWARGS.copy()
    kwargs["images"] = 10
    kwargs["max_step"] = 0.04
    kwargs["convergence"] = {"rms_force_thresh": 2.4}
    szts = SimpleZTS(get_geoms(("A", "B")), param="equal")
    return run_cos_opt(szts, SteepestDescent, **kwargs)
def test_equal_szts_more_images():
    """Equal-spacing string over seven images."""
    kwargs = KWARGS.copy()
    kwargs["images"] = 7
    kwargs["convergence"] = {"rms_force_thresh": 2.4}
    szts = SimpleZTS(get_geoms(), param="equal")
    opt = run_cos_opt(szts, SteepestDescent, **kwargs)

    assert opt.is_converged
    assert opt.cur_cycle == 21

    return opt
def test_energy_szts():
    """Energy-weighted string method."""
    kwargs = KWARGS.copy()
    kwargs["convergence"] = {"rms_force_thresh": 2.8}
    szts = SimpleZTS(get_geoms(), param="energy")
    opt = run_cos_opt(szts, SteepestDescent, **kwargs)

    assert opt.is_converged
    assert opt.cur_cycle == 15

    return opt
def test_energy_szts_more_images():
    """Energy-weighted string method over ten images."""
    kwargs = KWARGS.copy()
    kwargs["images"] = 10
    kwargs["convergence"] = {"rms_force_thresh": 1.7}
    szts = SimpleZTS(get_geoms(), param="energy")
    opt = run_cos_opt(szts, SteepestDescent, **kwargs)

    assert opt.is_converged
    assert opt.cur_cycle == 22

    return opt
if __name__ == "__main__":
    # Run one example interactively and animate it; the remaining calls
    # are kept commented out for manual experimentation.

    # Steepest Descent
    opt = test_steepest_descent_neb()
    #opt = test_steepest_descent_straight_neb()
    #opt = test_steepest_descent_neb_more_images()
    # opt = test_bfgs_straight_neb()
    # opt = test_lbfgs_neb()

    # FIRE
    #opt = test_fire_neb()

    # SimpleZTS
    #opt = test_equal_szts()
    #opt = test_equal_szts_straight()
    #opt = test_equal_szts_more_images()
    #opt = test_energy_szts()
    #opt = test_energy_szts_more_images()

    ap = animate(opt)
    plt.show()
| eljost/pysisyphus | tests_staging/test_mullerbrownpot.py | Python | gpl-3.0 | 6,681 |
from Timeline.Server.Constants import TIMELINE_LOGGER
from twisted.internet.defer import Deferred, inlineCallbacks, returnValue
from twistar.dbobject import DBObject
from twistar.registry import Registry
from collections import deque
import logging, time, json
class Penguin(DBObject):
    # twistar relationship declarations: one-to-one rows keyed on this penguin
    HASONE = ['avatar', 'currency', 'ninja']
    # one-to-many collections owned by this penguin
    HASMANY = ['assets', 'bans', 'careItems', 'coins', 'friends', 'ignores', 'requests', 'inventories', 'mails', 'memberships',
               'musicTracks', 'puffles', 'stamps', 'stampCovers', 'igloos']
class Coin(DBObject):
    # Plain twistar row; columns are mapped automatically
    pass
class Igloo(DBObject):
    # An igloo owns its furniture placements and its likes
    HASMANY = ['iglooFurnitures', 'iglooLikes']

    @inlineCallbacks
    def get_likes_count(self):
        # Total likes for this igloo; COALESCE yields 0 when there are none.
        # NOTE(review): the query is built via string interpolation; self.id
        # is a database-assigned integer so injection is unlikely, but a
        # parameterized query would be safer if execute() supports arguments.
        likes = yield Registry.getConfig().execute("SELECT COALESCE(SUM(likes), 0) FROM igloo_likes where "
                                                   "igloo_id = %s" % (self.id))
        returnValue(likes[0][0])

    @inlineCallbacks
    def get_furnitures(self):
        # All IglooFurniture rows belonging to this igloo
        furnitures = yield self.iglooFurnitures.get()
        returnValue(furnitures)

    @inlineCallbacks
    def get_furnitures_string(self):
        # Serialize furniture as "furn_id|x|y|rotate|frame" items joined by commas
        furnitures = yield self.get_furnitures()
        furn_data = map(lambda i: '|'.join(map(str, map(int, [i.furn_id, i.x, i.y, i.rotate, i.frame]))), furnitures)
        returnValue(','.join(furn_data))

    @inlineCallbacks
    def updateFurnitures(self, furnitures):
        # Replace all furniture rows with the given
        # (furn_id, x, y, rotate, frame) tuples.
        yield self.refresh()
        yield IglooFurniture.deleteAll(where=['igloo_id = ?', self.id])

        furn = [IglooFurniture(igloo_id=self.id, furn_id=x[0], x=x[1], y=x[2], rotate=x[3], frame=x[4])
                for x in furnitures]
        # NOTE: yield inside a list comprehension is Python-2-only syntax;
        # each save() is awaited sequentially here.
        [(yield i.save()) for i in furn]

        yield self.iglooFurnitures.set(furn)
class IglooFurniture(DBObject):
    # One placed furniture item inside an igloo
    pass
class IglooLike(DBObject):
    def get_time(self):
        # Like time (datetime column) as a unix timestamp
        return int(time.mktime(self.time.timetuple()))
class Avatar(DBObject):
    # Plain twistar row; columns are mapped automatically
    pass
class Currency(DBObject):
    # Plain twistar row; columns are mapped automatically
    pass
class Ninja(DBObject):
    # Plain twistar row; columns are mapped automatically
    pass
class Asset(DBObject):
    def getPurchasedTimestamp(self):
        # Purchase time (datetime column) as a unix timestamp
        return int(time.mktime(self.purchased.timetuple()))
class Ban(DBObject):
    """A moderation ban row; `expire` is a datetime column."""

    def banned(self):
        # BUGFIX: previously referenced the unbound name `hours`, which
        # raised a NameError; the method must be called on self.
        return self.hours() > 0

    def hours(self):
        # Remaining ban duration in hours; 0 when the ban already expired.
        expire = int(time.mktime(self.expire.timetuple()))
        hours = (expire - time.time()) / (60 * 60.0) if expire > time.time() else 0
        return hours
class CareItem(DBObject):
    # Plain twistar row; columns are mapped automatically
    pass
class Friend(DBObject):
    # Class-level default; presumably a sentinel meaning "no friend
    # resolved yet" -- overwritten by the mapped column when loaded.
    friend_id = -1
class Ignore(DBObject):
    # Plain twistar row; columns are mapped automatically
    pass
class Request(DBObject):
    # Plain twistar row; columns are mapped automatically
    pass
class Inventory(DBObject):
    # Plain twistar row; columns are mapped automatically
    pass
class Mail(DBObject):
    def get_sent_on(self):
        # Sent time (datetime column) as a unix timestamp
        return int(time.mktime(self.sent_on.timetuple()))
class Membership(DBObject):
    # Plain twistar row; columns are mapped automatically
    pass
class MusicTrack(DBObject):
    # Runtime-only default; overwritten by the mapped column when loaded
    shared = False

    def __len__(self):
        # Track length (column `length`)
        return self.length

    def __str__(self, withNotes = False):
        # NOTE(review): __str__ with an extra parameter only takes effect
        # when invoked explicitly (track.__str__(True)); str(track) always
        # uses the short form.
        if not withNotes:
            return '|'.join(map(str, [self.id, self.name, int(self.shared), self.likes]))
        return '%'.join(map(str, [self.id, self.name, int(self.shared), self.notes, self.hash, self.likes]))

    def __int__(self):
        return self.id
class Puffle(DBObject):
    # Transient runtime state (not persisted)
    state = x = y = 0

    def __str__(self):
        # puffle id|type|sub_type|name|adoption|food|play|rest|clean|hat|x|y|is_walking
        return '|'.join(map(str, [int(self.id), int(self.type), self.subtype if int(self.subtype) != 0 else '',
                                  self.name, self.adopt(), int(self.food), int(self.play), int(self.rest),
                                  int(self.clean), int(self.hat), int(self.x), int(self.y), int(self.walking)]))

    def adopt(self):
        # Adoption date (datetime column) as a unix timestamp
        return int(time.mktime(self.adopted.timetuple()))

    def updatePuffleStats(self, engine):
        """Decay food/play/clean stats from the elapsed time since last care.

        Severely neglected puffles get their stats reset and are moved to
        the backyard; hungry ones trigger a reminder postcard (type 110)
        at most once every 12 hours.
        """
        care_history = json.loads(self.lastcare)
        now = time.time()

        # BUGFIX: json.loads may return None (lastcare == "null"); the old
        # code then crashed on item-assignment into None below. Normalize
        # to an empty dict so the reset branch can populate it.
        if care_history is None:
            care_history = {}

        if len(care_history) < 1 or bool(int(self.backyard)) or self.walking:
            care_history['food'] = care_history['play'] = care_history['bath'] = now
            self.lastcare = json.dumps(care_history)
            self.save()
            return # ULTIMATE PUFFLE <indefinite health and energy>

        last_fed = care_history['food']
        last_played = care_history['play']
        last_bathed = care_history['bath']

        food, play, clean = int(self.food), int(self.play), int(self.clean)
        puffleCrumb = engine.puffleCrumbs[self.subtype]
        # Currently unused; kept for reference against the crumb limits
        max_food, max_play, max_clean = puffleCrumb.hunger, 100, puffleCrumb.health

        self.rest = 100 # It's in the igloo all this time?
        self.save()

        ''' It afterall is a poor creature to be taken care of.
        if not int(puffle.id) in self.penguin.engine.puffleCrumbs.defautPuffles:
            return # They aren't to be taken care of
        '''

        '''
        if remaining % < 10 : send a postcard blaming (hungry, dirty, or unhappy)
        if remaining % < 2 : move puffle to pet store, delete puffle, send a postcard, sue 1000 coins as penalty
        '''
        fed_percent = food - 5 * ((now - last_fed)/86400) # delta_food = -5% per day
        play_percent = play - 5 * ((now - last_played)/86400) # delta_play = -5% per day
        clean_percent = clean - 10 * ((now - last_bathed)/86400) # delta_clean = -10% per day

        total_percent = (fed_percent + play_percent + clean_percent) / 3.0

        if fed_percent < 3 or total_percent < 6:
            # Severely neglected: reset stats and banish to the backyard
            self.backyard = 1
            self.food = 100
            self.play = 100
            self.clean = 100
            self.save()
            return

        if fed_percent < 10:
            pid = self.penguin_id
            pname = self.name

            def sendMail(mail):
                # Rate-limit the reminder postcard to one per 12 hours.
                # NOTE(review): mail.sent_on is a datetime column elsewhere
                # (Mail.get_sent_on); subtracting it from time.time() looks
                # suspect -- confirm the stored type.
                if mail is not None:
                    sent = mail.sent_on
                    delta = (time.time() - sent)/3600/12
                    if delta < 1:
                        return

                Mail(penguin_id=pid, from_user=0, type=110, description=str(pname)).save()

            last_mail = Mail.find(where=['penguin_id = ? AND type = 110 AND description = ?', self.penguin_id, self.name], orderby='sent_on DESC', limit=1).addCallback(sendMail)

        self.food = fed_percent
        self.play = play_percent
        self.clean = clean_percent

        care_history['food'] = care_history['play'] = care_history['bath'] = now
        self.lastcare = json.dumps(care_history)
        self.save()
class Stamp(DBObject):
    def __int__(self):
        # The stamp's catalogue id (column `stamp`)
        return int(self.stamp)
class StampCover(DBObject):
    # Plain twistar row; columns are mapped automatically
    pass
class EPFCom(DBObject):
    # Explicit table name for twistar's mapping
    TABLENAME = 'epfcoms'

    def getTime(self):
        # Message time (datetime column) as a unix timestamp
        return int(time.mktime(self.time.timetuple()))

    def __str__(self):
        # "message|timestamp|mascot" wire format
        return '|'.join(map(str, [self.message, self.getTime(), self.mascot]))
class PenguinDB(object):
    """
    <Server.Penguin> will extend this to get db operations

    Syntax:
        def db_<FunctionName> (*a, **kwa): << must be deferred and must return a defer
        > recommended to use with inlineCallbacks
    """

    def __init__(self):
        self.logger = logging.getLogger(TIMELINE_LOGGER)
        self.dbpenguin = None

    @inlineCallbacks
    def db_init(self):
        """Resolve self.dbpenguin by id, swid or username (first available)."""
        if self.dbpenguin is None:
            column, value = 'username', self.penguin.username
            if not self.penguin.id is None:
                column, value = 'ID', self.penguin.id
            elif not self.penguin.swid is None:
                column, value = 'swid', self.penguin.swid

            self.dbpenguin = yield Penguin.find(where = ['%s = ?' % column, value], limit = 1)

            if self.dbpenguin is None:
                raise Exception("[TE201] Penguin not found with {1} - {0}".format(value, column))

        returnValue(True)

    @inlineCallbacks
    def db_nicknameUpdate(self, nick):
        """Persist a new nickname, rolling back on validation errors."""
        p_nickname = self.dbpenguin.nickname
        self.dbpenguin.nickname = nick
        # BUGFIX: save() returns a Deferred; it must be yielded before the
        # saved object's .errors can be inspected (previously .errors was
        # read off the Deferred itself).
        done = yield self.dbpenguin.save()

        if len(done.errors) > 0:
            # Validation failed: restore the previous nickname in memory
            self.dbpenguin.nickname = p_nickname
            for error in done.errors:
                self.log('error', "[TE200] MySQL update nickname failed. Error :", error)
            returnValue(False)
        else:
            returnValue(True)

    @inlineCallbacks
    def db_penguinExists(self, criteria = 'ID', value = None):
        """Return True when a penguin row matches `criteria` = `value`."""
        exists = yield Penguin.exists(["`%s` = ?" % criteria, value])
        returnValue(exists)

    @inlineCallbacks
    def db_getPenguin(self, criteria, *values):
        """Find one penguin by a raw where-clause plus bound values."""
        wh = [criteria] + list(values)
        p = yield Penguin.find(where = wh, limit = 1)
        returnValue(p)

    @inlineCallbacks
    def db_refresh(self):
        """Reload self.dbpenguin's columns from the database."""
        yield self.dbpenguin.refresh()
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2014 Thomas Perl and the gPodder Team
# Copyright (c) 2011 Neal H. Walfield
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# util.py -- Misc utility functions
# Thomas Perl <thp@perli.net> 2007-08-04
#
"""Miscellaneous helper functions for gPodder
This module provides helper and utility functions for gPodder that
are not tied to any specific part of gPodder.
"""
import gpodder
import logging
logger = logging.getLogger(__name__)
import os
import os.path
import platform
import glob
import stat
import shlex
import shutil
import socket
import sys
import string
import re
import subprocess
from htmlentitydefs import entitydefs
import time
import gzip
import datetime
import threading
import urlparse
import urllib
import urllib2
import httplib
import webbrowser
import mimetypes
import itertools
import feedparser
import StringIO
import xml.dom.minidom
# win32file is only needed (and only available) on Windows; it is used for
# system-file detection and free-disk-space queries further below.
if gpodder.ui.win32:
    try:
        import win32file
    except ImportError:
        logger.warn('Running on Win32 but win32api/win32file not installed.')
        win32file = None

# Translation helpers provided by the gpodder package
_ = gpodder.gettext
N_ = gpodder.ngettext

import locale
try:
    locale.setlocale(locale.LC_ALL, '')
except Exception, e:
    logger.warn('Cannot set locale (%s)', e, exc_info=True)

# Native filesystem encoding detection
encoding = sys.getfilesystemencoding()

if encoding is None:
    if 'LANG' in os.environ and '.' in os.environ['LANG']:
        lang = os.environ['LANG']
        (language, encoding) = lang.rsplit('.', 1)
        logger.info('Detected encoding: %s', encoding)
    elif gpodder.ui.harmattan:
        encoding = 'utf-8'
    elif gpodder.ui.win32:
        # To quote http://docs.python.org/howto/unicode.html:
        # ,,on Windows, Python uses the name "mbcs" to refer
        # to whatever the currently configured encoding is``
        encoding = 'mbcs'
    else:
        # Conservative fallback when nothing can be detected
        encoding = 'iso-8859-15'
        logger.info('Assuming encoding: ISO-8859-15 ($LANG not set).')
# Filename / folder name sanitization
def _sanitize_char(c):
    """Map one character to its filesystem-safe replacement."""
    if c in string.whitespace:
        return ' '
    if c in ',-.()':
        return c
    if c in string.punctuation or ord(c) <= 31:
        return '_'
    return c

# 256-entry translation table: whitespace -> space, a few safe punctuation
# characters kept, remaining punctuation and control characters -> '_'.
SANITIZATION_TABLE = ''.join(_sanitize_char(chr(i)) for i in range(256))
del _sanitize_char
# (extension, mime type) pairs for the audio/video formats gPodder knows
_MIME_TYPE_LIST = [
    ('.aac', 'audio/aac'),
    ('.axa', 'audio/annodex'),
    ('.flac', 'audio/flac'),
    ('.m4b', 'audio/m4b'),
    ('.m4a', 'audio/mp4'),
    ('.mp3', 'audio/mpeg'),
    ('.spx', 'audio/ogg'),
    ('.oga', 'audio/ogg'),
    ('.ogg', 'audio/ogg'),
    ('.wma', 'audio/x-ms-wma'),
    ('.3gp', 'video/3gpp'),
    ('.axv', 'video/annodex'),
    ('.divx', 'video/divx'),
    ('.m4v', 'video/m4v'),
    ('.mp4', 'video/mp4'),
    ('.ogv', 'video/ogg'),
    ('.mov', 'video/quicktime'),
    ('.flv', 'video/x-flv'),
    ('.mkv', 'video/x-matroska'),
    ('.wmv', 'video/x-ms-wmv'),
    ('.opus', 'audio/opus'),
]

# mime type -> extension (for duplicate mime types the later entry wins)
_MIME_TYPES = dict((k, v) for v, k in _MIME_TYPE_LIST)
# extension -> mime type
_MIME_TYPES_EXT = dict(_MIME_TYPE_LIST)
def make_directory( path):
    """
    Tries to create a directory if it does not exist already.
    Returns True if the directory exists after the function
    call, False otherwise.
    """
    if os.path.isdir( path):
        return True

    try:
        # Creates all missing intermediate directories as well
        os.makedirs( path)
    except OSError:
        # Narrowed from a bare except: os.makedirs only raises OSError
        # (permissions, race with concurrent creation, bad path)
        logger.warn('Could not create directory: %s', path)
        return False

    return True
def normalize_feed_url(url):
    """
    Converts any URL to http:// or ftp:// so that it can be
    used with "wget". If the URL cannot be converted (invalid
    or unknown scheme), "None" is returned.

    This will also normalize feed:// and itpc:// to http://.

    >>> normalize_feed_url('itpc://example.org/podcast.rss')
    'http://example.org/podcast.rss'

    If no URL scheme is defined (e.g. "curry.com"), we will
    simply assume the user intends to add a http:// feed.

    >>> normalize_feed_url('curry.com')
    'http://curry.com/'

    There are even some more shortcuts for advanced users
    and lazy typists (see the source for details).

    >>> normalize_feed_url('fb:43FPodcast')
    'http://feeds.feedburner.com/43FPodcast'

    It will also take care of converting the domain name to
    all-lowercase (because domains are not case sensitive):

    >>> normalize_feed_url('http://Example.COM/')
    'http://example.com/'

    Some other minimalistic changes are also taken care of,
    e.g. a ? with an empty query is removed:

    >>> normalize_feed_url('http://example.org/test?')
    'http://example.org/test'

    Username and password in the URL must not be affected
    by URL normalization (see gPodder bug 1942):

    >>> normalize_feed_url('http://UserName:PassWord@Example.com/')
    'http://UserName:PassWord@example.com/'
    """
    # Shortest plausible URL is 8 characters ("http://x")
    if not url or len(url) < 8:
        return None

    # This is a list of prefixes that you can use to minimize the amount of
    # keystrokes that you have to use.
    # Feel free to suggest other useful prefixes, and I'll add them here.
    PREFIXES = {
            'fb:': 'http://feeds.feedburner.com/%s',
            'yt:': 'http://www.youtube.com/rss/user/%s/videos.rss',
            'sc:': 'http://soundcloud.com/%s',
            # YouTube playlists. To get a list of playlists per-user, use:
            # https://gdata.youtube.com/feeds/api/users/<username>/playlists
            'ytpl:': 'http://gdata.youtube.com/feeds/api/playlists/%s',
    }

    for prefix, expansion in PREFIXES.iteritems():
        if url.startswith(prefix):
            url = expansion % (url[len(prefix):],)
            break

    # Assume HTTP for URLs without scheme
    if not '://' in url:
        url = 'http://' + url

    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)

    # Domain name is case insensitive, but username/password is not (bug 1942)
    if '@' in netloc:
        authentication, netloc = netloc.rsplit('@', 1)
        netloc = '@'.join((authentication, netloc.lower()))
    else:
        netloc = netloc.lower()

    # Schemes and domain names are case insensitive
    scheme = scheme.lower()

    # Normalize empty paths to "/"
    if path == '':
        path = '/'

    # feed://, itpc:// and itms:// are really http://
    if scheme in ('feed', 'itpc', 'itms'):
        scheme = 'http'

    # Only these schemes are supported downstream
    if scheme not in ('http', 'https', 'ftp', 'file'):
        return None

    # urlunsplit might return "a slighty different, but equivalent URL"
    return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
def username_password_from_url(url):
    r"""
    Returns a tuple (username,password) containing authentication
    data from the specified URL or (None,None) if no authentication
    data can be found in the URL.

    See Section 3.1 of RFC 1738 (http://www.ietf.org/rfc/rfc1738.txt)

    >>> username_password_from_url('https://@host.com/')
    ('', None)
    >>> username_password_from_url('telnet://host.com/')
    (None, None)
    >>> username_password_from_url('ftp://foo:@host.com/')
    ('foo', '')
    >>> username_password_from_url('http://a:b@host.com/')
    ('a', 'b')
    >>> username_password_from_url(1)
    Traceback (most recent call last):
      ...
    ValueError: URL has to be a string or unicode object.
    >>> username_password_from_url(None)
    Traceback (most recent call last):
      ...
    ValueError: URL has to be a string or unicode object.
    >>> username_password_from_url('http://a@b:c@host.com/')
    ('a@b', 'c')
    >>> username_password_from_url('ftp://a:b:c@host.com/')
    ('a', 'b:c')
    >>> username_password_from_url('http://i%2Fo:P%40ss%3A@host.com/')
    ('i/o', 'P@ss:')
    >>> username_password_from_url('ftp://%C3%B6sterreich@host.com/')
    ('\xc3\xb6sterreich', None)
    >>> username_password_from_url('http://w%20x:y%20z@example.org/')
    ('w x', 'y z')
    >>> username_password_from_url('http://example.com/x@y:z@test.com/')
    (None, None)
    """
    if type(url) not in (str, unicode):
        raise ValueError('URL has to be a string or unicode object.')

    (username, password) = (None, None)

    (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)

    if '@' in netloc:
        (authentication, netloc) = netloc.rsplit('@', 1)
        if ':' in authentication:
            (username, password) = authentication.split(':', 1)

            # RFC1738 dictates that we should not allow ['/', '@', ':']
            # characters in the username and password field (Section 3.1):
            #
            # 1. The "/" can't be in there at this point because of the way
            #    urlparse (which we use above) works.
            # 2. Due to gPodder bug 1521, we allow "@" in the username and
            #    password field. We use netloc.rsplit('@', 1), which will
            #    make sure that we split it at the last '@' in netloc.
            # 3. The colon must be excluded (RFC2617, Section 2) in the
            #    username, but is apparently allowed in the password. This
            #    is handled by the authentication.split(':', 1) above, and
            #    will cause any extraneous ':'s to be part of the password.
            username = urllib.unquote(username)
            password = urllib.unquote(password)
        else:
            # No ":" present: the whole auth part is the username
            username = urllib.unquote(authentication)

    return (username, password)
def directory_is_writable(path):
    """
    Returns True if the specified directory exists and is writable
    by the current user.
    """
    if not os.path.isdir(path):
        return False
    return os.access(path, os.W_OK)
def calculate_size( path):
    """
    Tries to calculate the size of a directory, including any
    subdirectories found. The returned value might not be
    correct if the user doesn't have appropriate permissions
    to list all subdirectories of the given path.
    """
    if path is None:
        return 0L

    # Paths directly under the filesystem root are skipped
    # (presumably a safety guard against scanning system dirs -- confirm)
    if os.path.dirname( path) == '/':
        return 0L

    if os.path.isfile( path):
        return os.path.getsize( path)

    # Recurse into real directories only (symlinks are not followed)
    if os.path.isdir( path) and not os.path.islink( path):
        sum = os.path.getsize( path)

        try:
            for item in os.listdir(path):
                try:
                    sum += calculate_size(os.path.join(path, item))
                except:
                    logger.warn('Cannot get size for %s', path, exc_info=True)
        except:
            logger.warn('Cannot access %s', path, exc_info=True)

        return sum

    return 0L
def file_modification_datetime(filename):
    """
    Returns the modification date of the specified file
    as a datetime.datetime object or None if the modification
    date cannot be determined.
    """
    if filename is None:
        return None

    if not os.access(filename, os.R_OK):
        return None

    try:
        s = os.stat(filename)
        timestamp = s[stat.ST_MTIME]
        return datetime.datetime.fromtimestamp(timestamp)
    except (OSError, ValueError, OverflowError):
        # Narrowed from a bare except: os.stat raises OSError;
        # fromtimestamp raises ValueError/OverflowError for bad timestamps
        logger.warn('Cannot get mtime for %s', filename, exc_info=True)
        return None
def file_age_in_days(filename):
    """
    Returns the age of the specified filename in days or
    zero if the modification date cannot be determined.
    """
    modified = file_modification_datetime(filename)
    if modified is None:
        return 0
    return (datetime.datetime.now() - modified).days
def file_modification_timestamp(filename):
    """
    Returns the modification date of the specified file as a number
    or -1 if the modification date cannot be determined.
    """
    if filename is None:
        return -1
    try:
        s = os.stat(filename)
        return s[stat.ST_MTIME]
    except OSError:
        # Narrowed from a bare except: os.stat signals missing files
        # and permission problems via OSError
        logger.warn('Cannot get modification timestamp for %s', filename)
        return -1
def file_age_to_string(days):
    """
    Converts a "number of days" value to a string that
    can be used in the UI to display the file age.

    >>> file_age_to_string(0)
    ''
    >>> file_age_to_string(1)
    u'1 day ago'
    >>> file_age_to_string(2)
    u'2 days ago'
    """
    if days >= 1:
        # Localized singular/plural form
        return N_('%(count)d day ago', '%(count)d days ago', days) % {'count': days}
    return ''
def is_system_file(filename):
    """
    Checks to see if the given file is a system file.
    """
    if gpodder.ui.win32 and win32file is not None:
        result = win32file.GetFileAttributes(filename)
        #-1 is returned by GetFileAttributes when an error occurs
        #0x4 is the FILE_ATTRIBUTE_SYSTEM constant
        return result != -1 and result & 0x4 != 0
    else:
        # Non-Windows platforms have no "system file" attribute
        return False
def get_free_disk_space_win32(path):
    """
    Win32-specific code to determine the free disk space remaining
    for a given path. Uses code from:

    http://mail.python.org/pipermail/python-list/2003-May/203223.html
    """
    if win32file is None:
        # Cannot determine free disk space
        return 0

    # Query the drive of the given path; the first value is the free
    # space available to the calling user
    drive, tail = os.path.splitdrive(path)
    userFree, userTotal, freeOnDisk = win32file.GetDiskFreeSpaceEx(drive)
    return userFree
def get_free_disk_space(path):
    """
    Calculates the free disk space available to the current user
    on the file system that contains the given path.

    If the path (or its parent folder) does not yet exist, this
    function returns zero.
    """
    if not os.path.exists(path):
        return 0

    if gpodder.ui.win32:
        # os.statvfs is unavailable on Windows
        return get_free_disk_space_win32(path)

    s = os.statvfs(path)
    # free blocks available to unprivileged users * block size
    return s.f_bavail * s.f_bsize
def format_date(timestamp):
    """
    Converts a UNIX timestamp to a date representation. This
    function returns "Today", "Yesterday", a weekday name or
    the date in %x format, which (according to the Python docs)
    is the "Locale's appropriate date representation".

    Returns None if there has been an error converting the
    timestamp to a string representation.
    """
    if timestamp is None:
        return None

    seconds_in_a_day = 60*60*24

    # (year, month, day) tuples for comparison against the timestamp
    today = time.localtime()[:3]
    yesterday = time.localtime(time.time() - seconds_in_a_day)[:3]
    try:
        timestamp_date = time.localtime(timestamp)[:3]
    except ValueError, ve:
        logger.warn('Cannot convert timestamp', exc_info=True)
        return None

    if timestamp_date == today:
        return _('Today')
    elif timestamp_date == yesterday:
        return _('Yesterday')

    try:
        diff = int( (time.time() - timestamp)/seconds_in_a_day )
    except:
        logger.warn('Cannot convert "%s" to date.', timestamp, exc_info=True)
        return None

    try:
        timestamp = datetime.datetime.fromtimestamp(timestamp)
    except:
        return None

    if diff < 7:
        # Weekday name
        return str(timestamp.strftime('%A').decode(encoding))
    else:
        # Locale's appropriate date representation
        return str(timestamp.strftime('%x'))
def format_filesize(bytesize, use_si_units=False, digits=2):
    """
    Formats the given size in bytes to be human-readable,

    Returns a localized "(unknown)" string when the bytesize
    has a negative value.
    """
    si_units = (
            ('kB', 10**3),
            ('MB', 10**6),
            ('GB', 10**9),
    )

    binary_units = (
            ('KiB', 2**10),
            ('MiB', 2**20),
            ('GiB', 2**30),
    )

    try:
        bytesize = float(bytesize)
    except:
        return _('(unknown)')

    if bytesize < 0:
        return _('(unknown)')

    units = si_units if use_si_units else binary_units

    # Pick the largest unit not exceeding the size; fall back to bytes
    used_unit, used_value = 'B', bytesize
    for unit, value in units:
        if bytesize >= value:
            used_value = bytesize / float(value)
            used_unit = unit

    return ('%.' + str(digits) + 'f %s') % (used_value, used_unit)
def delete_file(filename):
    """Delete a file from the filesystem

    Errors (permissions errors or file not found)
    are silently ignored.
    """
    try:
        os.remove(filename)
    except OSError:
        # Narrowed from a bare except to match the documented contract:
        # missing files and permission problems both raise OSError
        pass
def remove_html_tags(html):
    """Convert an HTML fragment into plain text.

    Strips all HTML/XML tags, converts common block-level markup
    (<br>, <p>, lists) into newlines/bullets and replaces numeric
    and named entities with the corresponding characters, so the
    text can be displayed in a simple text view.

    Returns None if the input is None.
    """
    if html is None:
        return None
    # If we would want more speed, we could make these global
    strip_tags_re = re.compile('<[^>]*>')
    numeric_entity_re = re.compile('&#(\d{2,4});')
    named_entity_re = re.compile('&(.{2,8});')
    newline_tags_re = re.compile('(<br[^>]*>|<[/]?ul[^>]*>|</li>)', re.I)
    bullet_tags_re = re.compile('<li[^>]*>', re.I)
    # First translate block-level markup into its text equivalent,
    # so the layout information survives the tag stripping below
    text = newline_tags_re.sub('\n', html)
    text = bullet_tags_re.sub('\n * ', text)
    text = re.sub('<[Pp]>', '\n\n', text)
    # Drop every remaining HTML/XML tag
    text = strip_tags_re.sub('', text)
    # Numeric XML entities -> unicode characters
    text = numeric_entity_re.sub(lambda m: unichr(int(m.group(1))), text)
    # Named HTML entities -> unicode characters
    text = named_entity_re.sub(lambda m: unicode(entitydefs.get(m.group(1), ''), 'iso-8859-1'), text)
    # Collapse runs of more than two newlines into just two
    text = re.sub('([\r\n]{2})([\r\n])+', '\\1', text)
    return text.strip()
def wrong_extension(extension):
    """
    Determine if a given extension looks like it's
    wrong (e.g. empty, extremely long or spaces)

    Returns True if the extension most likely is a
    wrong one and should be replaced.

    >>> wrong_extension('.mp3')
    False
    >>> wrong_extension('.divx')
    False
    >>> wrong_extension('mp3')
    True
    >>> wrong_extension('')
    True
    >>> wrong_extension('.12 - Everybody')
    True
    >>> wrong_extension('.mp3 ')
    True
    >>> wrong_extension('.')
    True
    >>> wrong_extension('.42')
    True
    """
    if not extension:
        return True
    elif len(extension) > 5:
        return True
    elif ' ' in extension:
        return True
    elif extension == '.':
        return True
    elif not extension.startswith('.'):
        return True
    else:
        try:
            # ".<number>" is an invalid extension
            float(extension)
            return True
        except ValueError:
            # Narrowed from a bare "except:"; a non-numeric string is
            # the only expected failure mode of float() here
            pass
    return False
def extension_from_mimetype(mimetype):
    """
    Simply guesses what the file extension should be from the mimetype

    >>> extension_from_mimetype('audio/mp4')
    '.m4a'
    >>> extension_from_mimetype('audio/ogg')
    '.ogg'
    >>> extension_from_mimetype('audio/mpeg')
    '.mp3'
    >>> extension_from_mimetype('video/x-matroska')
    '.mkv'
    >>> extension_from_mimetype('wrong-mimetype')
    ''
    """
    try:
        # Our own mapping takes precedence over the mimetypes module
        return _MIME_TYPES[mimetype]
    except KeyError:
        return mimetypes.guess_extension(mimetype) or ''
def mimetype_from_extension(extension):
    """
    Simply guesses what the mimetype should be from the file extension

    >>> mimetype_from_extension('.m4a')
    'audio/mp4'
    >>> mimetype_from_extension('.ogg')
    'audio/ogg'
    >>> mimetype_from_extension('.mp3')
    'audio/mpeg'
    >>> mimetype_from_extension('.mkv')
    'video/x-matroska'
    >>> mimetype_from_extension('._invalid_file_extension_')
    ''
    """
    try:
        # Our own mapping takes precedence over the mimetypes module
        return _MIME_TYPES_EXT[extension]
    except KeyError:
        pass
    # Need to prepend something to the extension, so guess_type works
    guessed_type, _guessed_encoding = mimetypes.guess_type('file' + extension)
    return guessed_type or ''
def extension_correct_for_mimetype(extension, mimetype):
    """
    Check if the given filename extension (e.g. ".ogg") is a possible
    extension for a given mimetype (e.g. "application/ogg") and return
    a boolean value (True if it's possible, False if not). Also do

    >>> extension_correct_for_mimetype('.ogg', 'application/ogg')
    True
    >>> extension_correct_for_mimetype('.ogv', 'video/ogg')
    True
    >>> extension_correct_for_mimetype('.ogg', 'audio/mpeg')
    False
    >>> extension_correct_for_mimetype('.m4a', 'audio/mp4')
    True
    >>> extension_correct_for_mimetype('mp3', 'audio/mpeg')
    Traceback (most recent call last):
      ...
    ValueError: "mp3" is not an extension (missing .)
    >>> extension_correct_for_mimetype('.mp3', 'audio mpeg')
    Traceback (most recent call last):
      ...
    ValueError: "audio mpeg" is not a mimetype (missing /)
    """
    if '/' not in mimetype:
        raise ValueError('"%s" is not a mimetype (missing /)' % mimetype)
    if not extension.startswith('.'):
        raise ValueError('"%s" is not an extension (missing .)' % extension)
    if (extension, mimetype) in _MIME_TYPE_LIST:
        return True
    # Create a "default" extension from the mimetype, e.g. "application/ogg"
    # becomes ".ogg", "audio/mpeg" becomes ".mpeg", etc...
    default = '.' + mimetype.split('/')[-1]
    return extension in [default] + mimetypes.guess_all_extensions(mimetype)
def filename_from_url(url):
    """
    Extracts the filename and (lowercase) extension (with dot)
    from a URL, e.g. http://server.com/file.MP3?download=yes
    will result in the string ("file", ".mp3") being returned.

    This function will also try to best-guess the "real"
    extension for a media file (audio, video) by
    trying to match an extension to these types and recurse
    into the query string to find better matches, if the
    original extension does not resolve to a known type.

    http://my.net/redirect.php?my.net/file.ogg => ("file", ".ogg")
    http://server/get.jsp?file=/episode0815.MOV => ("episode0815", ".mov")
    http://s/redirect.mp4?http://serv2/test.mp4 => ("test", ".mp4")
    """
    (scheme, netloc, path, para, query, fragid) = urlparse.urlparse(url)
    # Unquote first so "%20" etc. in the path don't end up in the name
    (filename, extension) = os.path.splitext(os.path.basename( urllib.unquote(path)))
    if file_type_by_extension(extension) is not None and not \
        query.startswith(scheme+'://'):
        # We have found a valid extension (audio, video)
        # and the query string doesn't look like a URL
        return ( filename, extension.lower() )
    # If the query string looks like a possible URL, try that first
    if len(query.strip()) > 0 and query.find('/') != -1:
        # Re-assemble the query string as a URL and recurse once into it
        query_url = '://'.join((scheme, urllib.unquote(query)))
        (query_filename, query_extension) = filename_from_url(query_url)
        if query_extension is not None and file_type_by_extension(query_extension) is not None:
            return os.path.splitext(os.path.basename(query_url))
    # No exact match found, simply return the original filename & extension
    return ( filename, extension.lower() )
def file_type_by_extension(extension):
    """
    Tries to guess the file type by looking up the filename
    extension from a table of known file types. Will return
    "audio", "video" or None.

    >>> file_type_by_extension('.aif')
    'audio'
    >>> file_type_by_extension('.3GP')
    'video'
    >>> file_type_by_extension('.m4a')
    'audio'
    >>> file_type_by_extension('.txt') is None
    True
    >>> file_type_by_extension(None) is None
    True
    >>> file_type_by_extension('ogg')
    Traceback (most recent call last):
      ...
    ValueError: Extension does not start with a dot: ogg
    """
    if not extension:
        return None
    if not extension.startswith('.'):
        raise ValueError('Extension does not start with a dot: %s' % extension)
    extension = extension.lower()
    try:
        # Our own mapping takes precedence; the category is the
        # part of the mimetype before the slash
        return _MIME_TYPES_EXT[extension].split('/')[0]
    except KeyError:
        pass
    # Need to prepend something to the extension, so guess_type works
    guessed_type, _guessed_encoding = mimetypes.guess_type('file' + extension)
    if guessed_type is not None and '/' in guessed_type:
        category = guessed_type.split('/', 1)[0]
        if category in ('audio', 'video', 'image'):
            return category
    return None
def get_first_line( s):
"""
Returns only the first line of a string, stripped so
that it doesn't have whitespace before or after.
"""
return s.strip().split('\n')[0].strip()
def object_string_formatter(s, **kwargs):
    """
    Makes attributes of object passed in as keyword
    arguments available as {OBJECTNAME.ATTRNAME} in
    the passed-in string and returns a string with
    the above arguments replaced with the attribute
    values of the corresponding object.

    >>> class x: pass
    >>> a = x()
    >>> a.title = 'Hello world'
    >>> object_string_formatter('{episode.title}', episode=a)
    'Hello world'

    >>> class x: pass
    >>> a = x()
    >>> a.published = 123
    >>> object_string_formatter('Hi {episode.published} 456', episode=a)
    'Hi 123 456'
    """
    result = s
    for key, o in kwargs.iteritems():
        # Find all "{key.attr}" placeholders for this object
        matches = re.findall(r'\{%s\.([^\}]+)\}' % key, s)
        for attr in matches:
            if hasattr(o, attr):
                try:
                    from_s = '{%s.%s}' % (key, attr)
                    to_s = str(getattr(o, attr))
                    result = result.replace(from_s, to_s)
                except Exception:
                    # Narrowed from a bare "except:": str() of the attribute
                    # value may fail; keep the placeholder instead of aborting
                    logger.warn('Replace of "%s" failed for "%s".', attr, s)
    return result
def format_desktop_command(command, filenames, start_position=None):
    """
    Formats a command template from the "Exec=" line of a .desktop
    file to a string that can be invoked in a shell.

    Handled format strings: %U, %u, %F, %f and a fallback that
    appends the filename as first parameter of the command.

    Also handles non-standard %p which is replaced with the
    start_position (probably only makes sense if starting a single
    file). (see bug 1140)

    See http://standards.freedesktop.org/desktop-entry-spec/1.0/ar01s06.html

    Returns a list of commands to execute, either one for
    each filename if the application does not support multiple
    file names or one for all filenames (%U, %F or unknown).
    """
    # Replace backslashes with slashes to fix win32 issues
    # (even on win32, "/" works, but "\" does not)
    command = command.replace('\\', '/')
    if start_position is not None:
        command = command.replace('%p', str(start_position))
    args = shlex.split(command)
    before, after = args, []
    supports_multiple = True
    for fieldcode in ('%U', '%F', '%u', '%f'):
        if fieldcode in args:
            # Split the argument list around the first field code found
            index = args.index(fieldcode)
            before, after = args[:index], args[index + 1:]
            supports_multiple = fieldcode in ('%U', '%F')
            break
    if supports_multiple:
        # One command invocation with all filenames as arguments
        return [before + filenames + after]
    # One command invocation per filename
    return [before + [filename] + after for filename in filenames]
def url_strip_authentication(url):
    """
    Strips authentication data from an URL. Returns the URL with
    the authentication data removed from it.

    >>> url_strip_authentication('https://host.com/')
    'https://host.com/'
    >>> url_strip_authentication('telnet://foo:bar@host.com/')
    'telnet://host.com/'
    >>> url_strip_authentication('ftp://billy@example.org')
    'ftp://example.org'
    >>> url_strip_authentication('ftp://billy:@example.org')
    'ftp://example.org'
    >>> url_strip_authentication('http://aa:bc@localhost/x')
    'http://localhost/x'
    >>> url_strip_authentication('http://i%2Fo:P%40ss%3A@blubb.lan/u.html')
    'http://blubb.lan/u.html'
    >>> url_strip_authentication('http://c:d@x.org/')
    'http://x.org/'
    >>> url_strip_authentication('http://P%40%3A:i%2F@cx.lan')
    'http://cx.lan'
    >>> url_strip_authentication('http://x@x.com:s3cret@example.com/')
    'http://example.com/'
    """
    parts = list(urlparse.urlsplit(url))
    # parts[1] is the HOST part of the URL; everything up to the
    # *last* '@' is authentication data, which we drop here
    host = parts[1]
    if '@' in host:
        parts[1] = host.rsplit('@', 1)[1]
    return urlparse.urlunsplit(parts)
def url_add_authentication(url, username, password):
    """
    Adds authentication data (username, password) to a given
    URL in order to construct an authenticated URL.

    >>> url_add_authentication('https://host.com/', '', None)
    'https://host.com/'
    >>> url_add_authentication('http://example.org/', None, None)
    'http://example.org/'
    >>> url_add_authentication('telnet://host.com/', 'foo', 'bar')
    'telnet://foo:bar@host.com/'
    >>> url_add_authentication('ftp://example.org', 'billy', None)
    'ftp://billy@example.org'
    >>> url_add_authentication('ftp://example.org', 'billy', '')
    'ftp://billy:@example.org'
    >>> url_add_authentication('http://localhost/x', 'aa', 'bc')
    'http://aa:bc@localhost/x'
    >>> url_add_authentication('http://blubb.lan/u.html', 'i/o', 'P@ss:')
    'http://i%2Fo:P@ss:@blubb.lan/u.html'
    >>> url_add_authentication('http://a:b@x.org/', 'c', 'd')
    'http://c:d@x.org/'
    >>> url_add_authentication('http://i%2F:P%40%3A@cx.lan', 'P@x', 'i/')
    'http://P@x:i%2F@cx.lan'
    >>> url_add_authentication('http://x.org/', 'a b', 'c d')
    'http://a%20b:c%20d@x.org/'
    """
    if username is None or username == '':
        return url
    # Relaxations of the strict quoting rules (bug 1521):
    # 1. Accept '@' in username and password
    # 2. Accept ':' in password only
    quoted_username = urllib.quote(username, safe='@')
    if password is None:
        auth_string = quoted_username
    else:
        auth_string = ':'.join((quoted_username,
                urllib.quote(password, safe='@:')))
    # Drop any authentication data already present in the URL,
    # then prepend the new credentials to the HOST part
    url_parts = list(urlparse.urlsplit(url_strip_authentication(url)))
    url_parts[1] = '@'.join((auth_string, url_parts[1]))
    return urlparse.urlunsplit(url_parts)
def urlopen(url, headers=None, data=None, timeout=None):
    """
    An URL opener with the User-agent set to gPodder (with version)
    """
    username, password = username_password_from_url(url)
    if username is not None or password is not None:
        # Move credentials out of the URL and into a Basic-Auth handler
        url = url_strip_authentication(url)
        password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
        password_mgr.add_password(None, url, username, password)
        opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_mgr))
    else:
        opener = urllib2.build_opener()
    # Copy the caller's headers (never mutate the passed-in dict)
    # and force our own User-agent
    request_headers = {} if headers is None else dict(headers)
    request_headers.update({'User-agent': gpodder.user_agent})
    request = urllib2.Request(url, data=data, headers=request_headers)
    if timeout is None:
        return opener.open(request)
    return opener.open(request, timeout=timeout)
def get_real_url(url):
    """
    Gets the real URL of a file and resolves all redirects.

    Returns the original URL unchanged if resolving fails
    (e.g. network errors or an invalid URL).
    """
    try:
        return urlopen(url).geturl()
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt and
        # SystemExit are no longer swallowed; everything else is a
        # best-effort failure and we fall back to the input URL
        logger.error('Getting real url for %s', url, exc_info=True)
        return url
def find_command(command):
    """
    Searches the system's PATH for a specific command that is
    executable by the user. Returns the first occurence of an
    executable binary in the PATH, or None if the command is
    not available.

    On Windows, this also looks for "<command>.bat" and
    "<command>.exe" files if "<command>" itself doesn't exist.
    """
    path_env = os.environ.get('PATH')
    if path_env is None:
        return None
    for directory in path_env.split(os.pathsep):
        candidate = os.path.join(directory, command)
        if gpodder.ui.win32 and not os.path.exists(candidate):
            # Try well-known executable extensions on Windows
            for win_extension in ('.bat', '.exe'):
                alternative = candidate + win_extension
                if os.path.isfile(alternative):
                    candidate = alternative
                    break
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None
# Lazily-created singleton IdleAddHandler for the QML/PySide case below
idle_add_handler = None
def idle_add(func, *args):
    """Run a function in the main GUI thread

    This is a wrapper function that does the Right Thing depending on if we are
    running on Gtk+, Qt or CLI.

    You should use this function if you are calling from a Python thread and
    modify UI data, so that you make sure that the function is called as soon
    as possible from the main UI thread.
    """
    if gpodder.ui.gtk:
        # Gtk: delegate scheduling to GLib's main loop
        import gobject
        gobject.idle_add(func, *args)
    elif gpodder.ui.qml:
        from PySide.QtCore import Signal, QTimer, QThread, Qt, QObject
        class IdleAddHandler(QObject):
            # Signal used to marshal a callable into the main thread
            signal = Signal(object)
            def __init__(self):
                QObject.__init__(self)
                # Remember which thread is the main (UI) thread
                self.main_thread_id = QThread.currentThreadId()
                self.signal.connect(self.run_func)
            def run_func(self, func):
                # Invariant: the connected slot always runs in the main thread
                assert QThread.currentThreadId() == self.main_thread_id, \
                    ("Running in %s, not %s"
                     % (str(QThread.currentThreadId()),
                        str(self.main_thread_id)))
                func()
            def idle_add(self, func, *args):
                def doit():
                    try:
                        func(*args)
                    except Exception, e:
                        # Log instead of letting the exception escape the
                        # event loop / signal dispatch
                        logger.exception("Running %s%s: %s",
                                func, str(tuple(args)), str(e))
                if QThread.currentThreadId() == self.main_thread_id:
                    # If we emit the signal in the main thread,
                    # then the function will be run immediately.
                    # Instead, use a single shot timer with a 0
                    # timeout: this will run the function when the
                    # event loop next iterates.
                    QTimer.singleShot(0, doit)
                else:
                    self.signal.emit(doit)
        global idle_add_handler
        if idle_add_handler is None:
            # Created on first use; must happen in the main thread so that
            # main_thread_id is recorded correctly
            idle_add_handler = IdleAddHandler()
        idle_add_handler.idle_add(func, *args)
    else:
        # CLI mode: no event loop, just call the function directly
        func(*args)
def bluetooth_available():
    """
    Returns True or False depending on the availability
    of bluetooth functionality on the system.
    """
    # Either of these command-line tools provides sending support
    for sender in ('bluetooth-sendto', 'gnome-obex-send'):
        if find_command(sender):
            return True
    return False
def bluetooth_send_file(filename):
    """
    Sends a file via bluetooth.

    This function tries to use "bluetooth-sendto", and if
    it is not available, it also tries "gnome-obex-send".

    Returns True if the sender program exited successfully.
    """
    for sender in ('bluetooth-sendto', 'gnome-obex-send'):
        if find_command(sender):
            # Wait for the sender to finish; exit code 0 means success
            return (subprocess.Popen([sender, filename]).wait() == 0)
    logger.error('Cannot send file. Please install "bluetooth-sendto" or "gnome-obex-send".')
    return False
def format_time(value):
    """Format a seconds value to a string

    >>> format_time(0)
    '00:00'
    >>> format_time(20)
    '00:20'
    >>> format_time(3600)
    '01:00:00'
    >>> format_time(10921)
    '03:02:01'
    """
    # Let datetime do the wrapping/formatting; drop the hour part
    # when it is zero
    stamp = datetime.datetime.utcfromtimestamp(value)
    pattern = '%M:%S' if stamp.hour == 0 else '%H:%M:%S'
    return stamp.strftime(pattern)
def parse_time(value):
    """Parse a time string into seconds

    >>> parse_time('00:00')
    0
    >>> parse_time('00:00:00')
    0
    >>> parse_time('00:20')
    20
    >>> parse_time('00:00:20')
    20
    >>> parse_time('01:00:00')
    3600
    >>> parse_time('03:02:01')
    10921
    >>> parse_time('61:08')
    3668
    >>> parse_time('25:03:30')
    90210
    >>> parse_time('25:3:30')
    90210
    >>> parse_time('61.08')
    3668
    """
    if value == '':
        return 0
    if not value:
        raise ValueError('Invalid value: %s' % (str(value),))
    # Try H:M:S first (separators may be ':' or '.')
    hms_match = re.match(r'(\d+)[:.](\d\d?)[:.](\d\d?)', value)
    if hms_match is not None:
        hours, minutes, seconds = [int(part) for part in hms_match.groups()]
        return (hours * 60 + minutes) * 60 + seconds
    # Then M:S
    ms_match = re.match(r'(\d+)[:.](\d\d?)', value)
    if ms_match is not None:
        minutes, seconds = [int(part) for part in ms_match.groups()]
        return minutes * 60 + seconds
    # Fall back to a plain number of seconds
    return int(value)
def format_seconds_to_hour_min_sec(seconds):
    """
    Take the number of seconds and format it into a
    human-readable string (duration).

    >>> format_seconds_to_hour_min_sec(3834)
    u'1 hour, 3 minutes and 54 seconds'
    >>> format_seconds_to_hour_min_sec(3600)
    u'1 hour'
    >>> format_seconds_to_hour_min_sec(62)
    u'1 minute and 2 seconds'
    """
    if seconds < 1:
        return N_('%(count)d second', '%(count)d seconds', seconds) % {'count': seconds}
    result = []
    seconds = int(seconds)
    # Use divmod (floor division) instead of "/" so the arithmetic
    # stays integral even with "from __future__ import division"
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    if hours:
        result.append(N_('%(count)d hour', '%(count)d hours', hours) % {'count': hours})
    if minutes:
        result.append(N_('%(count)d minute', '%(count)d minutes', minutes) % {'count': minutes})
    if seconds:
        result.append(N_('%(count)d second', '%(count)d seconds', seconds) % {'count': seconds})
    if len(result) > 1:
        # "a, b and c" style join of the non-zero parts
        return (' ' + _('and') + ' ').join((', '.join(result[:-1]), result[-1]))
    else:
        return result[0]
def http_request(url, method='HEAD'):
    """Perform a raw HTTP request on a URL and return the response."""
    (scheme, netloc, path, parms, qry, fragid) = urlparse.urlparse(url)
    connection = httplib.HTTPConnection(netloc)
    # Request only the part after "scheme://netloc"
    relative_start = len(scheme) + len('://') + len(netloc)
    connection.request(method, url[relative_start:])
    return connection.getresponse()
def gui_open(filename):
    """
    Open a file or folder with the default application set
    by the Desktop environment. This uses "xdg-open" on all
    systems with a few exceptions:

       on Win32, os.startfile() is used
       on Mac OS X, "open" is used

    Returns True on success, False if opening failed.
    """
    try:
        if gpodder.ui.win32:
            os.startfile(filename)
        elif gpodder.ui.osx:
            subprocess.Popen(['open', filename])
        else:
            subprocess.Popen(['xdg-open', filename])
        return True
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt and
        # SystemExit are no longer swallowed
        logger.error('Cannot open file/folder: "%s"', filename, exc_info=True)
        return False
def open_website(url):
    """
    Opens the specified URL using the default system web
    browser. This uses Python's "webbrowser" module, so
    make sure your system is set up correctly.
    """
    # Run in a background thread so a slow browser start
    # does not block the caller
    def launch_browser():
        webbrowser.open(url)
    run_in_background(launch_browser)
def convert_bytes(d):
    """
    Convert byte strings to unicode strings

    This function will decode byte strings into unicode
    strings. Any other data types will be left alone.

    >>> convert_bytes(None)
    >>> convert_bytes(1)
    1
    >>> convert_bytes(4711L)
    4711L
    >>> convert_bytes(True)
    True
    >>> convert_bytes(3.1415)
    3.1415
    >>> convert_bytes('Hello')
    u'Hello'
    >>> convert_bytes(u'Hey')
    u'Hey'
    """
    if d is None:
        return d
    # Numbers and booleans pass through untouched
    if isinstance(d, (int, long, bool, float)):
        return d
    # Already-unicode strings pass through, too
    if isinstance(d, unicode):
        return d
    # Anything else is assumed to be a UTF-8 byte string
    return d.decode('utf-8', 'ignore')
def sanitize_encoding(filename):
r"""
Generate a sanitized version of a string (i.e.
remove invalid characters and encode in the
detected native language encoding).
>>> sanitize_encoding('\x80')
''
>>> sanitize_encoding(u'unicode')
'unicode'
"""
# The encoding problem goes away in Python 3.. hopefully!
if sys.version_info >= (3, 0):
return filename
global encoding
if not isinstance(filename, unicode):
filename = filename.decode(encoding, 'ignore')
return filename.encode(encoding, 'ignore')
def sanitize_filename(filename, max_length=0, use_ascii=False):
    """
    Generate a sanitized version of a filename that can
    be written on disk (i.e. remove/replace invalid
    characters and encode in the native language) and
    trim filename if greater than max_length (0 = no limit).

    If use_ascii is True, don't encode in the native language,
    but use only characters from the ASCII character set.
    """
    if not isinstance(filename, unicode):
        filename = filename.decode(encoding, 'ignore')
    if max_length > 0 and len(filename) > max_length:
        logger.info('Limiting file/folder name "%s" to %d characters.',
                filename, max_length)
        filename = filename[:max_length]
    # Encode to the target character set, dropping unrepresentable
    # characters, then strip invalid filesystem characters as well as
    # leading/trailing dots and whitespace
    target_encoding = 'ascii' if use_ascii else encoding
    sanitized = filename.encode(target_encoding, 'ignore')
    sanitized = sanitized.translate(SANITIZATION_TABLE)
    return sanitized.strip('.' + string.whitespace)
def find_mount_point(directory):
    """
    Try to find the mount point for a given directory.
    If the directory is itself a mount point, return
    it. If not, remove the last part of the path and
    re-check if it's a mount point. If the directory
    resides on your root filesystem, "/" is returned.

    >>> find_mount_point('/')
    '/'

    >>> find_mount_point(u'/something')
    Traceback (most recent call last):
      ...
    ValueError: Convert unicode objects to str first.

    >>> find_mount_point(None)
    Traceback (most recent call last):
      ...
    ValueError: Directory names should be of type str.

    >>> find_mount_point(42)
    Traceback (most recent call last):
      ...
    ValueError: Directory names should be of type str.

    >>> from minimock import mock, restore
    >>> mocked_mntpoints = ('/', '/home', '/media/usbdisk', '/media/cdrom')
    >>> mock('os.path.ismount', returns_func=lambda x: x in mocked_mntpoints)
    >>>
    >>> # For mocking os.getcwd(), we simply use a lambda to avoid the
    >>> # massive output of "Called os.getcwd()" lines in this doctest
    >>> os.getcwd = lambda: '/home/thp'
    >>>
    >>> find_mount_point('.')
    Called os.path.ismount('/home/thp')
    Called os.path.ismount('/home')
    '/home'
    >>> find_mount_point('relativity')
    Called os.path.ismount('/home/thp/relativity')
    Called os.path.ismount('/home/thp')
    Called os.path.ismount('/home')
    '/home'
    >>> find_mount_point('/media/usbdisk/')
    Called os.path.ismount('/media/usbdisk')
    '/media/usbdisk'
    >>> find_mount_point('/home/thp/Desktop')
    Called os.path.ismount('/home/thp/Desktop')
    Called os.path.ismount('/home/thp')
    Called os.path.ismount('/home')
    '/home'
    >>> find_mount_point('/media/usbdisk/Podcasts/With Spaces')
    Called os.path.ismount('/media/usbdisk/Podcasts/With Spaces')
    Called os.path.ismount('/media/usbdisk/Podcasts')
    Called os.path.ismount('/media/usbdisk')
    '/media/usbdisk'
    >>> find_mount_point('/home/')
    Called os.path.ismount('/home')
    '/home'
    >>> find_mount_point('/media/cdrom/../usbdisk/blubb//')
    Called os.path.ismount('/media/usbdisk/blubb')
    Called os.path.ismount('/media/usbdisk')
    '/media/usbdisk'
    >>> restore()
    """
    if isinstance(directory, unicode):
        # XXX: This is only valid for Python 2 - misleading error in Python 3?
        # We do not accept unicode strings, because they could fail when
        # trying to be converted to some native encoding, so fail loudly
        # and leave it up to the callee to encode into the proper encoding.
        raise ValueError('Convert unicode objects to str first.')
    if not isinstance(directory, str):
        # In Python 2, we assume it's a byte str; in Python 3, we assume
        # that it's a unicode str. The abspath/ismount/split functions of
        # os.path work with unicode str in Python 3, but not in Python 2.
        raise ValueError('Directory names should be of type str.')
    # Normalize the path ("." and ".." components, trailing slash) so the
    # upward walk below terminates at "/"
    directory = os.path.abspath(directory)
    # Walk upwards one path component at a time until a mount point is
    # found; the root directory is the final fallback
    while directory != '/':
        if os.path.ismount(directory):
            return directory
        else:
            (directory, tail_data) = os.path.split(directory)
    return '/'
# matches http:// and ftp:// and mailto://
protocolPattern = re.compile(r'^\w+://')

def isabs(string):
    """
    @return true if string is an absolute path or protocoladdress
    for addresses beginning in http:// or ftp:// or ldap:// -
    they are considered "absolute" paths.
    Source: http://code.activestate.com/recipes/208993/
    """
    if protocolPattern.match(string):
        return 1
    return os.path.isabs(string)
def commonpath(l1, l2, common=None):
    """
    helper functions for relpath

    Splits off the longest shared prefix of the two path-component
    lists and returns a tuple (common, rest1, rest2), where "common"
    is the shared leading components (appended to the passed-in
    "common" prefix, if any).

    Source: http://code.activestate.com/recipes/208993/
    """
    # Use a None sentinel instead of the original mutable default
    # argument ([]), which is a well-known Python pitfall; the default
    # behavior is unchanged
    if common is None:
        common = []
    # Iterative version of the original tail recursion
    while l1 and l2 and l1[0] == l2[0]:
        common = common + [l1[0]]
        l1 = l1[1:]
        l2 = l2[1:]
    return (common, l1, l2)
def relpath(p1, p2):
    """
    Finds relative path from p1 to p2
    Source: http://code.activestate.com/recipes/208993/
    """
    pathsplit = lambda s: s.split(os.path.sep)
    (common, l1, l2) = commonpath(pathsplit(p1), pathsplit(p2))
    p = []
    if len(l1) > 0:
        # Walk up one level for every component of p1 not shared with p2
        p = [('..' + os.sep) * len(l1)]
    p = p + l2
    # The original used "len(p) is 0", which relies on CPython's
    # small-integer caching and compares identity, not value; use a
    # plain emptiness check instead
    if not p:
        return "."
    return os.path.join(*p)
def get_hostname():
    """Return the hostname of this computer

    This can be implemented in a different way on each
    platform and should yield a unique-per-user device ID.
    """
    # Fallback to socket.gethostname() when platform.node() gives
    # nothing - but can this give us "localhost"?
    return platform.node() or socket.gethostname()
def detect_device_type():
    """Device type detection for gpodder.net

    This function tries to detect on which
    kind of device gPodder is running on.

    Possible return values:
    desktop, laptop, mobile, server, other
    """
    if gpodder.ui.harmattan:
        return 'mobile'
    # Linux: If we have a battery, assume Laptop
    if glob.glob('/proc/acpi/battery/*'):
        return 'laptop'
    return 'desktop'
def write_m3u_playlist(m3u_filename, episodes, extm3u=True):
    """Create an M3U playlist from a episode list

    If the parameter "extm3u" is False, the list of
    episodes should be a list of filenames, and no
    extended information will be written into the
    M3U files (#EXTM3U / #EXTINF).

    If the parameter "extm3u" is True (default), then the
    list of episodes should be PodcastEpisode objects,
    as the extended metadata will be taken from them.
    """
    # Use a context manager so the file handle is closed even when an
    # episode raises halfway through (the original leaked the handle
    # on exceptions)
    with open(m3u_filename, 'w') as f:
        if extm3u:
            # Mandatory header for extended playlists
            f.write('#EXTM3U\n')
        for episode in episodes:
            if not extm3u:
                # Episode objects are strings that contain file names
                f.write(episode + '\n')
                continue
            if episode.was_downloaded(and_exists=True):
                filename = episode.local_filename(create=False)
                assert filename is not None
                if os.path.dirname(filename).startswith(os.path.dirname(m3u_filename)):
                    # Use a relative filename for episodes stored
                    # below the playlist's own directory
                    filename = filename[len(os.path.dirname(m3u_filename) + os.sep):]
                f.write('#EXTINF:0,' + episode.playlist_title() + '\n')
                f.write(filename + '\n')
def generate_names(filename):
    """Generate candidate names for a file.

    Yields the filename itself first, then numbered variants
    with " (2)", " (3)", ... inserted before the extension.
    """
    yield filename
    stem, extension = os.path.splitext(filename)
    for counter in itertools.count(2):
        yield '%s (%d)%s' % (stem, counter, extension)
def is_known_redirecter(url):
    """Check if a URL redirect is expected, and no filenames should be updated

    We usually honor URL redirects, and update filenames accordingly.
    In some cases (e.g. Soundcloud) this results in a worse filename,
    so we hardcode and detect these cases here to avoid renaming files
    for which we know that a "known good default" exists.

    The problem here is that by comparing the currently-assigned filename
    with the new filename determined by the URL, we cannot really determine
    which one is the "better" URL (e.g. "n5rMSpXrqmR9.128.mp3" for Soundcloud).
    """
    # Soundcloud-hosted media downloads (we take the track name as filename)
    return url.startswith('http://ak-media.soundcloud.com/')
def atomic_rename(old_name, new_name):
    """Atomically rename/move a (temporary) file

    This is usually used when updating a file safely by writing
    the new contents into a temporary file and then moving the
    temporary file over the original file to replace it.
    """
    # Win32 does not support atomic rename with os.rename, so fall
    # back to shutil.move there
    rename = shutil.move if gpodder.ui.win32 else os.rename
    rename(old_name, new_name)
def check_command(self, cmd):
    """Check if a command line command/program exists"""
    # Prior to Python 2.7.3, this module (shlex) did not support Unicode input.
    first_token = shlex.split(sanitize_encoding(cmd))[0]
    return find_command(first_token) is not None
def rename_episode_file(episode, filename):
    """Helper method to update a PodcastEpisode object

    Useful after renaming/converting its download file.

    episode -- the episode object to update (assumed to be a
               PodcastEpisode with a "db" attribute - TODO confirm)
    filename -- the new, already-existing download file path

    Raises ValueError if "filename" does not exist on disk.
    Persists the change via episode.save() and commits the
    database transaction.
    """
    if not os.path.exists(filename):
        raise ValueError('Target filename does not exist.')
    basename, extension = os.path.splitext(filename)
    # Re-derive all file-based metadata from the new file
    episode.download_filename = os.path.basename(filename)
    episode.file_size = os.path.getsize(filename)
    episode.mime_type = mimetype_from_extension(extension)
    episode.save()
    episode.db.commit()
def get_update_info(url='http://gpodder.org/downloads'):
    """
    Get up to date release information from gpodder.org.

    Returns a tuple: (up_to_date, latest_version, release_date, days_since)

    Example result (up to date version, 20 days after release):
        (True, '3.0.4', '2012-01-24', 20)

    Example result (outdated version, 10 days after release):
        (False, '3.0.5', '2012-02-29', 10)
    """
    data = urlopen(url).read()
    # Collect id="..." fields like <span id="latest-version">3.0.4</span>
    id_field_re = re.compile(r'<([a-z]*)[^>]*id="([^"]*)"[^>]*>([^<]*)</\1>')
    info = {}
    for match in id_field_re.finditer(data):
        info[match.group(2)] = match.group(3)
    latest_version = info['latest-version']
    release_date = info['release-date']
    release_parsed = datetime.datetime.strptime(release_date, '%Y-%m-%d')
    days_since_release = (datetime.datetime.today() - release_parsed).days
    # Compare dotted version strings component-wise as integer tuples
    version_key = lambda s: tuple(int(x) for x in s.split('.'))
    up_to_date = (version_key(gpodder.__version__) >= version_key(latest_version))
    return up_to_date, latest_version, release_date, days_since_release
def run_in_background(function, daemon=False):
    """Run a function in a new thread and return the Thread object."""
    logger.debug('run_in_background: %s (%s)', function, str(daemon))
    worker = threading.Thread(target=function)
    worker.setDaemon(daemon)
    worker.start()
    return worker
def linux_get_active_interfaces():
    """Get active network interfaces using 'ip link'

    Returns a list of active network interfaces or an
    empty list if the device is offline. The loopback
    interface is not included.
    """
    ip_link = subprocess.Popen(['ip', 'link'], stdout=subprocess.PIPE)
    output, _unused = ip_link.communicate()
    # Interfaces in state UP or UNKNOWN count as active
    for name, _state in re.findall(r'\d+: ([^:]+):.*state (UP|UNKNOWN)', output):
        if name != 'lo':
            yield name
def osx_get_active_interfaces():
    """Get active network interfaces using 'ifconfig'

    Yields the names of active network interfaces, or nothing
    if the device is offline. The loopback interface is not
    included.
    """
    process = subprocess.Popen(['ifconfig'], stdout=subprocess.PIPE)
    stdout, _ = process.communicate()
    # BUG FIX: the third positional argument of re.split() is
    # "maxsplit", not "flags" - passing re.MULTILINE (== 8) there
    # silently limited the output to the first 8 interface stanzas.
    # The pattern needs no flags at all.
    for i in re.split('\n(?!\t)', stdout):
        b = re.match('(\\w+):.*status: (active|associated)$', i, re.MULTILINE | re.DOTALL)
        if b:
            yield b.group(1)
def unix_get_active_interfaces():
    """Get active network interfaces using 'ifconfig'

    Yields the names of active network interfaces, or nothing
    if the device is offline. The loopback interface is not
    included.
    """
    process = subprocess.Popen(['ifconfig'], stdout=subprocess.PIPE)
    stdout, _ = process.communicate()
    # BUG FIX: the third positional argument of re.split() is
    # "maxsplit", not "flags" - passing re.MULTILINE (== 8) there
    # silently limited the output to the first 8 interface stanzas.
    # The pattern needs no flags at all.
    for i in re.split('\n(?!\t)', stdout):
        b = re.match('(\\w+):.*status: active$', i, re.MULTILINE | re.DOTALL)
        if b:
            yield b.group(1)
def connection_available():
    """Check if an Internet connection is available

    Returns True if a connection is available (or if there
    is no way to determine the connection). Returns False
    if no network interfaces are up (i.e. no connectivity).
    """
    try:
        if gpodder.ui.win32:
            # FIXME: Implement for Windows
            return True
        elif gpodder.ui.osx:
            return len(list(osx_get_active_interfaces())) > 0
        else:
            # By default, we assume we're not offline (bug 1730)
            offline = False
            if find_command('ifconfig') is not None:
                # If ifconfig is available, and it says we don't have
                # any active interfaces, assume we're offline
                if len(list(unix_get_active_interfaces())) == 0:
                    offline = True
            # If we assume we're offline, try the "ip" command as fallback
            if offline and find_command('ip') is not None:
                if len(list(linux_get_active_interfaces())) == 0:
                    offline = True
                else:
                    offline = False
            return not offline
        # NOTE(review): unreachable - every branch above returns
        return False
    except Exception, e:
        logger.warn('Cannot get connection status: %s', e, exc_info=True)
        # When we can't determine the connection status, act as if we're online (bug 1730)
        return True
def website_reachable(url):
    """
    Check if a specific website is available.

    Returns a (reachable, response) tuple; *response* is the open
    urllib2 response object on success and None otherwise.
    """
    # Without any usable network interface the site cannot be reached,
    # so skip the (slow) HTTP attempt entirely.
    if not connection_available():
        return (False, None)

    try:
        return (True, urllib2.urlopen(url, timeout=1))
    except urllib2.URLError:
        return (False, None)
def delete_empty_folders(top):
    """Recursively remove all empty directories below *top*.

    The tree is walked bottom-up, so a directory that only contained
    empty directories becomes empty itself and is removed as well.
    *top* itself is never removed.
    """
    for parent, subdirs, _files in os.walk(top, topdown=False):
        for subdir in subdirs:
            candidate = os.path.join(parent, subdir)
            if not os.listdir(candidate):
                os.rmdir(candidate)
| somini/gpodder | src/gpodder/util.py | Python | gpl-3.0 | 54,845 |
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
import tempfile
from copy import copy
from datetime import datetime
import BTrees.OIBTree as OIBTree
from BTrees.IOBTree import IOBTree
from BTrees.OOBTree import OOBTree, intersection, union
from persistent import Persistent
from persistent.list import PersistentList
from pytz import timezone
from indico.core.config import Config
from indico.modules.events.abstracts.legacy import (contribution_from_abstract,
AbstractFieldManagerAdapter,
AbstractJudgmentLegacyMixin,
AbstractLegacyMixin,
AbstractManagerLegacyMixin,
AbstractStatusAcceptedLegacyMixin)
from indico.modules.events.contributions.models.types import ContributionType
from indico.util.string import safe_slice, safe_upper
from MaKaC.common.Counter import Counter
from MaKaC.common.timezoneUtils import nowutc
from MaKaC.errors import MaKaCError, NoReportError
from MaKaC.i18n import _
from MaKaC.trashCan import TrashCanManager
class _AbstractParticipationIndex(Persistent):
    """This class allows to index abstract participations (submitters)
    for a single CFA process; this means that clients will be able to
    efficiently perform queries of the type "give me all the abstracts
    in which a certain registered user is implied".

    For being able to perform this indexing, it is supposed that the Avatar
    identifier is unique among other avatars and that it cannot change.
    This index must be maintained by clients (i.e. the CFAMgr) as it doesn't
    keep track of the changes on Participantons.

    The key of the index is the Avatar and the values the different
    Participations that user has within the current CFA process. For
    performance reasons, the Avatar id will be used as index key (using the
    whole Avatar object would make the index bigger and as the Avatar id
    cannot change it's enough); the clients would have to keep the
    integrity of the index.
    """

    def __init__(self):
        # Maps avatar id (str) -> PersistentList of AbstractParticipation
        self._idx = OOBTree()

    def index(self, participation):
        """Add a new participation to the index"""
        # if the Participation is not linked to an Avatar there's no point to
        # index it
        a = participation.getAvatar()
        if not a:
            return
        # ToDo: if the Participation corresponds to an abstract which doesn't
        #   correspond to the current CFAMgr, then an error must be raised
        # NOTE: ``in`` replaces the deprecated has_key() (same semantics on
        # BTrees containers).
        if a.getId() not in self._idx:
            self._idx[a.getId()] = PersistentList()
        # if the participation is already in the index, no need for adding it
        if participation in self._idx[a.getId()]:
            return
        self._idx[a.getId()].append(participation)

    def unindex(self, participation):
        """Remove an existing participation from the index"""
        # if the Participation is not linked to an Avatar there's no point to
        # unindex it
        a = participation.getAvatar()
        if not a:
            return
        # if the Avatar associated to the participation isn't in the index do
        # nothing
        if a.getId() not in self._idx:
            return
        # if the given participation is indexed remove it, otherwise do nothing
        if participation in self._idx[a.getId()]:
            self._idx[a.getId()].remove(participation)

    def getParticipationList(self, av):
        """Return the participations registered for *av* (empty list if none)."""
        # .get() with a default replaces the old try/except KeyError dance.
        return self._idx.get(av.getId(), [])
class AbstractParticipation(Persistent):
    """A person attached to an abstract (author, speaker or submitter).

    Stores plain contact data; setters for the indexed fields (first name,
    surname, e-mail) keep the owning manager's author indexes up to date
    through _unindex()/_index().
    """

    def __init__(self, abstract, **data):
        self._abstract = abstract
        self._firstName = ""
        self._surName = ""
        self._email = ""
        # NOTE: "_affilliation" is a historical misspelling kept on purpose --
        # renaming the attribute would break objects already stored in the ZODB.
        self._affilliation = ""
        self._address = ""
        self._telephone = ""
        self._fax = ""
        self._title = ""
        self.setData(**data)

    def setFromAvatar(self, av):
        """Copy all contact fields from an Avatar (registered user)."""
        data = {"title": av.getTitle(),
                "firstName": av.getName(),
                "surName": av.getSurName(),
                "email": av.getEmail(),
                "affiliation": av.getOrganisation(),
                "address": av.getAddress(),
                "telephone": av.getTelephone(),
                "fax": av.getFax()}
        self.setData(**data)

    def setFromAbstractParticipation(self, part):
        """Copy all contact fields from another AbstractParticipation."""
        data = {"title": part.getTitle(),
                "firstName": part.getFirstName(),
                "surName": part.getSurName(),
                "email": part.getEmail(),
                "affiliation": part.getAffiliation(),
                "address": part.getAddress(),
                "telephone": part.getTelephone(),
                "fax": part.getFax()}
        self.setData(**data)

    def setData(self, **data):
        """Update any subset of the contact fields from keyword arguments."""
        if "firstName" in data:
            self.setFirstName(data["firstName"])
        if "surName" in data:
            self.setSurName(data["surName"])
        if "email" in data:
            self.setEmail(data["email"])
        if "affiliation" in data:
            self.setAffiliation(data["affiliation"])
        if "address" in data:
            self.setAddress(data["address"])
        if "telephone" in data:
            self.setTelephone(data["telephone"])
        if "fax" in data:
            self.setFax(data["fax"])
        if "title" in data:
            self.setTitle(data["title"])
    setValues = setData

    def getData(self):
        """Return all contact fields as a plain dict (inverse of setData)."""
        data = {}
        data["firstName"] = self.getFirstName()
        data["surName"] = self.getSurName()
        data["email"] = self.getEmail()
        data["affiliation"] = self.getAffiliation()
        data["address"] = self.getAddress()
        data["telephone"] = self.getTelephone()
        data["fax"] = self.getFax()
        data["title"] = self.getTitle()
        return data
    getValues = getData

    def clone(self, abstract):
        """Return a copy of this participation attached to *abstract*."""
        # BUG FIX: __init__ only accepts the contact data as keyword
        # arguments (**data); passing the dict positionally raised
        # ``TypeError: __init__() takes exactly 2 arguments (3 given)``.
        ap = AbstractParticipation(abstract, **self.getData())
        return ap

    def _notifyModification(self):
        self._abstract._notifyModification()

    def _unindex(self):
        # Remove this participation from the owning manager's author indexes
        # (no-op when the abstract/manager chain is incomplete).
        abs = self.getAbstract()
        if abs is not None:
            mgr = abs.getOwner()
            if mgr is not None:
                mgr.unindexAuthor(self)

    def _index(self):
        # Counterpart of _unindex(): (re-)register in the author indexes.
        abs = self.getAbstract()
        if abs is not None:
            mgr = abs.getOwner()
            if mgr is not None:
                mgr.indexAuthor(self)

    def setFirstName(self, name):
        tmp = name.strip()
        if tmp == self.getFirstName():
            return
        # Indexed field: unindex under the old value, then reindex.
        self._unindex()
        self._firstName = tmp
        self._index()
        self._notifyModification()

    def getFirstName(self):
        return self._firstName

    def getName(self):
        return self._firstName

    def setSurName(self, name):
        tmp = name.strip()
        if tmp == self.getSurName():
            return
        # Indexed field: unindex under the old value, then reindex.
        self._unindex()
        self._surName = tmp
        self._index()
        self._notifyModification()

    def getSurName(self):
        return self._surName

    def getFamilyName(self):
        return self._surName

    def setEmail(self, email):
        # E-mail addresses are normalised to lower case before comparison
        # and storage (they are used as index keys).
        email = email.strip().lower()
        if email != self.getEmail():
            self._unindex()
            self._email = email
            self._index()
            self._notifyModification()

    def getEmail(self):
        return self._email

    def setAffiliation(self, af):
        self._affilliation = af.strip()
        self._notifyModification()
    # Backwards-compatible alias (historical misspelling).
    setAffilliation = setAffiliation

    def getAffiliation(self):
        return self._affilliation

    @property
    def affiliation(self):
        return self._affilliation

    def setAddress(self, address):
        self._address = address.strip()
        self._notifyModification()

    def getAddress(self):
        return self._address

    def setTelephone(self, telf):
        self._telephone = telf.strip()
        self._notifyModification()

    def getTelephone(self):
        return self._telephone

    def setFax(self, fax):
        self._fax = fax.strip()
        self._notifyModification()

    def getFax(self):
        return self._fax

    def setTitle(self, title):
        self._title = title.strip()
        self._notifyModification()

    def getTitle(self):
        return self._title

    def getFullName(self):
        """Return "SURNAME, First-name" (title-prefixed when available)."""
        res = safe_upper(self.getSurName())
        tmp = []
        # Capitalise each word of the first name ("john paul" -> "John Paul").
        for name in self.getFirstName().lower().split(" "):
            if not name.strip():
                continue
            name = name.strip()
            tmp.append(safe_upper(safe_slice(name, 0, 1)) + safe_slice(name, 1))
        firstName = " ".join(tmp)
        if firstName:
            res = "%s, %s" % (res, firstName)
        if self.getTitle():
            res = "%s %s" % (self.getTitle(), res)
        return res

    @property
    def full_name(self):
        return self.getFullName()

    def getStraightFullName(self):
        """Return "First-name Surname" without any reformatting."""
        name = ""
        if self.getName():
            name = "%s " % self.getName()
        return "%s%s" % (name, self.getSurName())

    def getAbrName(self):
        """Return "Surname, F." (first name abbreviated to its initial)."""
        res = self.getSurName()
        if self.getFirstName():
            if res:
                res = "%s, " % res
            res = "%s%s." % (res, safe_upper(safe_slice(self.getFirstName(), 0, 1)))
        return res

    def getAbstract(self):
        return self._abstract

    def setAbstract(self, abs):
        self._abstract = abs

    def delete(self):
        # Detach from the abstract and park the object in the trash can so it
        # can still be recovered.
        self._unindex()
        self._abstract = None
        TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)
class Author(AbstractParticipation):
    """An author of an abstract (primary or co-author), with its own id."""

    def __init__(self, abstract, **data):
        AbstractParticipation.__init__(self, abstract, **data)
        self._abstractId = ""

    def getId(self):
        return self._id

    def setId(self, newId):
        self._id = str(newId)

    def clone(self, abstract):
        """Return a copy of this author attached to *abstract*."""
        # BUG FIX: the data dict must be expanded into keyword arguments;
        # passing it positionally raised TypeError (``__init__`` only takes
        # ``abstract`` positionally, everything else is **data).
        auth = Author(abstract, **self.getData())
        return auth

    def isSpeaker(self):
        return self._abstract.isSpeaker(self)
class Submitter(AbstractParticipation):
    """The participation of the (registered) user who submitted the abstract.

    A Submitter is always bound to an Avatar; the owning manager's
    participation index is kept in sync when the associated user changes.
    """

    def __init__(self, abstract, av):
        if av is None:
            raise MaKaCError(_("abstract submitter cannot be None"))
        AbstractParticipation.__init__(self, abstract)
        self._user = None
        self._setUser(av)
        self.setFromAvatar(av)

    def _setUser(self, av):
        if self.getUser() == av:
            return
        # if currently there's an association with a registered user, we notify
        #   the unidexation of the participation
        if self.getUser():
            self.getAbstract().getOwner().unregisterParticipation(self)
        self._user = av
        # if the participation is associated to any avatar, we make the
        #   association and index it
        if self.getUser():
            self.getAbstract().getOwner().registerParticipation(self)

    def clone(self, abstract):
        """Return a copy of this submitter attached to *abstract*."""
        sub = Submitter(abstract, self.getAvatar())
        # BUG FIX: setData() only accepts keyword arguments (**data);
        # passing the dict positionally raised TypeError.
        sub.setData(**self.getData())
        return sub

    def getUser(self):
        return self._user

    def getAvatar(self):
        return self._user

    def representsUser(self, av):
        return self.getUser() == av
class _AuthIdx(Persistent):
    """Index of abstract authors, keyed on "surname firstname" (lower case).

    Each key maps to an OIBTree of ``abstract id -> reference count`` so the
    same author can be indexed several times for one abstract.
    """

    def __init__(self, mgr):
        self._mgr = mgr
        self._idx = OOBTree()

    def _getKey(self, auth):
        return "%s %s" % (auth.getSurName().lower(), auth.getFirstName().lower())

    def index(self, auth):
        """Register *auth* in the index (reference-counted per abstract)."""
        if auth.getAbstract() is None:
            raise MaKaCError(_("cannot index an author of an abstract which is not included in a conference"))
        if auth.getAbstract().getOwner() != self._mgr:
            raise MaKaCError(_("cannot index an author of an abstract which does not belong to this conference"))
        key = self._getKey(auth)
        abstractId = str(auth.getAbstract().getId())
        # NOTE: ``in`` replaces the deprecated has_key() on BTrees containers.
        if key not in self._idx:
            self._idx[key] = OIBTree.OIBTree()
        if abstractId not in self._idx[key]:
            self._idx[key][abstractId] = 0
        self._idx[key][abstractId] += 1

    def unindex(self, auth):
        """Decrement *auth*'s reference count; drop empty entries."""
        if auth.getAbstract() is None:
            raise MaKaCError(_("cannot unindex an author of an abstract which is not included in a conference"))
        if auth.getAbstract().getOwner() != self._mgr:
            raise MaKaCError(_("cannot unindex an author of an abstract which does not belong to this conference"))
        key = self._getKey(auth)
        if key not in self._idx:
            return
        abstractId = str(auth.getAbstract().getId())
        if abstractId not in self._idx[key]:
            return
        self._idx[key][abstractId] -= 1
        # Garbage-collect empty reference counters and empty keys.
        if self._idx[key][abstractId] <= 0:
            del self._idx[key][abstractId]
        if len(self._idx[key]) <= 0:
            del self._idx[key]

    def match(self, query):
        """Return the set of abstract ids whose author key contains *query*."""
        query = query.lower().strip()
        res = OIBTree.OISet()
        # Substring scan over all keys; result is the union of the id sets.
        for k in self._idx.keys():
            if query in k:
                res = OIBTree.union(res, self._idx[k])
        return res
class _PrimAuthIdx(_AuthIdx):
    """Author index restricted to the primary authors of the managed abstracts."""

    def __init__(self, mgr):
        _AuthIdx.__init__(self, mgr)
        # Build the index up-front from every abstract currently managed.
        for abstract in self._mgr.getAbstractList():
            for primary_author in abstract.getPrimaryAuthorList():
                self.index(primary_author)
class _AuthEmailIdx(_AuthIdx):
    """Author index keyed on the lower-cased e-mail address.

    Unlike _PrimAuthIdx it covers both primary authors and co-authors.
    """

    def __init__(self, mgr):
        _AuthIdx.__init__(self, mgr)
        # Populate from all authors (primary first, then co-authors) of
        # every abstract currently managed.
        for abstract in self._mgr.getAbstractList():
            everyone = list(abstract.getPrimaryAuthorList()) + list(abstract.getCoAuthorList())
            for author in everyone:
                self.index(author)

    def _getKey(self, auth):
        # Override: key on e-mail instead of "surname firstname".
        return auth.getEmail().lower()
class AbstractMgr(AbstractManagerLegacyMixin, Persistent):
    """Manager of the call-for-abstracts (CFA) process of one conference.

    Owns the abstracts, the submitter-participation index, the author
    indexes and the notification templates.  Many getters use a
    try/except-AttributeError "lazy init" pattern so that instances
    stored in the ZODB before an attribute existed keep working.
    """

    def __init__(self, owner):
        self._owner = owner
        self._abstracts = OOBTree()
        self._participationIdx = _AbstractParticipationIndex()
        self.__abstractGenerator = Counter()
        self._activated = False
        self.setStartSubmissionDate(datetime.now())
        self.setEndSubmissionDate(datetime.now())
##        self._contribTypes = PersistentList()
        self.setAnnouncement("")
        self._notifTpls = IOBTree()
        self._notifTplsOrder = PersistentList()
        self.__notifTplsCounter = Counter()
        self._authorizedSubmitter = PersistentList()
        self._primAuthIdx = _PrimAuthIdx(self)
        self._authEmailIdx = _AuthEmailIdx(self)
        self._submissionNotification = SubmissionNotification()
        self._multipleTracks = True
        self._tracksMandatory = False
        self._attachFiles = False
        self._showSelectAsSpeaker = True
        self._selectSpeakerMandatory = True
        self._showAttachedFilesContribList = False

    def getMultipleTracks(self):
        # Lazy init for pre-existing ZODB objects.
        try:
            return self._multipleTracks
        except:
            self.setMultipleTracks(True)
            return self._multipleTracks

    def setMultipleTracks(self, multipleTracks=True):
        self._multipleTracks = multipleTracks

    def areTracksMandatory(self):
        # Lazy init for pre-existing ZODB objects.
        try:
            return self._tracksMandatory
        except:
            self.setTracksMandatory(False)
            return self._tracksMandatory

    def canAttachFiles(self):
        # Lazy init for pre-existing ZODB objects.
        try:
            return self._attachFiles
        except:
            self.setAllowAttachFiles(False)
            return self._attachFiles

    def setAllowAttachFiles(self, attachedFiles):
        self._attachFiles = attachedFiles

    def setTracksMandatory(self, tracksMandatory=False):
        self._tracksMandatory = tracksMandatory

    def showSelectAsSpeaker(self):
        # Lazy init for pre-existing ZODB objects.
        try:
            return self._showSelectAsSpeaker
        except:
            self._showSelectAsSpeaker = True
            return self._showSelectAsSpeaker

    def setShowSelectAsSpeaker(self, showSelectAsSpeaker):
        self._showSelectAsSpeaker = showSelectAsSpeaker

    def isSelectSpeakerMandatory(self):
        # Lazy init for pre-existing ZODB objects.
        try:
            return self._selectSpeakerMandatory
        except:
            self._selectSpeakerMandatory = True
            return self._selectSpeakerMandatory

    def setSelectSpeakerMandatory(self, selectSpeakerMandatory):
        self._selectSpeakerMandatory = selectSpeakerMandatory

    def showAttachedFilesContribList(self):
        # Lazy init for pre-existing ZODB objects.
        try:
            return self._showAttachedFilesContribList
        except:
            self._showAttachedFilesContribList = False
            return self._showAttachedFilesContribList

    def setSwitchShowAttachedFilesContribList(self, showshowAttachedFilesContribList):
        self._showAttachedFilesContribList = showshowAttachedFilesContribList

    def getAbstractFieldsMgr(self):
        # Adapter over the new (SQLAlchemy) event model.
        return AbstractFieldManagerAdapter(self._owner.as_event)

    def clone(self, conference):
        # XXX: Couldn't find any calls of this, but raise an exception just in case...
        raise NotImplementedError('Abstract manager should never be cloned')
        # NOTE: everything below is dead code, kept only for reference.
        amgr = AbstractMgr(conference)
        amgr.setAnnouncement(self.getAnnouncement())
        timeDifference = conference.getStartDate() - self.getOwner().getStartDate()
        amgr.setStartSubmissionDate(self.getStartSubmissionDate() + timeDifference)
        amgr.setEndSubmissionDate(self.getEndSubmissionDate() + timeDifference)
        modifDeadline = self.getModificationDeadline()
        if modifDeadline is not None:
            amgr.setModificationDeadline(self.getModificationDeadline() + timeDifference)
        amgr.setActive(self.isActive())
        if self.getCFAStatus():
            amgr.activeCFA()
        else:
            amgr.desactiveCFA()
        for a in self.getAbstractList():
            amgr.addAbstract(a.clone(conference, amgr._generateNewAbstractId()))
        for tpl in self.getNotificationTplList():
            amgr.addNotificationTpl(tpl.clone())
        # Cloning submission notification:
        amgr.setSubmissionNotification(self.getSubmissionNotification().clone())
        return amgr

    def getOwner(self):
        return self._owner
    getConference = getOwner

    def getTimezone(self):
        return self.getConference().getTimezone()

    def activeCFA(self):
        self._activated = True

    def desactiveCFA(self):
        self._activated = False

    def getAuthorizedSubmitterList(self):
        # Lazy init for pre-existing ZODB objects.
        try:
            return self._authorizedSubmitter
        except AttributeError:
            self._authorizedSubmitter = PersistentList()
            return self._authorizedSubmitter

    def addAuthorizedSubmitter(self, av):
        # Lazy init for pre-existing ZODB objects.
        try:
            if self._authorizedSubmitter:
                pass
        except AttributeError:
            self._authorizedSubmitter = PersistentList()
        if not av in self._authorizedSubmitter:
            self._authorizedSubmitter.append(av)

    def removeAuthorizedSubmitter(self, av):
        # Lazy init for pre-existing ZODB objects.
        try:
            if self._authorizedSubmitter:
                pass
        except:
            self._authorizedSubmitter = PersistentList()
        if av in self._authorizedSubmitter:
            self._authorizedSubmitter.remove(av)

    def getCFAStatus(self):
        return self._activated

    def setActive(self, value):
        if value:
            self.activeCFA()
        else:
            self.desactiveCFA()

    def isActive(self):
        return self._activated

    def setStartSubmissionDate(self, date):
        # Stored naive, normalised to the very start of the given day.
        self._submissionStartDate = datetime(date.year, date.month, date.day, 0, 0, 0)

    def getStartSubmissionDate(self):
        return timezone(self.getTimezone()).localize(self._submissionStartDate)

    def setEndSubmissionDate(self, date):
        # Stored naive, normalised to the very end of the given day.
        self._submissionEndDate = datetime(date.year, date.month, date.day, 23, 59, 59)

    def getEndSubmissionDate(self):
        return timezone(self.getTimezone()).localize(self._submissionEndDate)

    def inSubmissionPeriod(self, date=None):
        """True when *date* (default: now, UTC) is within the submission window."""
        if date is None:
            date = nowutc()
        sd = self.getStartSubmissionDate()
        ed = self.getEndSubmissionDate()
        return date <= ed and date >= sd

    def getModificationDeadline(self):
        """Returns the deadline for modifications on the submitted abstracts.
        """
        # Lazy init for pre-existing ZODB objects.
        try:
            if self._modifDeadline:
                pass
        except AttributeError, e:
            self._modifDeadline = None
        if self._modifDeadline is not None:
            return timezone(self.getTimezone()).localize(self._modifDeadline)
        else:
            return None

    def setModificationDeadline(self, newDL):
        """Sets a new deadline for modifications on the submitted abstracts.
        """
        if newDL is not None:
            # Normalised to the very end of the given day.
            self._modifDeadline = datetime(newDL.year, newDL.month, newDL.day, 23, 59, 59)
        else:
            self._modifDeadline = newDL

    def inModificationPeriod(self, date=None):
        """Tells whether is possible to modify a submitted abstract in a
            certain date.
        """
        if date is None:
            date = nowutc()
        # No deadline set means modification is always allowed.
        if not self.getModificationDeadline():
            return True
        return date <= self.getModificationDeadline()

    def getAnnouncement(self):
        #to be removed
        try:
            if self._announcement:
                pass
        except AttributeError, e:
            self._announcement = ""
        return self._announcement

    def setAnnouncement(self, newAnnouncement):
        self._announcement = newAnnouncement.strip()

    def _getOldAbstractCounter(self):
        return self.__abstractGenerator._getCount()

    def newAbstract(self, av, **data):
        """Creates a new abstract under this manager
        """
        from indico.modules.events.contributions import Contribution
        # Create the new-model abstract first; its friendly id becomes the
        # key of the legacy Abstract object.
        new_abstract = self._new_abstract(self.getConference().as_event)
        # sanity checks to avoid collisions
        assert str(new_abstract.id) not in self._abstracts
        assert not Contribution.query.with_parent(new_abstract.event_new).filter_by(friendly_id=new_abstract.id).count()
        a = Abstract(self, str(new_abstract.friendly_id), av, **data)
        self._abstracts[str(new_abstract.friendly_id)] = a
        for auth in a.getPrimaryAuthorList():
            self.indexAuthor(auth)
        return a

    def addAbstract(self, abstract):
        if abstract in self.getAbstractList():
            return
        if isinstance(abstract.getCurrentStatus(), AbstractStatusWithdrawn):
            raise MaKaCError(_("Cannot add an abstract which has been withdrawn"), ("Event"))
        abstract._setOwner(self)
        self._abstracts[abstract.getId()] = abstract
        for auth in abstract.getPrimaryAuthorList():
            self.indexAuthor(auth)

    def removeAbstract(self, abstract):
        if self._abstracts.has_key(abstract.getId()):
            #for auth in abstract.getPrimaryAuthorList():
            #    self.unindexAuthor(auth)
            # * Remove dependencies with another abstracts:
            #       - If it's an accepted abstract-->remove abstract from contribution
            if isinstance(abstract.getCurrentStatus(), AbstractStatusAccepted):
                raise NoReportError(_("Cannot remove an accepted abstract before removing the contribution linked to it"))
            # If it's a withdrawn abstract-->remove abstract from contribution
            if abstract.as_new.contribution:
                raise NoReportError(_("Cannot remove the abstract before removing the contribution linked to it"))
            # Reset any other abstract whose duplicated/merged status points
            # at the abstract being removed.
            for abs in self._abstracts.values():
                if abs != abstract:
                    st = abs.getCurrentStatus()
                    if isinstance(st, AbstractStatusDuplicated):
                        #if the abstract to delete is the orginal in another "duplicated", change status to submitted
                        if st.getOriginal() == abstract:
                            abs.setCurrentStatus(AbstractStatusSubmitted(abs))
                    elif isinstance(st, AbstractStatusMerged):
                        #if the abstract to delete is the target one in another "merged", change status to submitted
                        if st.getTargetAbstract() == abstract:
                            abs.setCurrentStatus(AbstractStatusSubmitted(abs))
            #unindex participations!!!
            self.unregisterParticipation(abstract.getSubmitter())
            self._remove_abstract(abstract)
            del self._abstracts[abstract.getId()]
            abstract.delete()

    def recoverAbstract(self, abstract):
        self.addAbstract(abstract)
        abstract.recoverFromTrashCan()

    def getAbstractList(self):
        return self._abstracts.values()

    def getAbstractById(self, id):
        return self._abstracts.get(str(id), None)

    def registerParticipation(self, p):
        self._participationIdx.index(p)

    def unregisterParticipation(self, p):
        self._participationIdx.unindex(p)

    def getAbstractListForAvatar(self, av):
        # Migration: older objects stored the index under a misspelled
        # attribute name ("_partipationIdx").
        try:
            if self._participationIdx:
                pass
        except AttributeError, e:
            self._participationIdx = self._partipationIdx
            self._partipationIdx = None
        res = []
        for participation in self._participationIdx.getParticipationList(av):
            abstract = participation.getAbstract()
            if abstract is not None and abstract.isSubmitter(av):
                if abstract not in res:
                    res.append(abstract)
        return res

    def getAbstractListForAuthorEmail(self, email):
        """ Get list of abstracts where the email belongs to an author"""
        return [self.getAbstractById(i) for i in self._getAuthEmailIndex().match(email)]

    def getNotificationTplList(self):
        # Lazy init for pre-existing ZODB objects (both containers).
        try:
            if self._notifTpls:
                pass
        except AttributeError:
            self._notifTpls = IOBTree()
        try:
            if self._notifTplsOrder:
                pass
        except AttributeError:
            self._notifTplsOrder = PersistentList()
            for tpl in self._notifTpls.values():
                self._notifTplsOrder.append(tpl)
        return self._notifTplsOrder

    def addNotificationTpl(self, tpl):
        # Lazy init for pre-existing ZODB objects (all three containers).
        try:
            if self._notifTpls:
                pass
        except AttributeError:
            self._notifTpls = IOBTree()
        try:
            if self._notifTplsOrder:
                pass
        except AttributeError:
            self._notifTplsOrder = PersistentList()
            for tpl in self._notifTpls.values():
                self._notifTplsOrder.append(tpl)
        try:
            if self._notifTplsCounter:
                pass
        except AttributeError:
            self._notifTplsCounter = Counter()
        if tpl.getOwner() == self and self._notifTpls.has_key(tpl.getId()):
            return
        id = tpl.getId()
        if id == "":
            id = self._notifTplsCounter.newCount()
        tpl.includeInOwner(self, id)
        self._notifTpls[int(id)] = tpl
        self._notifTplsOrder.append(tpl)

    def removeNotificationTpl(self, tpl):
        # Lazy init for pre-existing ZODB objects.
        try:
            if self._notifTpls:
                pass
        except AttributeError:
            self._notifTpls = IOBTree()
        try:
            if self._notifTplsOrder:
                pass
        except AttributeError:
            self._notifTplsOrder = PersistentList()
            for tpl in self._notifTpls.values():
                self._notifTplsOrder.append(tpl)
        if tpl.getOwner() != self or not self._notifTpls.has_key(int(tpl.getId())):
            return
        del self._notifTpls[int(tpl.getId())]
        self._notifTplsOrder.remove(tpl)
        tpl.includeInOwner(None, tpl.getId()) # We don't change the id for
                                              # recovery purposes.
        tpl.delete()

    def recoverNotificationTpl(self, tpl):
        self.addNotificationTpl(tpl)
        tpl.recover()

    def getNotificationTplById(self, id):
        # Lazy init for pre-existing ZODB objects.
        try:
            if self._notifTpls:
                pass
        except AttributeError:
            self._notifTpls = IOBTree()
        return self._notifTpls.get(int(id), None)

    def getNotifTplForAbstract(self, abs):
        """Return the first notification template whose condition matches *abs*.
        """
        for tpl in self.getNotificationTplList():
            if tpl.satisfies(abs):
                return tpl
        return None

    def moveUpNotifTpl(self, tpl):
        """Move *tpl* one position up in the template ordering.
        """
        # Lazy init for pre-existing ZODB objects.
        # NOTE(review): the recovery loop below reuses the name ``tpl``,
        # shadowing the parameter -- looks unintended; confirm before relying
        # on the argument after this block.
        try:
            if self._notifTplsOrder:
                pass
        except AttributeError:
            self._notifTplsOrder = PersistentList()
            for tpl in self._notifTpls.values():
                self._notifTplsOrder.append(tpl)
        if tpl not in self._notifTplsOrder:
            return
        idx = self._notifTplsOrder.index(tpl)
        if idx == 0:
            return
        self._notifTplsOrder.remove(tpl)
        self._notifTplsOrder.insert(idx-1, tpl)

    def moveDownNotifTpl(self, tpl):
        """Move *tpl* one position down in the template ordering.
        """
        # Lazy init for pre-existing ZODB objects (same ``tpl`` shadowing
        # caveat as in moveUpNotifTpl).
        try:
            if self._notifTplsOrder:
                pass
        except AttributeError:
            self._notifTplsOrder = PersistentList()
            for tpl in self._notifTpls.values():
                self._notifTplsOrder.append(tpl)
        idx = self._notifTplsOrder.index(tpl)
        if idx == len(self._notifTplsOrder):
            return
        self._notifTplsOrder.remove(tpl)
        self._notifTplsOrder.insert(idx+1, tpl)

    def indexAuthor(self, auth):
        a = auth.getAbstract()
        # Only primary authors go into the name index; every author goes
        # into the e-mail index.
        if a.isPrimaryAuthor(auth):
            self._getPrimAuthIndex().index(auth)
        self._getAuthEmailIndex().index(auth)

    def unindexAuthor(self, auth):
        a = auth.getAbstract()
        if a.isPrimaryAuthor(auth):
            self._getPrimAuthIndex().unindex(auth)
        self._getAuthEmailIndex().unindex(auth)

    def _getPrimAuthIndex(self):
        # Lazy init for pre-existing ZODB objects.
        try:
            if self._primAuthIdx:
                pass
        except AttributeError:
            self._primAuthIdx = _PrimAuthIdx(self)
        return self._primAuthIdx

    def _getAuthEmailIndex(self):
        # Lazy init for pre-existing ZODB objects.
        if not hasattr(self, '_authEmailIdx'):
            self._authEmailIdx = _AuthEmailIdx(self)
        return self._authEmailIdx

    def getAbstractsMatchingAuth(self, query, onlyPrimary=True):
        # NOTE(review): ``onlyPrimary`` is accepted but never used; the
        # primary-author index is always queried.
        if str(query).strip() == "":
            return self.getAbstractList()
        res = self._getPrimAuthIndex().match(query)
        return [self.getAbstractById(id) for id in res]

    def hasAnyEnabledAbstractField(self):
        return self.getAbstractFieldsMgr().hasAnyActiveField()

    def hasEnabledAbstractField(self, key):
        return self.getAbstractFieldsMgr().hasActiveField(key)

    def getSubmissionNotification(self):
        # Lazy init for pre-existing ZODB objects.
        try:
            if self._submissionNotification:
                pass
        except AttributeError, e:
            self._submissionNotification = SubmissionNotification()
        return self._submissionNotification

    def setSubmissionNotification(self, sn):
        self._submissionNotification = sn

    def recalculateAbstractsRating(self, scaleLower, scaleHigher):
        ''' recalculate the values of the rating for all the abstracts in the conference '''
        for abs in self.getAbstractList():
            abs.updateRating((scaleLower, scaleHigher))

    def removeAnswersOfQuestion(self, questionId):
        ''' Remove a question results for each abstract '''
        for abs in self.getAbstractList():
            abs.removeAnswersOfQuestion(questionId)

    def notifyModification(self):
        # Mark the persistent object as dirty so the ZODB stores the change.
        self._p_changed = 1
class SubmissionNotification(Persistent):
    """Recipient lists (To/CC) for abstract-submission notification e-mails."""

    def __init__(self):
        self._toList = PersistentList()
        self._ccList = PersistentList()

    def hasDestination(self):
        """Return True if at least one To or CC recipient is configured."""
        # BUG FIX: the original tested ``self._toList`` twice, so a
        # notification with only CC recipients was reported as having no
        # destination.
        return self._toList != [] or self._ccList != []

    def getToList(self):
        return self._toList

    def setToList(self, tl):
        self._toList = tl

    def addToList(self, to):
        self._toList.append(to)

    def clearToList(self):
        self._toList = PersistentList()

    def getCCList(self):
        return self._ccList

    def setCCList(self, cl):
        self._ccList = cl

    def addCCList(self, cc):
        self._ccList.append(cc)

    def clearCCList(self):
        self._ccList = PersistentList()

    def clone(self):
        """Return a deep-ish copy (new lists, same recipient entries)."""
        nsn = SubmissionNotification()
        for i in self.getToList():
            nsn.addToList(i)
        for i in self.getCCList():
            nsn.addCCList(i)
        return nsn
class Comment(Persistent):
    """An internal comment attached to an abstract, written by *res*."""

    def __init__(self, res, content=""):
        self._abstract = None
        self._id = ""
        self._responsible = res
        # BUG FIX: the ``content`` argument was silently ignored (the
        # attribute was always initialised to ""); store it instead.
        self._content = content
        self._creationDate = nowutc()
        self._modificationDate = nowutc()

    def getLocator(self):
        loc = self._abstract.getLocator()
        loc["intCommentId"] = self._id
        return loc

    def includeInAbstract(self, abstract, id):
        """Attach this comment to *abstract* under the given *id*."""
        self._abstract = abstract
        self._id = id

    def delete(self):
        # Detach and park in the trash can so the comment can be recovered.
        self._abstract = None
        TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)

    def _notifyModification(self, dt=None):
        if dt:
            self._modificationDate = dt
        else:
            self._modificationDate = nowutc()

    def getResponsible(self):
        return self._responsible

    def getAbstract(self):
        return self._abstract

    def getId(self):
        return self._id

    def getContent(self):
        return self._content

    def setContent(self, newContent):
        self._content = newContent
        self._notifyModification()

    def getCreationDate(self):
        return self._creationDate

    def getModificationDate(self):
        return self._modificationDate

    def canModify(self, aw_or_user):
        if hasattr(aw_or_user, 'getUser'):
            aw_or_user = aw_or_user.getUser()
        return self.canUserModify(aw_or_user)

    def canUserModify(self, user):
        abstract = self.getAbstract()
        conf = abstract.getConference()
        # NOTE(review): ``conf.getConference()`` on something that is already
        # a conference looks odd but is preserved as-is -- confirm upstream.
        return self.getResponsible() == user and \
            (abstract.canUserModify(user) or \
             len(conf.getConference().getCoordinatedTracks(user)) > 0)
class Abstract(AbstractLegacyMixin, Persistent):
    def __init__(self, owner, id, submitter, **abstractData):
        # owner: the AbstractMgr that manages this abstract
        # id: the (friendly) abstract id, kept as a string
        # submitter: the Avatar of the submitting user
        self._setOwner( owner )
        self._setId( id )
        self._title = ""
        self._authorGen = Counter()
        self._authors = OOBTree()
        self._primaryAuthors = PersistentList()
        self._coAuthors = PersistentList()
        self._speakers = PersistentList()
        self._tracks = OOBTree()
        self._contribTypes = PersistentList( [""] )
        self._setSubmissionDate( nowutc() )
        self._modificationDate = nowutc()
        # Every abstract starts its life in the "submitted" status.
        self._currentStatus = AbstractStatusSubmitted( self )
        # Per-track judgement containers, keyed by track id.
        self._trackAcceptances = OOBTree()
        self._trackRejections = OOBTree()
        self._trackReallocations = OOBTree()
        self._trackJudgementsHistorical={}
        self._comments = ""
        self._contribution = None
        # Internal reviewer comments and their id counter.
        self._intCommentGen=Counter()
        self._intComments=PersistentList()
        self._mergeFromList = PersistentList()
        self._notifLog=NotificationLog(self)
        self._submitter=None
        self._setSubmitter( submitter )
        self._rating = None # It needs to be none to avoid the case of having the same value as the lowest value in the judgement
        self._attachments = {}
        self._attachmentsCounter = Counter()
    def __cmp__(self, other):
        # Python 2 rich-comparison fallback: order abstracts by conference
        # first, then by id within the same conference.
        if type(self) is not type(other):
            # This is actually dangerous and the ZODB manual says not to do this
            # because it relies on memory order. However, this branch should never
            # be taken anyway since we do not store different types in the same set
            # or use them as keys.
            return cmp(hash(self), hash(other))
        if self.getConference() == other.getConference():
            return cmp(self.getId(), other.getId())
        return cmp(self.getConference(), other.getConference())
    def clone(self, conference, abstractId):
        """Return a deep copy of this abstract for *conference*.

        Copies fields, authors/speakers, track assignments, per-track
        judgements and attached files.  Tracks are matched between the two
        conferences by title.
        """
        # abstractId - internal in abstract manager of the conference
        abs = Abstract(conference.getAbstractMgr(), abstractId, self.getSubmitter().getAvatar())
        abs.setTitle(self.getTitle())
        for key in self.getFields().keys():
            abs.setField(key,self.getField(key))
        abs.setComments(self.getComments())
        abs._setSubmissionDate(self.getSubmissionDate())
        abs._modificationDate = self.getModificationDate()
        # Cloning of primary- and coauthors
        # if an author is also a speaker, an appropriate object will be
        # appended also to the speaker list
        for pa in self.getPrimaryAuthorList() :
            npa = abs.newPrimaryAuthor(**(pa.getData()))
            if self.isSpeaker(pa) :
                abs.addSpeaker(npa)
        for ca in self.getCoAuthorList() :
            nca = abs.newCoAuthor(**(ca.getData()))
            if self.isSpeaker(ca) :
                abs.addSpeaker(nca)
        # Cloning of speakers
        # only those, who are not authors :
        for sp in self.getSpeakerList() :
            if not self.isAuthor(sp) :
                abs.addSpeaker(sp.clone())
        abs.setSubmitter(self.getSubmitter().getAvatar())
        abs.as_new.type = self.as_new.type
        # the track, to which the abstract belongs to
        # legacy list implementation
        for tr in self.getTrackList() :
            for newtrack in conference.getTrackList():
                if newtrack.getTitle() == tr.getTitle() :
                    abs.addTrack(newtrack)
        # overall abstract status (accepted / rejected)
        abs._currentStatus = self._currentStatus.clone(abs)
        abs.as_new.accepted_track_id = self.as_new.track.id if self.as_new.track else None
        abs.as_new.accepted_type = self.as_new.type
        # Per-track judgements (acceptance / rejection / reallocation) are
        # re-created against the matching track of the target conference.
        for ta in self.getTrackAcceptanceList() :
            for newtrack in conference.getTrackList():
                if newtrack.getTitle() == ta.getTrack().getTitle() :
                    newta = ta.clone(newtrack)
                    abs._addTrackAcceptance(newta)
                    abs._addTrackJudgementToHistorical(newta)
        for trj in self.getTrackRejections().values() :
            for newtrack in conference.getTrackList():
                if newtrack.getTitle() == trj.getTrack().getTitle() :
                    newtrj = trj.clone(newtrack)
                    abs._addTrackRejection(newtrj)
                    abs._addTrackJudgementToHistorical(newtrj)
        for trl in self.getTrackReallocations().values() :
            for newtrack in conference.getTrackList():
                if newtrack.getTitle() == trl.getTrack().getTitle() :
                    newtrl = trl.clone(newtrack)
                    abs._addTrackReallocation(newtrl)
                    abs._addTrackJudgementToHistorical(newtrl)
        # Cloning materials
        for f in self.getAttachments().values():
            newFile = f.clone(abs, protection=False)
            abs.__addFile(newFile)
        return abs
def getUniqueId( self ):
"""returns (string) the unique identifier of the item"""
"""used only in the web session access key table"""
"""it is the same as the conference since only the conf can"""
"""be protected with an access key"""
return self.getConference().getUniqueId()
    def getMergeFromList(self):
        """Return the list of abstracts that were merged into this one."""
        try:
            return self._mergeFromList
        except AttributeError:
            # lazy init for instances persisted before this attribute existed
            self._mergeFromList = PersistentList()
            return self._mergeFromList
    def addMergeFromAbstract(self, abstract):
        """Record *abstract* as having been merged into this one."""
        try:
            if self._mergeFromList:
                pass
        except AttributeError:
            # lazy init for instances persisted before this attribute existed
            self._mergeFromList = PersistentList()
        self._mergeFromList.append(abstract)
    def removeMergeFromAbstract(self, abstract):
        """Forget that *abstract* was merged into this one (no-op if absent)."""
        try:
            if self._mergeFromList:
                pass
        except AttributeError:
            self._mergeFromList = PersistentList()
        if abstract in self._mergeFromList:
            self._mergeFromList.remove(abstract)
def getComments(self):
try:
return self._comments
except AttributeError:
self._comments = ""
return self._comments
def setComments(self, comments):
self._comments = comments
    def __addFile(self, file):
        # Archive *file* in the conference repository and register it
        # under its id; the caller is responsible for assigning the id.
        file.archive(self.getConference()._getRepository())
        self.getAttachments()[file.getId()] = file
        self._notifyModification()
def saveFiles(self, files):
cfg = Config.getInstance()
from MaKaC.conference import LocalFile
for fileUploaded in files:
if fileUploaded.filename:
# create a temp file
tempPath = cfg.getUploadedFilesTempDir()
tempFileName = tempfile.mkstemp(suffix="IndicoAbstract.tmp", dir=tempPath)[1]
f = open(tempFileName, "wb")
f.write(fileUploaded.file.read() )
f.close()
file = LocalFile()
file.setFileName(fileUploaded.filename)
file.setFilePath(tempFileName)
file.setOwner(self)
file.setId(self._getAttachmentsCounter())
self.__addFile(file)
def deleteFilesNotInList(self, keys):
"""This method is used in order to delete all the files that are not present (by id) in the
parameter "keys".
This is useful when files are deleted from the abstract form using Javascript, and so it is
the only way to know that they are deleted.
"""
existingKeys = self.getAttachments().keys()
for key in existingKeys:
if not key in keys:
self._deleteFile(key)
    def _deleteFile(self, key):
        # Delete the attachment payload identified by *key* and drop it
        # from the attachments mapping.
        file = self.getAttachments()[key]
        file.delete()
        del self.getAttachments()[key]
        self._notifyModification()
    def _setOwner( self, owner ):
        # owner is the manager object this abstract belongs to
        self._owner = owner
    def getOwner( self ):
        return self._owner
    def _setId( self, id ):
        # ids are always stored as strings
        self._id = str( id )
    def getId(self):
        return self._id
def _setSubmissionDate( self, newDate ):
self._submissionDate = newDate
def setModificationDate(self, dt = None):
if dt:
self._modificationDate = dt
else:
self._modificationDate = nowutc()
def _notifyModification( self, dt=None ):
self.setModificationDate(dt)
self._p_changed = 1
def getModificationDate( self ):
return self._modificationDate
    def _setSubmitter( self, av ):
        # Replace the current submitter participation with one for avatar
        # *av*, keeping the owner's participation index and the avatar
        # links consistent.
        if not av:
            raise MaKaCError( _("An abstract must have a submitter"))
        if self._submitter:
            self.getOwner().unregisterParticipation( self._submitter )
            self._submitter.getUser().unlinkTo(self, "submitter")
            self._submitter.delete()
        self._submitter=Submitter( self, av )
        av.linkTo(self, "submitter")
        self.getOwner().registerParticipation( self._submitter )
        self._notifyModification()
    def recoverSubmitter(self, subm):
        # Restore a previously deleted Submitter object (trash-can recovery).
        if not subm:
            raise MaKaCError( _("An abstract must have a submitter"))
        if self._submitter:
            self.getOwner().unregisterParticipation( self._submitter )
            self._submitter.delete()
        self._submitter = subm
        self._submitter.setAbstract(self)
        self.getOwner().registerParticipation( self._submitter )
        subm.recover()
        self._notifyModification()
    def setSubmitter( self, av ):
        self._setSubmitter(av)
    def getSubmitter( self ):
        return self._submitter
    def isSubmitter( self, av ):
        # True if avatar *av* is represented by the current submitter.
        return self.getSubmitter().representsUser( av )
    def setTitle(self, title):
        # titles are stored stripped of surrounding whitespace
        self._title = title.strip()
        self._notifyModification()
    def getTitle(self):
        return self._title
    @property
    def title(self):
        # read-only alias of getTitle() for attribute-style access
        return self._title
    def getSubmissionDate( self ):
        # Lazily initialised for instances persisted before the attribute
        # existed; falls back to "now" in that case.
        try:
            if self._submissionDate:
                pass
        except AttributeError:
            self._submissionDate=nowutc()
        return self._submissionDate
    def getConference( self ):
        # the owner is the abstract manager; its owner is the conference
        mgr = self.getOwner()
        return mgr.getOwner() if mgr else None
def _newAuthor( self, **data ):
author = Author( self, **data )
author.setId( self._authorGen.newCount() )
self._authors[ author.getId() ] = author
return author
def _removeAuthor(self,part):
if not self.isAuthor(part):
return
part.delete()
del self._authors[part.getId()]
def isAuthor( self, part ):
return self._authors.has_key( part.getId() )
def getAuthorList( self ):
return self._authors.values()
def getAuthorById(self, id):
return self._authors.get(str(id), None)
def clearAuthors( self ):
self.clearPrimaryAuthors()
self.clearCoAuthors()
self._notifyModification()
    def newPrimaryAuthor(self,**data):
        # Create an author from **data and register it as a primary author.
        auth=self._newAuthor(**data)
        self._addPrimaryAuthor(auth)
        self._notifyModification()
        return auth
    def isPrimaryAuthor( self, part ):
        return part in self._primaryAuthors
    def getPrimaryAuthorList( self ):
        return self._primaryAuthors
    #XXX: I keep it for compatibility but it should be removed
    getPrimaryAuthorsList = getPrimaryAuthorList
    def getPrimaryAuthorEmailList(self, lower=False):
        # Return the primary authors' e-mail addresses, optionally lower-cased.
        emailList = []
        for pAuthor in self.getPrimaryAuthorList():
            emailList.append(pAuthor.getEmail().lower() if lower else pAuthor.getEmail())
        return emailList
    def clearPrimaryAuthors(self):
        while len(self._primaryAuthors)>0:
            self._removePrimaryAuthor(self._primaryAuthors[0])
        self._notifyModification()
    def _addPrimaryAuthor( self, part ):
        # *part* must already be a registered author of this abstract.
        if not self.isAuthor( part ):
            raise MaKaCError( _("The participation you want to set as primary author is not an author of the abstract"))
        if part in self._primaryAuthors:
            return
        self._primaryAuthors.append( part )
        self.getOwner().indexAuthor(part)
    def _removePrimaryAuthor(self,part):
        if not self.isPrimaryAuthor(part):
            return
        # a primary author who also speaks must stop being a speaker too
        if self.isSpeaker(part):
            self.removeSpeaker(part)
        self.getOwner().unindexAuthor(part)
        self._primaryAuthors.remove(part)
        self._removeAuthor(part)
    def recoverPrimaryAuthor(self, auth):
        # Restore a previously deleted primary author (trash-can recovery).
        self._authors[ auth.getId() ] = auth
        auth.setAbstract(self)
        self._addPrimaryAuthor(auth)
        auth.recover()
        self._notifyModification()
    def newCoAuthor(self,**data):
        # Create an author from **data and register it as a co-author.
        auth=self._newAuthor(**data)
        self._addCoAuthor(auth)
        self._notifyModification()
        return auth
    def _comp_CoAuthors(self):
        # Lazily build the co-author list for instances persisted before
        # the attribute existed: every author that is not a primary author.
        # NOTE(review): if _coAuthors exists but is None this leaves it as
        # None -- presumably that state never occurs in practice; confirm.
        try:
            if self._coAuthors!=None:
                return
        except AttributeError:
            self._coAuthors=PersistentList()
            for auth in self._authors.values():
                if not self.isPrimaryAuthor(auth):
                    self._addCoAuthor(auth)
    def isCoAuthor( self, part ):
        self._comp_CoAuthors()
        return part in self._coAuthors
    def getCoAuthorList( self ):
        self._comp_CoAuthors()
        return self._coAuthors
    def getCoAuthorEmailList(self, lower=False):
        # Return the co-authors' e-mail addresses, optionally lower-cased.
        emailList = []
        for coAuthor in self.getCoAuthorList():
            emailList.append(coAuthor.getEmail().lower() if lower else coAuthor.getEmail())
        return emailList
    def clearCoAuthors(self):
        while len(self._coAuthors)>0:
            self._removeCoAuthor(self._coAuthors[0])
        self._notifyModification()
    def _addCoAuthor( self, part ):
        self._comp_CoAuthors()
        # *part* must already be a registered author of this abstract.
        if not self.isAuthor( part ):
            raise MaKaCError( _("The participation you want to set as primary author is not an author of the abstract"))
        if part in self._coAuthors:
            return
        self._coAuthors.append( part )
    def _removeCoAuthor(self,part):
        if not self.isCoAuthor(part):
            return
        # a co-author who also speaks must stop being a speaker too
        if self.isSpeaker(part):
            self.removeSpeaker(part)
        self._coAuthors.remove(part)
        self._removeAuthor(part)
    def recoverCoAuthor(self, auth):
        # Restore a previously deleted co-author (trash-can recovery).
        self._authors[ auth.getId() ] = auth
        auth.setAbstract(self)
        self._addCoAuthor(auth)
        auth.recover()
        self._notifyModification()
def addSpeaker( self, part ):
if not self.isAuthor( part ):
raise MaKaCError( _("The participation you want to set as speaker is not an author of the abstract"))
if part in self._speakers:
return
self._speakers.append( part )
self._notifyModification()
def removeSpeaker(self,part):
if part not in self._speakers:
return
self._speakers.remove(part)
def clearSpeakers( self ):
while len(self.getSpeakerList()) > 0:
self.removeSpeaker(self.getSpeakerList()[0])
self._speakers = PersistentList()
def getSpeakerList( self ):
return self._speakers
def isSpeaker( self, part ):
return part in self._speakers
    def _addTrack( self, track ):
        """Adds the specified track to the suggested track list. Any
        verification must be done by the caller.
        """
        self._tracks[ track.getId() ] = track
        track.addAbstract( self )
        self._notifyModification()
    def addTrack( self, track ):
        # Public variant of _addTrack: migrates the legacy track storage
        # first and refreshes the abstract status after a real insertion.
        self._changeTracksImpl()
        if not self._tracks.has_key( track.getId() ):
            self._addTrack( track )
            self.getCurrentStatus().update()
    def _removeTrack( self, track ):
        """Removes the specified track from the track list. Any verification
        must be done by the caller.
        """
        del self._tracks[ track.getId() ]
        track.removeAbstract( self )
        self._notifyModification()
    def removeTrack( self, track ):
        # Public variant of _removeTrack; also clears the accepted track
        # on the status if the abstract had been accepted.
        if self._tracks.has_key( track.getId() ):
            self._removeTrack( track )
            self.getCurrentStatus().update()
            if isinstance(self.getCurrentStatus(), AbstractStatusAccepted):
                self.getCurrentStatus()._setTrack(None)
    def _changeTracksImpl( self ):
        # One-off migration: legacy abstracts stored tracks in a plain
        # list; convert it to an OOBTree keyed by track id.
        if self._tracks.__class__ != OOBTree:
            oldTrackList = self._tracks
            self._tracks = OOBTree()
            for track in oldTrackList:
                self._addTrack( track )
            self.getCurrentStatus().update()
    def getTrackList( self ):
        self._changeTracksImpl()
        return self._tracks.values()
    def getAcceptedTrack(self):
        # Return the track the abstract was accepted for, or None when the
        # abstract is not (yet) accepted.
        status = self.getCurrentStatus()
        if status is None:
            return None
        if isinstance(status, AbstractStatusAccepted):
            return status.getTrack()
    def hasTrack( self, track ):
        self._changeTracksImpl()
        return self._tracks.has_key( track.getId() )
    def getTrackListSorted( self ):
        # tracks in the order defined by the owning conference
        self._changeTracksImpl()
        return self.getConference().sortTrackList( self._tracks.values() )
    def clearTracks( self ):
        self._changeTracksImpl()
        while len(self.getTrackList())>0:
            track = self.getTrackList()[0]
            self._removeTrack( track )
            self.getCurrentStatus().update()
    def setTracks( self, trackList ):
        """Set the suggested track classification of the current abstract to
        the specified list
        """
        #We need to do it in 2 steps otherwise the list over which we are
        # iterating gets modified
        toBeRemoved = []
        toBeAdded = copy( trackList )
        for track in self.getTrackList():
            if track not in trackList:
                toBeRemoved.append( track )
            else:
                toBeAdded.remove( track )
        for track in toBeRemoved:
            self._removeTrack( track )
        for track in toBeAdded:
            self._addTrack( track )
        self.getCurrentStatus().update()
    def isProposedForTrack( self, track ):
        return self._tracks.has_key( track.getId() )
    def getNumTracks(self):
        return len( self._tracks )
    def getLocator(self):
        # Conference locator extended with this abstract's id.
        loc = self.getConference().getLocator()
        loc["abstractId"] = self.getId()
        return loc
    def isAllowedToCoordinate(self, av):
        """Tells whether or not the specified user can coordinate any of the
        tracks of this abstract
        """
        for track in self.getTrackList():
            if track.canUserCoordinate(av):
                return True
        return False
def canAuthorAccess(self, user):
if user is None:
return False
el = self.getCoAuthorEmailList(True)+self.getPrimaryAuthorEmailList(True)
for e in user.getEmails():
if e.lower() in el:
return True
return False
    def isAllowedToAccess(self, av):
        """Tells whether or not an avatar can access an abstract independently
        of the protection
        """
        #any author is allowed to access
        #CFA managers are allowed to access
        #any user being able to modify is also allowed to access
        #any TC is allowed to access
        if self.canAuthorAccess(av):
            return True
        if self.isAllowedToCoordinate(av):
            return True
        return self.canUserModify(av)
    def canAccess(self, aw):
        #if the conference is protected, then only allowed AW can access
        return self.isAllowedToAccess(aw.getUser())
    def canView(self, aw):
        #in the future it would be possible to add an access control
        #only those users allowed to access are allowed to view
        return self.isAllowedToAccess(aw.getUser())
    def canModify(self, aw_or_user):
        # Accept either an access wrapper or a bare user object.
        if hasattr(aw_or_user, 'getUser'):
            aw_or_user = aw_or_user.getUser()
        return self.canUserModify(aw_or_user)
    def canUserModify(self, av):
        #the submitter can modify
        if self.isSubmitter(av):
            return True
        #??? any CFA manager can modify
        #??? any user granted with modification privileges can modify
        #conference managers can modify
        conf = self.getConference()
        return conf.canUserModify(av)
    def getModifKey(self):
        # Abstracts have no modification key of their own.
        return ""
    def getAccessKey(self):
        # Abstracts have no access key of their own.
        return ""
    def getAccessController(self):
        # protection is delegated entirely to the owning conference
        return self.getConference().getAccessController()
    def isProtected(self):
        return self.getConference().isProtected()
    def delete(self):
        # Fully detach the abstract: drop the submitter participation,
        # authors, speakers and tracks, then unregister from the owner and
        # move the object to the trash can. Order matters: the owner is
        # cleared only after all owner-dependent cleanup ran.
        if self._owner:
            self.getOwner().unregisterParticipation(self._submitter)
            self._submitter.getUser().unlinkTo(self, "submitter")
            self._submitter.delete()
            self._submitter = None
            self.clearAuthors()
            self.clearSpeakers()
            self.clearTracks()
            owner = self._owner
            self._owner = None
            owner.removeAbstract(self)
            self.setCurrentStatus(AbstractStatusNone(self))
            TrashCanManager().add(self)
    def recoverFromTrashCan(self):
        # Undo of delete() as far as the trash can is concerned.
        TrashCanManager().remove(self)
    def getCurrentStatus(self):
        # Lazily defaults to "submitted" for instances persisted before
        # the attribute existed.
        try:
            if self._currentStatus:
                pass
        except AttributeError, e:
            self._currentStatus = AbstractStatusSubmitted(self)
        return self._currentStatus
    def setCurrentStatus(self, newStatus):
        self._currentStatus = newStatus
        #If we want to keep a history of status changes we should add here
        # the old status to a list
    def accept(self, responsible, destTrack, type, comments="", session=None):
        """Accept the abstract for *destTrack* and create the corresponding
        contribution.
        """
        self.getCurrentStatus().accept(responsible, destTrack, type, comments)
        # add the abstract to the track for which it has been accepted so it
        # is visible for it.
        if destTrack is not None:
            destTrack.addAbstract(self)
        contrib = contribution_from_abstract(self, session)
        self.as_new.contribution = contrib
    def reject(self, responsible, comments=""):
        """Reject the abstract on behalf of *responsible*.
        """
        self.getCurrentStatus().reject(responsible, comments)
    def _cmpByDate(self, tj1, tj2):
        # Python 2 comparator ordering track judgements by date.
        return cmp(tj1.getDate(), tj2.getDate())
    def getTrackJudgementsHistorical(self):
        # Mapping track id -> list of judgements, most recent first.
        # Lazily rebuilt for old objects; an even older tuple-based storage
        # is migrated to an empty dict.
        try:
            if self._trackJudgementsHistorical:
                pass
            if type(self._trackJudgementsHistorical) == tuple:
                self._trackJudgementsHistorical = {}
        except AttributeError:
            # seed the history with the latest known judgement per track
            self._trackJudgementsHistorical = {}
            for track in self.getTrackList():
                judgement = None
                if self.getTrackAcceptances().has_key(track.getId()):
                    judgement = self.getTrackAcceptances()[track.getId()]
                elif self.getTrackRejections().has_key(track.getId()):
                    judgement = self.getTrackRejections()[track.getId()]
                elif self.getTrackReallocations().has_key(track.getId()):
                    judgement = self.getTrackReallocations()[track.getId()]
                self._trackJudgementsHistorical[track.getId()] = [judgement]
            self._notifyModification()
        return self._trackJudgementsHistorical
    def getJudgementHistoryByTrack(self, track):
        # judgements made without a specific track are keyed as "notrack"
        id = "notrack"
        if track is not None:
            id = track.getId()
        if self.getTrackJudgementsHistorical().has_key(id):
            return self.getTrackJudgementsHistorical()[id]
        return []
    def _addTrackJudgementToHistorical(self, tj):
        # Prepend *tj* to its track's history (most recent first).
        id = "notrack"
        if tj.getTrack() is not None:
            id = tj.getTrack().getId()
        if self.getTrackJudgementsHistorical().has_key(id):
            if tj not in self.getTrackJudgementsHistorical()[id]:
                self.getTrackJudgementsHistorical()[id].insert(0, tj)
        else:
            self.getTrackJudgementsHistorical()[id] = [tj]
        self._notifyModification()
    def _removeTrackAcceptance( self, track ):
        """Drop the acceptance judgement recorded for *track*, if any.
        """
        if self.getTrackAcceptances().has_key( track.getId() ):
            del self.getTrackAcceptances()[ track.getId() ]
    def _addTrackAcceptance( self, judgement ):
        """Record an acceptance judgement, superseding any rejection or
        reallocation previously recorded for the same track.
        """
        self._removeTrackRejection( judgement.getTrack() )
        self._removeTrackReallocation( judgement.getTrack() )
        self.getTrackAcceptances()[ judgement.getTrack().getId() ] = judgement
        self._addTrackJudgementToHistorical(judgement)
    def _removeTrackRejection( self, track ):
        """Drop the rejection judgement recorded for *track*, if any.
        """
        if self.getTrackRejections().has_key( track.getId() ):
            del self.getTrackRejections()[ track.getId() ]
    def _addTrackRejection( self, judgement ):
        """Record a rejection judgement, superseding any acceptance or
        reallocation previously recorded for the same track.
        """
        self._removeTrackAcceptance( judgement.getTrack() )
        self._removeTrackReallocation( judgement.getTrack() )
        self.getTrackRejections()[ judgement.getTrack().getId() ] = judgement
        self._addTrackJudgementToHistorical(judgement)
    def _removeTrackReallocation( self, track ):
        """Drop the reallocation judgement recorded for *track*, if any.
        """
        if self.getTrackReallocations().has_key( track.getId() ):
            del self.getTrackReallocations()[ track.getId() ]
    def _addTrackReallocation( self, judgement ):
        """Record a reallocation judgement, superseding any acceptance or
        rejection previously recorded for the same track.
        """
        self._removeTrackAcceptance( judgement.getTrack() )
        self._removeTrackRejection( judgement.getTrack() )
        self.getTrackReallocations()[ judgement.getTrack().getId() ] = judgement
        self._addTrackJudgementToHistorical(judgement)
    def _clearTrackRejections( self ):
        while len(self.getTrackRejections().values())>0:
            t = self.getTrackRejections().values()[0].getTrack()
            self._removeTrackRejection( t )
    def _clearTrackAcceptances( self ):
        while len(self.getTrackAcceptances().values())>0:
            t = self.getTrackAcceptances().values()[0].getTrack()
            self._removeTrackAcceptance( t )
    def _clearTrackReallocations( self ):
        while len(self.getTrackReallocations().values())>0:
            t = self.getTrackReallocations().values()[0].getTrack()
            self._removeTrackReallocation(t)
def _removePreviousJud(self, responsible, track):
''' Check if there is a previous judgement and remove it '''
toDelete = [] # list of judgements to delete
for jud in self.getJudgementHistoryByTrack(track):
if jud.getResponsible() == responsible:
toDelete.append(jud)
for x in toDelete:
self.getTrackJudgementsHistorical()[track.getId()].remove(x)
if isinstance(x, AbstractAcceptance):
self._del_judgment(x)
    def proposeToAccept( self, responsible, track, contribType, comment="", answers=[] ):
        """Record *responsible*'s proposal to accept this abstract in *track*.
        """
        # the proposal has to be done for a track
        if track is None:
            raise MaKaCError( _("You have to choose a track in order to do the proposal. If there are not tracks to select, please change the track assignment of the abstract"))
        #We check the track for which the abstract is proposed to be accepted
        # is in the current abstract
        if not self.isProposedForTrack( track ):
            raise MaKaCError( _("Cannot propose to accept an abstract which is not proposed for the specified track"))
        # check if there is a previous judgement of this author in for this abstract in this track
        self._removePreviousJud(responsible, track)
        # Create the new judgement
        jud = AbstractAcceptance(self, track, responsible, contribType, answers)
        self._add_judgment(jud)
        jud.setComment( comment )
        self._addTrackAcceptance( jud )
        # Update the rating of the abstract
        self.updateRating()
        #We trigger the state transition
        self.getCurrentStatus().proposeToAccept()
    def proposeToReject( self, responsible, track, comment="", answers=[] ):
        """Record *responsible*'s proposal to reject this abstract in *track*.
        """
        # the proposal has to be done for a track
        if track is None:
            raise MaKaCError( _("You have to choose a track in order to do the proposal. If there are not tracks to select, please change the track assignment of the abstract"))
        #We check the track for which the abstract is proposed to be accepted
        # is in the current abstract
        if not self.isProposedForTrack( track ):
            raise MaKaCError( _("Cannot propose to reject an abstract which is not proposed for the specified track"))
        # check if there is a previous judgement of this author in for this abstract in this track
        self._removePreviousJud(responsible, track)
        # Create the new judgement
        jud = AbstractRejection(self, track, responsible, answers)
        jud.setComment( comment )
        self._addTrackRejection( jud )
        # Update the rating of the abstract
        self.updateRating()
        #We trigger the state transition
        self.getCurrentStatus().proposeToReject()
def proposeForOtherTracks( self, responsible, track, comment, propTracks, answers=[] ):
"""
"""
#We check the track which proposes to allocate the abstract is in the
# current abstract
if not self.isProposedForTrack( track ):
raise MaKaCError( _("Cannot propose to reallocate an abstract which is not proposed for the specified track"))
# check if there is a previous judgement of this author in for this abstract in this track
self._removePreviousJud(responsible, track)
#We keep the track judgement
jud = AbstractReallocation(self, track, responsible, propTracks, answers)
jud.setComment( comment )
self._addTrackReallocation( jud )
#We add the proposed tracks to the abstract
for track in propTracks:
self._addTrack( track )
#We trigger the state transition
self.getCurrentStatus().proposeToReallocate()
# Update the rating of the abstract
self.updateRating()
    def withdraw(self,resp,comment=""):
        """Withdraw the abstract on behalf of *resp*.
        """
        self.getCurrentStatus().withdraw(resp,comment)
    def recover( self ):
        """Puts a withdrawn abstract back in the list of submitted abstracts.
        HAS NOTHING TO DO WITH THE RECOVERY PROCESS...
        """
        #we must clear any track judgement
        #self._clearTrackAcceptances()
        #self._clearTrackRejections()
        #self._clearTrackReallocations()
        self.getCurrentStatus().recover() #status change
        #if succeeded we must reset the submission date
        self._setSubmissionDate( nowutc() )
        self._notifyModification()
def getTrackJudgement( self, track ):
if not self.getJudgementHistoryByTrack(track):
return None
lastJud = self.getJudgementHistoryByTrack(track)[0]
# check if judgements for specified trak are the same. If not there is a conflict.
if all(jud.__class__ == lastJud.__class__ for jud in self.getJudgementHistoryByTrack(track)):
return lastJud
return AbstractInConflict(self, track)
    def getTrackAcceptances( self ):
        # Lazily initialised OOBTree: track id -> AbstractAcceptance.
        try:
            if self._trackAcceptances:
                pass
        except AttributeError, e:
            self._trackAcceptances = OOBTree()
        return self._trackAcceptances
    def getTrackAcceptanceList( self ):
        # acceptances restricted to tracks the abstract is still proposed for
        res = []
        for trackId in intersection( self._tracks, self.getTrackAcceptances() ):
            res.append( self.getTrackAcceptances()[ trackId ] )
        return res
    def getNumProposedToAccept( self ):
        return len( intersection( self._tracks, self.getTrackAcceptances() ) )
    def getTrackRejections( self ):
        # Lazily initialised OOBTree: track id -> AbstractRejection.
        try:
            if self._trackRejections:
                pass
        except AttributeError, e:
            self._trackRejections = OOBTree()
        return self._trackRejections
    def getNumProposedToReject( self ):
        return len( intersection( self._tracks, self.getTrackRejections() ) )
    def getTrackReallocations( self ):
        # Lazily initialised OOBTree: track id -> AbstractReallocation.
        try:
            if self._trackReallocations:
                pass
        except AttributeError, e:
            self._trackReallocations = OOBTree()
        return self._trackReallocations
    def getNumProposedToReallocate( self ):
        return len( intersection( self._tracks, self.getTrackReallocations() ) )
    def getNumJudgements( self ):
        """
        Returns the number of tracks for which some proposal has been done.
        For instance, let's suppose:
           Track 1: 2 propose to accept, 3 propose to reject
           Track 2: 1 propose to accept
           Track 3: None
        The result would be 2 (out of 3)
        """
        tmp1 = union( self.getTrackAcceptances(), self.getTrackRejections() )
        judgements = union( tmp1, self.getTrackReallocations() )
        return len( intersection( self._tracks, judgements ) )
    def getReallocationTargetedList( self, track ):
        #XXX: not optimal
        # Return every reallocation judgement proposing *track* as a target.
        res = []
        for r in self.getTrackReallocations().values():
            if track in r.getProposedTrackList():
                res.append( r )
        return res
    def getIntCommentList(self):
        # Internal (reviewer-only) comments; lazily created list.
        try:
            if self._intComments:
                pass
        except AttributeError:
            self._intComments=PersistentList()
        return self._intComments
    def addIntComment(self,newComment):
        # Attach an internal comment, assigning it an id if it has none.
        try:
            if self._intComments:
                pass
        except AttributeError:
            self._intComments=PersistentList()
        try:
            if self._intCommentsGen:
                pass
        except AttributeError:
            self._intCommentsGen=Counter()
        if newComment in self._intComments:
            return
        id = newComment.getId()
        if id == "":
            # NOTE(review): the id comes from _authorGen rather than the
            # _intCommentsGen counter initialised above -- looks odd;
            # confirm before relying on it.
            id = self._authorGen.newCount()
        newComment.includeInAbstract(self, id)
        self._intComments.append(newComment)
    def getIntCommentById(self,id):
        # Return the internal comment with the given id, or None.
        try:
            if self._intComments:
                pass
        except AttributeError:
            self._intComments=PersistentList()
        for comment in self._intComments:
            if id.strip()==comment.getId():
                return comment
        return None
    def clearIntCommentList(self):
        while len(self.getIntCommentList()) > 0:
            self.removeIntComment(self.getIntCommentList()[0])
    def removeIntComment(self,comment):
        try:
            if self._intComments:
                pass
        except AttributeError:
            self._intComments=PersistentList()
        if comment not in self._intComments:
            return
        self._intComments.remove(comment)
        comment.delete()
    def recoverIntComment(self, comment):
        # Restore a previously deleted internal comment (trash-can recovery).
        self.addIntComment(comment)
        comment.recover()
    def markAsDuplicated(self,responsible,originalAbstract,comments="", track=None, answers=[]):
        """Mark this abstract as a duplicate of *originalAbstract*.

        The judgement is recorded either for the given *track* or, when no
        track is given, for every suggested track.
        """
        self.getCurrentStatus().markAsDuplicated(responsible,originalAbstract,comments)
        # check if there is a previous judgement of this author in for this abstract in this track
        self._removePreviousJud(responsible, track)
        if track is not None:
            jud = AbstractMarkedAsDuplicated(self, track, responsible, originalAbstract, answers)
            jud.setComment( comments )
            self._addTrackJudgementToHistorical(jud)
        else:
            for t in self.getTrackList():
                jud = AbstractMarkedAsDuplicated(self, t, responsible, originalAbstract, answers)
                jud.setComment( comments )
                self._addTrackJudgementToHistorical(jud)
        # Update the rating of the abstract
        self.updateRating()
    def unMarkAsDuplicated(self,responsible,comments="", track=None, answers=[]):
        """Revert a previous "marked as duplicated" judgement.

        All track judgements are cleared first, then an "unmarked"
        judgement is recorded for *track* (or every track when None).
        """
        #we must clear any track judgement
        self._clearTrackAcceptances()
        self._clearTrackRejections()
        self._clearTrackReallocations()
        #self.getCurrentStatus().recover() #status change
        self.getCurrentStatus().unMarkAsDuplicated(responsible,comments)
        # check if there is a previous judgement of this author in for this abstract in this track
        self._removePreviousJud(responsible, track)
        if track is not None:
            jud = AbstractUnMarkedAsDuplicated(self, track, responsible, answers)
            jud.setComment( comments )
            self._addTrackJudgementToHistorical(jud)
        else:
            for t in self.getTrackList():
                jud = AbstractUnMarkedAsDuplicated(self, t, responsible, answers )
                jud.setComment( comments )
                self._addTrackJudgementToHistorical(jud)
        # Update the rating of the abstract
        self.updateRating()
        self._notifyModification()
    def mergeInto(self,responsible,targetAbs,mergeAuthors=False,comments=""):
        """Merge this abstract into *targetAbs*, optionally copying the
        primary and co-authors over to the target abstract.
        """
        self.getCurrentStatus().mergeInto(responsible,targetAbs,comments)
        targetAbs.addMergeFromAbstract(self)
        if mergeAuthors:
            #for auth in self.getAuthorList():
            #    newAuth=targetAbs.newAuthor()
            #    newAuth.setFromAbstractParticipation(auth)
            #    if self.isPrimaryAuthor(auth):
            #        targetAbs.addPrimaryAuthor(newAuth)
            for auth in self.getPrimaryAuthorList():
                newAuth=targetAbs.newPrimaryAuthor()
                newAuth.setFromAbstractParticipation(auth)
            for auth in self.getCoAuthorList():
                newAuth=targetAbs.newCoAuthor()
                newAuth.setFromAbstractParticipation(auth)
    def notify(self,notificator,responsible):
        """notifies the abstract responsibles with a matching template
        """
        tpl=self.getOwner().getNotifTplForAbstract(self)
        if not tpl:
            return
        notificator.notify(self,tpl)
        self.getNotificationLog().addEntry(NotifLogEntry(responsible,tpl))
    def unMerge(self,responsible,comments=""):
        # Revert a previous merge: clear all track judgements and detach
        # from the target abstract's merge-from list.
        #we must clear any track judgement
        self._clearTrackAcceptances()
        self._clearTrackRejections()
        self._clearTrackReallocations()
        self.getCurrentStatus().getTargetAbstract().removeMergeFromAbstract(self)
        self.getCurrentStatus().unMerge(responsible,comments)
        self._notifyModification()
    def getNotificationLog(self):
        # Lazily created log of notification e-mails sent for this abstract.
        try:
            if self._notifLog:
                pass
        except AttributeError:
            self._notifLog=NotificationLog(self)
        return self._notifLog
    # Rating methods
    def getRating(self):
        """ Get the average rating of the abstract """
        try:
            if self._rating:
                pass
        except AttributeError:
            # lazy init for instances persisted before this attribute existed
            self._rating = None
        return self._rating
    def updateRating(self, scale = None):
        """
        Update the average rating of the abstract which is calculated with the average of each judgement.
        If the scale (tuple with lower,higher) is passed, the judgement are re-adjusted to the new scale.
        """
        self._rating = None
        # calculate the total valoration
        judNum = 0
        ratingSum = 0
        for track in self.getTrackListSorted():
            for jud in self.getJudgementHistoryByTrack(track):
                if scale:
                    # calculate the new values for each judgement
                    scaleLower, scaleHigher = scale
                    jud.recalculateJudgementValues(scaleLower, scaleHigher)
                if jud.getJudValue() != None: # it means there is a numeric value for the judgement
                    ratingSum += jud.getJudValue()
                    judNum += 1
        # Calculate the average
        if judNum != 0:
            self._rating = float(ratingSum) / judNum
    def getQuestionsAverage(self):
        '''Get the list of questions answered in the reviews for an abstract '''
        dTotals = {} # {idQ1: total_value, idQ2: total_value ...}
        dTimes = {} # {idQ1: times_answered, idQ2: times_answered}
        for track in self.getTrackListSorted():
            for jud in self.getJudgementHistoryByTrack(track):
                for answer in jud.getAnswers():
                    # check if the question is in d and sum the answers value or insert in d the new question
                    if dTotals.has_key(answer.getQuestion().getText()):
                        dTotals[answer.getQuestion().getText()] += answer.getValue()
                        dTimes[answer.getQuestion().getText()] += 1
                    else: # first time
                        dTotals[answer.getQuestion().getText()] = answer.getValue()
                        dTimes[answer.getQuestion().getText()] = 1
        # get the questions average
        questionsAverage = {}
        for q, v in dTotals.iteritems():
            # insert the element and calculate the average for the value
            questionsAverage[q] = float(v)/dTimes[q]
        return questionsAverage
    def removeAnswersOfQuestion(self, questionId):
        ''' Remove the answers of the question with questionId value '''
        for track in self.getTrackListSorted():
            for jud in self.getJudgementHistoryByTrack(track):
                jud.removeAnswer(questionId)
def getRatingPerReviewer(self, user, track):
"""
Get the rating of the user for the abstract in the track given.
"""
for jud in self.getJudgementHistoryByTrack(track):
if (jud.getResponsible() == user):
return jud.getJudValue()
def getLastJudgementPerReviewer(self, user, track):
"""
Get the last judgement of the user for the abstract in the track given.
"""
for jud in self.getJudgementHistoryByTrack(track):
if (jud.getResponsible() == user):
return jud
    def _getAttachmentsCounter(self):
        # Counter assigning unique ids to attachments; lazily created.
        try:
            if self._attachmentsCounter:
                pass
        except AttributeError:
            self._attachmentsCounter = Counter()
        return self._attachmentsCounter.newCount()
    def setAttachments(self, attachments):
        self._attachments = attachments
    def getAttachments(self):
        # Mapping attachment id -> file; lazily created.
        try:
            if self._attachments:
                pass
        except AttributeError:
            self._attachments = {}
        return self._attachments
    def getAttachmentById(self, id):
        return self.getAttachments().get(id, None)
class AbstractJudgement(AbstractJudgmentLegacyMixin, Persistent):
    """This class represents each of the judgements made by a track about a
        certain abstract. Each track for which an abstract is proposed can
        make a judgement proposing the abstract to be accepted or rejected.
        Different track judgements must be kept so the referees who have to
        take the final decission can overview different opinions from the
        track coordinators.
        Together with the judgement some useful information like the date when
        it was done and the user who did it will be kept.
    """

    def __init__( self, abstract, track, responsible, answers ):
        self._abstract = abstract
        self._track = track
        self._setResponsible( responsible )
        self._comment = ""
        self._date = nowutc()
        self._answers = answers
        self._judValue = self.calculateJudgementAverage() # judgement average value
        self._totalJudValue = self.calculateAnswersTotalValue()

    def _setResponsible( self, newRes ):
        self._responsible = newRes

    def getResponsible( self ):
        """Return the user who made this judgement."""
        return self._responsible

    def getDate( self ):
        """Return the date the judgement was made."""
        return self._date

    def setDate(self, date):
        self._date = date

    def getTrack( self ):
        """Return the track this judgement was made for (may be None)."""
        return self._track

    def setComment( self, newComment ):
        # comments are stored stripped of surrounding whitespace
        self._comment = newComment.strip()

    def getComment( self ):
        return self._comment

    def getAnswers(self):
        """Return the review answers; lazily initialised for old objects."""
        try:
            if self._answers:
                pass
        except AttributeError:
            self._answers = []
        return self._answers

    def calculateJudgementAverage(self):
        '''Calculate the average value of the given answers, or None when
        there are no answered questions.'''
        if len(self.getAnswers()) != 0:
            values = [ans.getValue() for ans in self._answers]
            return sum(values) / float(len(values)) # calculate the average
        # there are no questions answered, so no numeric judgement
        return None

    def getJudValue(self):
        """Return the average answer value (lazily computed for old objects)."""
        try:
            if self._judValue:
                pass
        except AttributeError:
            self._judValue = self.calculateJudgementAverage() # judgement average value
        return self._judValue

    def getTotalJudValue(self):
        """Return the sum of all answer values (lazily computed for old objects)."""
        try:
            if self._totalJudValue:
                pass
        except AttributeError:
            self._totalJudValue = self.calculateAnswersTotalValue()
        return self._totalJudValue

    def calculateAnswersTotalValue(self):
        ''' Calculate the sum of all the ratings '''
        result = 0
        for ans in self.getAnswers():
            result += ans.getValue()
        return result

    def recalculateJudgementValues(self, scaleLower, scaleHigher):
        ''' Update the values of the judgement. This function is called when the scale is changed.'''
        for ans in self.getAnswers():
            ans.calculateRatingValue(scaleLower, scaleHigher)
        self._judValue = self.calculateJudgementAverage()
        self._totalJudValue = self.calculateAnswersTotalValue()

    def removeAnswer(self, questionId):
        ''' Remove the current answers of the questionId '''
        # iterate over a snapshot: the original code removed elements from
        # self._answers while iterating it, which skips the element that
        # follows each removal (consecutive matches were missed)
        for ans in list(self.getAnswers()):
            if ans.getQuestion().getId() == questionId:
                self._answers.remove(ans)
                self._notifyModification()

    def _notifyModification(self):
        # flag the persistent object as changed so the ZODB stores it
        self._p_changed = 1
class AbstractAcceptance( AbstractJudgement ):
    """Judgement by which a track proposes the abstract for acceptance.

    NOTE(review): ``clone()`` relies on ``self.getContribType()``, which is
    not defined in this class nor in the visible part of
    ``AbstractJudgement`` -- presumably provided elsewhere (e.g. a legacy
    property); confirm.
    """
    def __init__(self, abstract, track, responsible, contribType, answers):
        AbstractJudgement.__init__(self, abstract, track, responsible, answers)
        # Contribution type proposed together with the acceptance.
        self._contribType = contribType
    def getDate( self ):
        # The acceptance date is delegated to the new-style model
        # (``as_new``) instead of the inherited ``_date`` attribute.
        return self.as_new.creation_dt
    def setDate(self, date):
        self.as_new.creation_dt = date
    def clone(self,track):
        # Return a copy of this acceptance attached to *track*.
        aa = AbstractAcceptance(self._abstract, track, self.getResponsible(), self.getContribType(), self.getAnswers())
        return aa
class AbstractRejection( AbstractJudgement ):
    """Judgement by which a track proposes the abstract for rejection."""

    def clone(self, track):
        """Return a copy of this rejection attached to *track*."""
        return AbstractRejection(self._abstract, track,
                                 self.getResponsible(), self.getAnswers())
class AbstractReallocation( AbstractJudgement ):
    """Judgement proposing to move an abstract to other tracks."""

    def __init__(self, abstract, track, responsible, propTracks, answers):
        AbstractJudgement.__init__(self, abstract, track, responsible, answers)
        # Tracks the coordinator proposes to reallocate the abstract to.
        self._proposedTracks = PersistentList(propTracks)

    def clone(self, track):
        """Return a copy of this reallocation attached to *track*."""
        return AbstractReallocation(self._abstract, track,
                                    self.getResponsible(),
                                    self.getProposedTrackList(),
                                    self.getAnswers())

    def getProposedTrackList( self ):
        """Return the list of proposed destination tracks."""
        return self._proposedTracks
class AbstractInConflict( AbstractJudgement ):
    """Marker judgement: the track coordinators disagree on the abstract.

    No responsible user and no answers apply to a conflict marker.
    """

    def __init__(self, abstract, track):
        AbstractJudgement.__init__(self, abstract, track, None, '')

    def clone(self, track):
        """Return a copy of this conflict marker attached to *track*.

        Bug fix: the previous implementation called
        ``AbstractInConflict(self._abstract, track, None, '')`` although
        ``__init__`` only takes ``(abstract, track)``, so cloning raised
        TypeError.
        """
        return AbstractInConflict(self._abstract, track)
class AbstractMarkedAsDuplicated( AbstractJudgement ):
    """Judgement marking an abstract as a duplicate of another one."""

    def __init__(self, abstract, track, responsible, originalAbst, answers):
        AbstractJudgement.__init__(self, abstract, track, responsible, answers)
        # The abstract this one duplicates.
        self._originalAbst = originalAbst

    def clone(self, track):
        """Return a copy of this judgement attached to *track*."""
        return AbstractMarkedAsDuplicated(self._abstract, track,
                                          self.getResponsible(),
                                          self.getOriginalAbstract(),
                                          self.getAnswers())

    def getOriginalAbstract(self):
        """Return the abstract this one was marked a duplicate of."""
        return self._originalAbst
class AbstractUnMarkedAsDuplicated( AbstractJudgement ):
    """Judgement reverting a previous duplicate mark."""

    def clone(self, track):
        """Return a copy of this judgement attached to *track*.

        Bug fix: the previous implementation called the constructor
        without the required ``answers`` argument, so cloning raised
        TypeError via ``AbstractJudgement.__init__``; the stored answers
        are now forwarded.
        """
        return AbstractUnMarkedAsDuplicated(self._abstract, track,
                                            self.getResponsible(),
                                            self.getAnswers())
class AbstractStatus( Persistent ):
    """This class represents any of the status in which an abstract can be.
    From the moment they are submitted (and therefore created), abstracts
    can go through different status each having a different meaning.
    As there can be many status, the transitions between them are quite
    complex and as the system evolves we could require to add or delete
    new status the "Status" pattern is applied. This is the base class.
    Apart from giving information about the status of an abstract, this
    class is responsible to store information about how the status was
    reached (who provoked the transition, when, ...).
    """
    # Human-readable status name; overridden by subclasses.
    _name = ""
    def __init__( self, abstract ):
        self._setAbstract( abstract )
        self._setDate( nowutc() )
    def getName(self):
        return self._name
    def _setAbstract( self, abs ):
        self._abstract = abs
    def getAbstract( self ):
        return self._abstract
    def _setDate( self, date ):
        self._date = date
    def getDate( self ):
        # When this status was reached.
        return self._date
    def accept(self, responsible, destTrack, type_, comments=""):
        """Move the abstract to the accepted status in *destTrack*,
        recording track and contribution type on the new-style model
        (``as_new``).
        """
        abstract = self.getAbstract()
        s = AbstractStatusAccepted(abstract, responsible, comments)
        abstract.as_new.accepted_track_id = destTrack.id if destTrack else None
        abstract.as_new.accepted_type = type_
        self.getAbstract().setCurrentStatus(s)
    def reject( self, responsible, comments = "" ):
        """Move the abstract to the rejected status."""
        s = AbstractStatusRejected(self.getAbstract(), responsible, comments)
        self.getAbstract().setCurrentStatus(s)
    def _getStatusClass( self ):
        """Derive, from the per-track judgements, the status class the
        abstract should currently have.
        """
        numAccepts = self._abstract.getNumProposedToAccept() # number of tracks that have at least one proposal to accept
        numReallocate = self._abstract.getNumProposedToReallocate() # number of tracks that have at least one proposal to reallocate
        numJudgements = self._abstract.getNumJudgements() # number of tracks that have at least one judgement
        if numJudgements > 0:
            # If at least one track status is in conflict the abstract status is in conflict too.
            if any(isinstance(self._abstract.getTrackJudgement(track), AbstractInConflict) for track in self._abstract.getTrackList()):
                return AbstractStatusInConflict
            numTracks = self._abstract.getNumTracks() # number of tracks that this abstract has assigned
            if numTracks == numJudgements: # Do we have judgements for all tracks?
                if numReallocate == numTracks:
                    # Every track proposed reallocation: no agreement.
                    return AbstractStatusInConflict
                elif numAccepts == 1:
                    return AbstractStatusProposedToAccept
                elif numAccepts == 0:
                    return AbstractStatusProposedToReject
                # More than one acceptance proposal: conflict.
                return AbstractStatusInConflict
            return AbstractStatusUnderReview
        return AbstractStatusSubmitted
    def update( self ):
        """Recompute the status and replace it on the abstract if it
        changed.
        """
        newStatusClass = self._getStatusClass()
        if self.__class__ != newStatusClass:
            self.getAbstract().setCurrentStatus( newStatusClass( self._abstract ) )
    def proposeToAccept( self ):
        """Re-evaluate the status after an acceptance proposal."""
        s = self._getStatusClass()( self._abstract )
        self.getAbstract().setCurrentStatus( s )
    def proposeToReject( self ):
        """Re-evaluate the status after a rejection proposal."""
        s = self._getStatusClass()( self._abstract )
        self.getAbstract().setCurrentStatus( s )
    def proposeToReallocate( self ):
        """Re-evaluate the status after a reallocation proposal."""
        s = self._getStatusClass()( self._abstract )
        self.getAbstract().setCurrentStatus( s )
    def withdraw(self,resp,comments=""):
        """Withdraw the abstract, remembering this status so it can be
        restored later by ``recover()``.
        """
        s=AbstractStatusWithdrawn(self.getAbstract(), resp, self, comments)
        self.getAbstract().setCurrentStatus(s)
    def recover( self ):
        """Recovering is only meaningful for withdrawn abstracts."""
        raise MaKaCError( _("only withdrawn abstracts can be recovered"))
    def markAsDuplicated(self,responsible,originalAbs,comments=""):
        """Mark the abstract as a duplicate of *originalAbs*."""
        if self.getAbstract()==originalAbs:
            raise MaKaCError( _("the original abstract is the same as the duplicated one"))
        # A duplicate chain is not allowed: the original must not itself
        # be marked as a duplicate.
        if isinstance(originalAbs.getCurrentStatus(),AbstractStatusDuplicated):
            raise MaKaCError( _("cannot set as original abstract one which is already marked as duplicated"))
        s=AbstractStatusDuplicated(self.getAbstract(),responsible,originalAbs,comments)
        self.getAbstract().setCurrentStatus(s)
    def unMarkAsDuplicated(self,responsible,comments=""):
        """Only valid on duplicated abstracts; see subclass override."""
        raise MaKaCError( _("Only duplicated abstract can be unmark as duplicated"))
    def mergeInto(self,responsible,targetAbs,comments=""):
        """Merge this abstract into *targetAbs*."""
        if self.getAbstract()==targetAbs:
            raise MaKaCError( _("An abstract cannot be merged into itself"))
        # Only "open" statuses may receive merged abstracts.
        if targetAbs.getCurrentStatus().__class__ not in [AbstractStatusSubmitted,AbstractStatusUnderReview,AbstractStatusProposedToAccept,AbstractStatusProposedToReject,AbstractStatusInConflict]:
            raise MaKaCError(_("Target abstract is in a status which cannot receive mergings"))
        s=AbstractStatusMerged(self.getAbstract(),responsible,targetAbs,comments)
        self.getAbstract().setCurrentStatus(s)
    def unMerge(self,responsible,comments=""):
        """Only valid on merged abstracts; see subclass override."""
        raise MaKaCError( _("Only merged abstracts can be unmerged"))
    def getComments(self):
        # Base statuses carry no comment; subclasses that do override this.
        return ""
class AbstractStatusSubmitted( AbstractStatus ):
    """Initial status: the abstract was submitted and no track has
    judged it yet."""

    def clone(self, abstract):
        """Return a fresh submitted status bound to *abstract*."""
        return AbstractStatusSubmitted(abstract)

    def update( self ):
        """Re-evaluate the status; stay submitted while there are no
        judgements at all."""
        if self._abstract.getNumJudgements() == 0:
            return
        AbstractStatus.update( self )
class AbstractStatusAccepted(AbstractStatusAcceptedLegacyMixin, AbstractStatus):
    """Final status: the abstract has been accepted."""

    def __init__(self, abstract, responsible, comments=""):
        AbstractStatus.__init__(self, abstract)
        self._setResponsible(responsible)
        self._setComments(comments)

    def clone(self, abstract):
        """Return a copy of this status bound to *abstract*."""
        return AbstractStatusAccepted(abstract, self.getResponsible(),
                                      self.getComments())

    def _setResponsible(self, res):
        self._responsible = res

    def getResponsible(self):
        return self._responsible

    def _setComments(self, comments):
        self._comments = str(comments).strip()

    def getComments(self):
        # Lazy default for objects stored before ``_comments`` existed.
        if not hasattr(self, '_comments'):
            self._comments = ""
        return self._comments

    def update(self):
        # A final status never changes automatically.
        return

    def accept(self, responsible, destTrack, type, comments=""):
        raise MaKaCError(_("Cannot accept an abstract which is already accepted"))

    def reject(self, responsible, comments=""):
        raise MaKaCError(_("Cannot reject an abstract which is already accepted"))

    def proposeToAccept(self):
        raise MaKaCError(_("Cannot propose for acceptance an abstract which is already accepted"))

    def proposeToReject(self):
        raise MaKaCError(_("Cannot propose for rejection an abstract which is already accepted"))

    def proposeToReallocate(self):
        raise MaKaCError(_("Cannot propose for reallocation an abstract which is already accepted"))

    def markAsDuplicated(self, responsible, originalAbs, comments=""):
        raise MaKaCError(_("Cannot mark as duplicated an abstract which is accepted"))

    def unMarkAsDuplicated(self, responsible, comments=""):
        raise MaKaCError(_("Only duplicated abstract can be unmark as duplicated"))

    def mergeInto(self, responsible, targetAbs, comments=""):
        raise MaKaCError(_("Cannot merge an abstract which is already accepted"))
class AbstractStatusRejected( AbstractStatus ):
    """Final status: the abstract has been rejected."""

    def __init__(self, abstract, responsible, comments=""):
        AbstractStatus.__init__(self, abstract)
        self._setResponsible(responsible)
        self._setComments(comments)

    def clone(self, abstract):
        """Return a copy of this status bound to *abstract*."""
        return AbstractStatusRejected(abstract, self.getResponsible(),
                                      self.getComments())

    def _setResponsible(self, res):
        self._responsible = res

    def getResponsible(self):
        return self._responsible

    def _setComments(self, comments):
        self._comments = str(comments).strip()

    def getComments(self):
        # Lazy default for objects stored before ``_comments`` existed.
        if not hasattr(self, '_comments'):
            self._comments = ""
        return self._comments

    def update(self):
        # A final status never changes automatically.
        return

    def reject(self, responsible, comments=""):
        raise MaKaCError(_("Cannot reject an abstract which is already rejected"))

    def proposeToAccept(self):
        raise MaKaCError(_("Cannot propose for acceptance an abstract which is already rejected"))

    def proposeToReject(self):
        raise MaKaCError(_("Cannot propose for rejection an abstract which is already rejected"))

    def proposeToReallocate(self):
        raise MaKaCError(_("Cannot propose for reallocation an abstract which is already rejected"))

    def withdraw(self, resp, comments=""):
        raise MaKaCError(_("Cannot withdraw a REJECTED abstract"))

    def markAsDuplicated(self, responsible, originalAbs, comments=""):
        raise MaKaCError(_("Cannot mark as duplicated an abstract which is rejected"))

    def unMarkAsDuplicated(self, responsible, comments=""):
        raise MaKaCError(_("Only duplicated abstract can be unmark as duplicated"))

    def mergeInto(self, responsible, targetAbs, comments=""):
        raise MaKaCError(_("Cannot merge an abstract which is rejected"))
class AbstractStatusUnderReview( AbstractStatus ):
    """Intermediate status: some, but not all, tracks have judged."""

    def clone(self, abstract):
        """Return a fresh under-review status bound to *abstract*."""
        return AbstractStatusUnderReview(abstract)
class AbstractStatusProposedToAccept( AbstractStatus ):
    """All tracks judged and exactly one acceptance was proposed."""

    def clone(self, abstract):
        """Return a fresh proposed-to-accept status bound to *abstract*."""
        return AbstractStatusProposedToAccept(abstract)

    def getTrack(self):
        """Return the track whose coordinator proposed the acceptance."""
        proposal = self.getAbstract().getTrackAcceptanceList()[0]
        return proposal.getTrack()

    def getType(self):
        """Return the contribution type attached to the proposal."""
        proposal = self.getAbstract().getTrackAcceptanceList()[0]
        return proposal.getContribType()
class AbstractStatusProposedToReject( AbstractStatus ):
    """All tracks judged and no acceptance was proposed."""

    def clone(self, abstract):
        """Return a fresh proposed-to-reject status bound to *abstract*."""
        return AbstractStatusProposedToReject(abstract)
class AbstractStatusInConflict( AbstractStatus ):
    """The track judgements disagree; a referee must decide."""

    def clone(self, abstract):
        """Return a fresh in-conflict status bound to *abstract*."""
        return AbstractStatusInConflict(abstract)
class AbstractStatusWithdrawn(AbstractStatus):
    """Status: the abstract has been withdrawn.

    Keeps the status the abstract had before withdrawal so ``recover()``
    can restore it.
    """

    def __init__(self, abstract, responsible, prevStatus, comments=""):
        AbstractStatus.__init__(self, abstract)
        self._setComments(comments)
        self._setResponsible(responsible)
        self._prevStatus = prevStatus

    def clone(self, abstract):
        """Return a copy of this status bound to *abstract*.

        Bug fix: the previous implementation called
        ``AbstractStatusWithdrawn(abstract, responsible, comments)``,
        which bound the comments string to the ``prevStatus`` parameter
        (corrupting ``_prevStatus``) and dropped the comments.  Both
        values are now forwarded explicitly.
        """
        return AbstractStatusWithdrawn(abstract, self.getResponsible(),
                                       self.getPrevStatus(),
                                       self.getComments())

    def _setResponsible(self, newResp):
        self._responsible = newResp

    def getResponsible(self):
        # Lazy fallback to the submitter for objects stored before the
        # attribute existed.
        try:
            if self._responsible:
                pass
        except AttributeError:
            self._responsible = self._abstract.getSubmitter().getAvatar()
        return self._responsible

    def getPrevStatus(self):
        # Lazy default for objects stored before the attribute existed.
        try:
            if self._prevStatus:
                pass
        except AttributeError:
            self._prevStatus = None
        return self._prevStatus

    def _setComments(self, comments):
        self._comments = str(comments).strip()

    def getComments(self):
        return self._comments

    def update(self):
        # Withdrawn is stable until explicitly recovered.
        return

    def accept(self, responsible, destTrack, type, comments=""):
        raise MaKaCError( _("Cannot accept an abstract wich is withdrawn"))

    def reject(self, responsible, comments=""):
        raise MaKaCError( _("Cannot reject an abstract which is withdrawn"))

    def proposeToAccept(self):
        raise MaKaCError( _("Cannot propose for acceptance an abstract which withdrawn"))

    def proposeToReject(self):
        raise MaKaCError( _("Cannot propose for rejection an abstract which is withdrawn"))

    def recover(self):
        """Restore the abstract to its pre-withdrawal status.

        If no previous status was recorded, all track judgements are
        cleared and the abstract returns to submitted (or accepted, when
        a contribution already exists for it).
        """
        abstract = self.getAbstract()
        contrib = abstract.as_new.contribution
        if self.getPrevStatus() is None:
            # reset all the judgments
            self._clearTrackAcceptances()
            self._clearTrackRejections()
            self._clearTrackReallocations()
            # setting the status
            if contrib is None:
                s = AbstractStatusSubmitted(abstract)
            else:
                s = AbstractStatusAccepted(abstract, self.getResponsible(), "")
        else:
            if contrib is not None and not isinstance(self.getPrevStatus(), AbstractStatusAccepted):
                s = AbstractStatusAccepted(abstract, self.getResponsible(), "")
            else:
                s = self.getPrevStatus()
        abstract.setCurrentStatus(s)
        # NOTE(review): reached even when ``contrib`` is None, in which
        # case ``contrib.track`` raises -- behaviour kept from the
        # original code; confirm.
        abstract.as_new.accepted_track_id = int(contrib.track.id) if contrib.track else None
        abstract.as_new.accepted_type = contrib.type

    def markAsDuplicated(self, responsible, originalAbs, comments=""):
        raise MaKaCError( _("Cannot mark as duplicated an abstract which is withdrawn"))

    def unMarkAsDuplicated(self, responsible, comments=""):
        raise MaKaCError( _("Only duplicated abstract can be unmark as duplicated"))

    def mergeInto(self, responsible, targetAbs, comments=""):
        raise MaKaCError( _("Cannot merge an abstract which is withdrawn"))

    def withdraw(self, resp, comments=""):
        raise MaKaCError( _("This abstract is already withdrawn"))
class AbstractStatusDuplicated(AbstractStatus):
    """Status: the abstract was marked as a duplicate of another one."""

    def __init__(self, abstract, responsible, originalAbstract, comments=""):
        AbstractStatus.__init__(self, abstract)
        self._setResponsible(responsible)
        self._setComments(comments)
        self._setOriginalAbstract(originalAbstract)

    def clone(self, abstract):
        """Return a copy of this status bound to *abstract*."""
        return AbstractStatusDuplicated(abstract, self.getResponsible(),
                                        self.getOriginal(), self.getComments())

    def _setResponsible(self, res):
        self._responsible = res

    def getResponsible(self):
        return self._responsible

    def _setComments(self, comments):
        self._comments = str(comments).strip()

    def getComments(self):
        return self._comments

    def _setOriginalAbstract(self, abs):
        self._original = abs

    def getOriginal(self):
        """Return the abstract this one duplicates."""
        return self._original

    def update(self):
        # Stable until explicitly un-marked.
        return

    def reject(self, responsible, comments=""):
        raise MaKaCError(_("Cannot reject an abstract which is duplicated"))

    def proposeToAccept(self):
        raise MaKaCError(_("Cannot propose for acceptance an abstract which is duplicated"))

    def proposeToReject(self):
        raise MaKaCError(_("Cannot propose for rejection an abstract which is duplicated"))

    def proposeToReallocate(self):
        raise MaKaCError(_("Cannot propose for reallocation an abstract which is duplicated"))

    def withdraw(self, resp, comments=""):
        raise MaKaCError(_("Cannot withdraw a duplicated abstract"))

    def markAsDuplicated(self, responsible, originalAbs, comments=""):
        raise MaKaCError(_("This abstract is already duplicated"))

    def unMarkAsDuplicated(self, responsible, comments=""):
        # Reverting the duplicate mark puts the abstract back to submitted.
        self.getAbstract().setCurrentStatus(AbstractStatusSubmitted(self.getAbstract()))

    def mergeInto(self, responsible, targetAbs, comments=""):
        raise MaKaCError(_("Cannot merge an abstract which is marked as a duplicate"))
class AbstractStatusMerged(AbstractStatus):
    """Status: the abstract was merged into another abstract."""

    def __init__(self, abstract, responsible, targetAbstract, comments=""):
        AbstractStatus.__init__(self, abstract)
        self._setResponsible(responsible)
        self._setComments(comments)
        self._setTargetAbstract(targetAbstract)

    def clone(self, abstract):
        """Return a copy of this status bound to *abstract*."""
        return AbstractStatusMerged(abstract, self.getResponsible(),
                                    self.getTargetAbstract(),
                                    self.getComments())

    def _setResponsible(self, res):
        self._responsible = res

    def getResponsible(self):
        return self._responsible

    def _setComments(self, comments):
        self._comments = str(comments).strip()

    def getComments(self):
        return self._comments

    def _setTargetAbstract(self, abstract):
        self._target = abstract

    def getTargetAbstract(self):
        """Return the abstract this one was merged into."""
        return self._target

    def update(self):
        # Stable until explicitly un-merged.
        return

    def reject(self, responsible, comments=""):
        raise MaKaCError(_("Cannot reject an abstract which is merged into another one"))

    def proposeToAccept(self):
        raise MaKaCError(_("Cannot propose for acceptance an abstract which is merged into another one"))

    def proposeToReject(self):
        raise MaKaCError(_("Cannot propose for rejection an abstract which is merged into another one"))

    def proposeToReallocate(self):
        raise MaKaCError(_("Cannot propose for reallocation an abstract which is merged into another one"))

    def withdraw(self, resp, comments=""):
        raise MaKaCError(_("Cannot withdraw an abstract which is merged into another one"))

    def markAsDuplicated(self, responsible, originalAbs, comments=""):
        raise MaKaCError(_("Cannot mark as duplicated an abstract which is merged into another one"))

    def unMarkAsDuplicated(self, responsible, comments=""):
        raise MaKaCError(_("Only duplicated abstract can be unmark as duplicated"))

    def mergeInto(self, responsible, target, comments=""):
        raise MaKaCError(_("This abstract is already merged into another one"))

    def unMerge(self, responsible, comments=""):
        # Reverting the merge puts the abstract back to submitted.
        self.getAbstract().setCurrentStatus(AbstractStatusSubmitted(self.getAbstract()))
class AbstractStatusNone(AbstractStatus):
    """Special status assigned to abstracts placed in the trash can."""

    def __init__(self, abstract):
        AbstractStatus.__init__(self, abstract)

    def clone(self, abstract):
        """Return a fresh none-status bound to *abstract*."""
        return AbstractStatusNone(abstract)
class NotificationTemplate(Persistent):
    """Email template used to notify people about an abstract.

    A template has a subject and a body containing ``{var}`` placeholders
    (stored internally in ``%(var)s`` form, see ``parseTplContent``), a set
    of recipient groups (``NotifTplToAddr`` subclasses), CC addresses and a
    list of conditions (``NotifTplCondition`` subclasses) deciding to which
    abstracts the template applies.
    """
    def __init__(self):
        self._owner=None
        self._id=""
        self._name=""
        self._description=""
        self._tplSubject=""
        self._tplBody=""
        self._fromAddr = ""
        # Whether the conference's CA address is CCed as well.
        self._CAasCCAddr = False
        self._ccAddrList=PersistentList()
        self._toAddrs = PersistentList()
        self._conditions=PersistentList()
        # Counters producing ids for recipients and conditions.
        self._toAddrGenerator=Counter()
        self._condGenerator=Counter()
    def clone(self):
        """Return an unattached copy of this template.

        NOTE(review): ``setTplSubject`` and ``setTplBody`` require a second
        ``varList`` argument but are called below with a single argument,
        so cloning raises TypeError as written -- confirm against callers.
        """
        tpl = NotificationTemplate()
        tpl.setName(self.getName())
        tpl.setDescription(self.getDescription())
        tpl.setTplSubject(self.getTplSubject())
        tpl.setTplBody(self.getTplBody())
        tpl.setFromAddr(self.getFromAddr())
        tpl.setCAasCCAddr(self.getCAasCCAddr())
        for cc in self.getCCAddrList() :
            tpl.addCCAddr(cc)
        for to in self.getToAddrList() :
            tpl.addToAddr(to)
        for con in self.getConditionList() :
            tpl.addCondition(con.clone(tpl))
        return tpl
    def delete(self):
        # Detach recipients/conditions and move the template to the trash.
        self.clearToAddrs()
        self.clearCCAddrList()
        self.clearConditionList()
        TrashCanManager().add(self)
    def recover(self):
        TrashCanManager().remove(self)
##    def getResponsible( self ):
##        return self._responsible
##
##    def _setComments( self, comments ):
##        self._comments = str( comments ).strip()
##
##    def getComments( self ):
##        return self._comments
##
##    def _setOriginalAbstract(self,abstract):
##        self._original=abstract
    def canModify(self, aw_or_user):
        # Modification rights are delegated to the owning conference.
        return self.getConference().canModify(aw_or_user)
    def getLocator(self):
        """Return the locator of the owning conference extended with this
        template's id."""
        loc = self.getOwner().getConference().getLocator()
        loc["notifTplId"] = self._id
        return loc
    def getConference(self):
        return self._owner.getConference()
    def includeInOwner(self,owner,id):
        """Attach this template to *owner* under the given id."""
        self._owner=owner
        self._id=id
    def getOwner(self):
        return self._owner
    def getId(self):
        return self._id
    def setName(self,newName):
        self._name=newName.strip()
    def getName(self):
        return self._name
    def setDescription(self,newDesc):
        self._description=newDesc.strip()
    def getDescription(self):
        return self._description
    def setTplSubject(self,newSubject, varList):
        # Subject is stored in ``%(var)s`` form; see parseTplContent.
        self._tplSubject=self.parseTplContent(newSubject, varList).strip()
    def getTplSubject(self):
        return self._tplSubject
    def getTplSubjectShow(self, varList):
        # Display form with ``{var}`` placeholders restored.
        return self.parseTplContentUndo(self._tplSubject, varList)
    def setTplBody(self,newBody, varList):
        # Body is stored in ``%(var)s`` form; see parseTplContent.
        self._tplBody=self.parseTplContent(newBody, varList).strip()
    def getTplBody(self):
        return self._tplBody
    def getTplBodyShow(self, varList):
        # Display form with ``{var}`` placeholders restored.
        return self.parseTplContentUndo(self._tplBody, varList)
    def getCCAddrList(self):
        # Lazy default for objects stored before the attribute existed.
        try:
            if self._ccAddrList:
                pass
        except AttributeError:
            self._ccAddrList=PersistentList()
        return self._ccAddrList
    def addCCAddr(self,newAddr):
        """Add a CC address, ignoring blanks and duplicates."""
        try:
            if self._ccAddrList:
                pass
        except AttributeError:
            self._ccAddrList=PersistentList()
        ccAddr=newAddr.strip()
        if ccAddr!="" and ccAddr not in self._ccAddrList:
            self._ccAddrList.append(ccAddr)
    def setCCAddrList(self,l):
        """Replace the whole CC address list."""
        self.clearCCAddrList()
        for addr in l:
            self.addCCAddr(addr)
    def setCAasCCAddr(self, CAasCCAddr):
        self._CAasCCAddr = CAasCCAddr
    def getCAasCCAddr(self):
        # Lazy default for objects stored before the attribute existed.
        try:
            if self._CAasCCAddr:
                pass
        except AttributeError:
            self._CAasCCAddr = False
        return self._CAasCCAddr
    def clearCCAddrList(self):
        self._ccAddrList=PersistentList()
    def getFromAddr(self):
        # Falls back to the conference support e-mail when unset.
        try:
            return self._fromAddr
        except AttributeError:
            self._fromAddr = self._owner.getConference().getSupportInfo().getEmail()
            return self._fromAddr
    def setFromAddr(self, addr):
        self._fromAddr = addr
    def addToAddr(self,toAddr):
        """Register a recipient group, assigning it an id if needed.

        Only one recipient of each class is kept.
        """
        if self.hasToAddr(toAddr.__class__):
            return
        try:
            if self._toAddrGenerator:
                pass
        except AttributeError, e:
            self._toAddrGenerator = Counter()
        id = toAddr.getId()
        if id == -1:
            id = int(self._toAddrGenerator.newCount())
        toAddr.includeInTpl(self,id)
        self.getToAddrList().append(toAddr)
    def removeToAddr(self,toAddr):
        """Detach a recipient group and move it to the trash can."""
        if not self.hasToAddr(toAddr.__class__):
            return
        self.getToAddrList().remove(toAddr)
        toAddr.includeInTpl(None,toAddr.getId())
        toAddr.delete()
    def recoverToAddr(self, toAddr):
        self.addToAddr(toAddr)
        toAddr.recover()
    def getToAddrs(self, abs):
        """Return all users to notify about *abs* across all recipient
        groups."""
        users = []
        for toAddr in self.getToAddrList():
            users += toAddr.getToAddrList(abs)
        return users
    def getToAddrList(self):
        # Lazy default for objects stored before the attribute existed.
        try:
            if self._toAddrs:
                pass
        except AttributeError, e:
            self._toAddrs = PersistentList()
        return self._toAddrs
    def getToAddrById(self,id):
        """Return the recipient group with the given id, or None."""
        for toAddr in self.getToAddrList():
            if toAddr.getId()==int(id):
                return toAddr
        return None
    def hasToAddr(self,toAddrKlass):
        """Returns True if the TPL contains a "toAddr" which class is "toAddrKlass"
        """
        for toAddr in self.getToAddrList():
            if toAddr.__class__ == toAddrKlass:
                return True
        return False
    def clearToAddrs(self):
        while(len(self.getToAddrList())>0):
            self.removeToAddr(self.getToAddrList()[0])
    def addCondition(self,cond):
        """Register a condition, assigning it an id if needed."""
        if cond in self._conditions:
            return
        id = cond.getId()
        if id == -1:
            id = int(self._condGenerator.newCount())
        cond.includeInTpl(self, id)
        self._conditions.append(cond)
    def removeCondition(self,cond):
        """Detach a condition and move it to the trash can."""
        if cond not in self._conditions:
            return
        self._conditions.remove(cond)
        cond.delete()
    def recoverCondition(self, cond):
        self.addCondition(cond)
        cond.recover()
    def getConditionList(self):
        """Return the list of conditions attached to this template."""
        return self._conditions
    def getConditionById(self,id):
        """Return the condition with the given id, or None."""
        for cond in self._conditions:
            if cond.getId()==int(id):
                return cond
        return None
    def clearConditionList(self):
        while(len(self.getConditionList())>0):
            self.removeCondition(self.getConditionList()[0])
    def satisfies(self,abs):
        """Return True when at least one condition matches *abs*."""
        for cond in self._conditions:
            if cond.satisfies(abs):
                return True
        return False
    def parseTplContent(self, content, varList):
        """Convert ``{var}`` placeholders into ``%(var)s`` form."""
        # replace the % in order to avoid exceptions
        result = content.replace("%", "%%")
        # NOTE(review): the original comment claimed the vars must be
        # processed in reverse to match the longest tags first, but no
        # reordering is performed here -- confirm.
        for var in varList:
            result = result.replace("{"+var.getName()+"}", "%("+var.getName()+")s")
        return result
    def parseTplContentUndo(self, content, varList):
        """Inverse of parseTplContent, for display purposes only."""
        # The body content is shown without "%()" and with "%" in instead of "%%" but it is not modified
        result = content
        for var in varList:
            result = result.replace("%("+var.getName()+")s", "{"+var.getName()+"}")
        # replace the %% by %
        result = result.replace("%%", "%")
        return result
    def getModifKey( self ):
        return self.getConference().getModifKey()
class NotifTplToAddr(Persistent):
    """Base class for a notification template's recipient groups."""

    def __init__(self):
        self._tpl = None
        self._id = -1

    def clone(self):
        """Return a new, unattached recipient group of the same kind."""
        return NotifTplToAddr()

    def delete(self):
        TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)

    def includeInTpl(self, newTpl, newId):
        """Attach this recipient group to *newTpl* under *newId*."""
        self._tpl = newTpl
        self._id = newId

    def getTpl(self):
        return self._tpl

    def getId(self):
        return self._id

    def getToAddrList(self, absList):
        """
        Return a list with all the emails for a group.
        """
        return []
class NotifTplToAddrSubmitter(NotifTplToAddr):
    """Recipient group: the submitter of the abstract."""

    def getToAddrList(self, abs):
        """Return a one-element list holding the abstract's submitter."""
        return [abs.getSubmitter()]

    def clone(self):
        return NotifTplToAddrSubmitter()
class NotifTplToAddrPrimaryAuthors(NotifTplToAddr):
    """Recipient group: the primary authors of the abstract."""

    def getToAddrList(self, abs):
        """Return a fresh list with every primary author."""
        return list(abs.getPrimaryAuthorList())

    def clone(self):
        return NotifTplToAddrPrimaryAuthors()
class NotifTplCondition(Persistent):
    """Base class for conditions deciding whether a notification template
    applies to a given abstract."""

    def __init__(self):
        self._tpl = None
        self._id = -1

    def clone(self, template):
        """Return a copy of this condition attached to *template*.

        Bug fix: the previous implementation instantiated an undefined
        name (``NotifyCondition``) and called ``includeInTpl`` with a
        single argument although it requires both a template and an id,
        so cloning always raised at runtime.
        """
        con = NotifTplCondition()
        con.includeInTpl(template, self.getId())
        return con

    def delete(self):
        TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)

    def includeInTpl(self, newTpl, newId):
        """Attach this condition to *newTpl* under *newId*."""
        self._tpl = newTpl
        self._id = newId

    def getTpl(self):
        return self._tpl

    def getId(self):
        return self._id

    def satisfies(self, abs):
        # Base condition matches everything; subclasses narrow this.
        return True
class NotifTplCondAccepted(NotifTplCondition):
    """Condition satisfied by accepted abstracts, optionally restricted to
    a track and/or a contribution type.

    The sentinel string '--any--' matches everything and '--none--'
    requires the corresponding field to be unset.
    """
    def __init__(self, track="--any--", contribType="--any--"):
        NotifTplCondition.__init__(self)
        self._track = track
        # Sentinel strings are stored as-is; real contribution types by id.
        self._contrib_type_id = contribType if isinstance(contribType, basestring) else contribType.id
    def clone(self, conference, template):
        # Map the stored track/type onto the target conference's objects.
        # NOTE(review): unlike other conditions, the clone is never
        # attached to *template* (no includeInTpl call) -- confirm.
        ntca = NotifTplCondAccepted()
        for newtrack in conference.getTrackList() :
            if newtrack.getTitle() == self.getTrack().getTitle() :
                ntca.setTrack(newtrack)
        for newtype in conference.as_event.contribution_types:
            if newtype.name == self.getContribType():
                ntca.setContribType(newtype)
        return ntca
    def setContribType(self, ct="--any--"):
        self._contrib_type_id = '--any--' if ct == '--any--' else ct.id
    def getContribType(self):
        # Ugly, but only way to handle '--any--'
        return (ContributionType.get(self._contrib_type_id) if isinstance(self._contrib_type_id, int)
                else self._contrib_type_id)
    def setTrack(self, tr="--any--"):
        self._track = tr
    def getTrack(self):
        # Lazy default for objects stored before the attribute existed.
        try:
            if self._track:
                pass
        except AttributeError:
            self._track="--any--"
        return self._track
    def _satifiesContribType(self, abs_wrap):
        # (sic: the method name typo is preserved -- it is referenced in
        # satisfies() below.)
        abstract_type = abs_wrap.getCurrentStatus().getAbstract().as_new.accepted_type
        if self._contrib_type_id == "--any--":
            return True
        else:
            if self._contrib_type_id == '--none--':
                return not abstract_type
            if not abstract_type:
                return False
            # TODO: use ids in db, instead of objects!
            return abstract_type.id == self._contrib_type_id
        # Unreachable: every branch above returns.
        return False
    def _satifiesTrack(self, abs_wrap):
        # (sic: name typo preserved; see _satifiesContribType.)
        accepted_track = abs_wrap.getCurrentStatus().getTrack()
        target_track = self.getTrack()
        if target_track == "--any--":
            return True
        else:
            if not target_track or target_track == '--none--':
                return not bool(accepted_track)
            return accepted_track == target_track
        # Unreachable: every branch above returns.
        return False
    def satisfies(self,abs):
        """Return True when *abs* is accepted and matches the configured
        track and contribution type."""
        if not isinstance(abs.getCurrentStatus(), AbstractStatusAccepted):
            return False
        else:
            return self._satifiesContribType(abs) and self._satifiesTrack(abs)
class NotifTplCondRejected(NotifTplCondition):
    """Condition satisfied by abstracts in the rejected status."""

    def satisfies(self, abs):
        return isinstance(abs.getCurrentStatus(), AbstractStatusRejected)

    def clone(self, conference, template):
        """Return a copy of this condition attached to *template*.

        Bug fix: ``includeInTpl`` requires both a template and an id; the
        previous code passed only the template, raising TypeError.
        """
        ntcr = NotifTplCondRejected()
        ntcr.includeInTpl(template, self.getId())
        return ntcr
class NotifTplCondMerged(NotifTplCondition):
    """Condition satisfied by abstracts merged into another abstract,
    optionally restricted to a track and/or a contribution type
    ('--any--' matches all, '--none--' requires the field unset)."""

    def __init__(self, track=None, contrib_type=None):
        NotifTplCondition.__init__(self)
        self._track = track
        # Bug fix: with the default ``contrib_type=None`` the previous
        # code evaluated ``None.id`` and raised AttributeError.  Strings
        # (sentinels) and None are stored as-is, objects by id.
        if contrib_type is None or isinstance(contrib_type, basestring):
            self._contrib_type_id = contrib_type
        else:
            self._contrib_type_id = contrib_type.id

    def satisfies(self, abstract):
        """Return True when *abstract* is merged and matches the
        configured track and contribution type."""
        if not isinstance(abstract.getCurrentStatus(), AbstractStatusMerged):
            return False
        else:
            return self._satisfiesContribType(abstract) and self._satisfiesTrack(abstract)

    def _satisfiesContribType(self, abs_wrap):
        if self._contrib_type_id == '--any--':
            return True
        else:
            abstract_type = abs_wrap.getCurrentStatus().getAbstract().as_new.type
            if self._contrib_type_id == '--none--':
                return not abstract_type
            if not abstract_type:
                return False
            # TODO: use ids in db, instead of objects!
            return abstract_type.id == int(self._contrib_type_id)

    def _satisfiesTrack(self, abs_wrap):
        target_track = self.getTrack()
        if target_track == "--any--":
            return True
        else:
            tracks = abs_wrap.getCurrentStatus().getAbstract().getTrackListSorted()
            if not target_track or target_track == '--none--':
                return not tracks
            return target_track in tracks

    def setTrack(self, track="--any--"):
        # Added: clone() called this method although it did not exist on
        # this class, raising AttributeError.
        self._track = track

    def setContribType(self, ct="--any--"):
        # Added counterpart of NotifTplCondAccepted.setContribType, for
        # the same reason as setTrack above.
        self._contrib_type_id = '--any--' if ct == '--any--' else ct.id

    def getTrack(self):
        return self._track

    def getContribType(self):
        # Ugly, but only way to handle '--any--'
        return (ContributionType.get(self._contrib_type_id) if isinstance(self._contrib_type_id, int)
                else self._contrib_type_id)

    def clone(self, conference, template):
        """Return a copy of this condition mapped onto *conference*.

        NOTE(review): when ``self._track`` is None this still raises on
        ``.getTitle()`` -- behaviour inherited from the original code.
        """
        ntcm = NotifTplCondMerged()
        for newtrack in conference.getTrackList():
            if newtrack.getTitle() == self.getTrack().getTitle():
                ntcm.setTrack(newtrack)
                break
        for newtype in conference.as_event.contribution_types:
            if newtype.name == self.getContribType():
                ntcm.setContribType(newtype)
                break
        return ntcm
class NotificationLog(Persistent):
    """Record of the notifications sent for one abstract."""

    def __init__(self, abstract):
        self._abstract = abstract
        self._entries = PersistentList()

    def getAbstract(self):
        return self._abstract

    def addEntry(self, newEntry):
        """Append *newEntry* unless it is None or already logged."""
        if newEntry is not None and newEntry not in self._entries:
            self._entries.append(newEntry)

    def getEntryList(self):
        return self._entries

    # The three following methods are used only for recovery purposes:
    def removeEntry(self, entry):
        if entry is not None and entry in self._entries:
            self._entries.remove(entry)
            entry.delete()

    def recoverEntry(self, entry):
        self.addEntry(entry)
        entry.recover()

    def clearEntryList(self):
        while self.getEntryList():
            self.removeEntry(self.getEntryList()[0])
# -----------------------------------------------------------
class NotifLogEntry(Persistent):
    """One notification-log line: when, by whom and with which template
    a notification was sent."""

    def __init__(self, responsible, tpl):
        self._setDate(nowutc())
        self._setResponsible(responsible)
        self._setTpl(tpl)

    def _setDate(self, newDate):
        self._date = newDate

    def getDate(self):
        return self._date

    def _setResponsible(self, newResp):
        self._responsible = newResp

    def getResponsible(self):
        return self._responsible

    def _setTpl(self, newTpl):
        self._tpl = newTpl

    def getTpl(self):
        return self._tpl

    def delete(self):
        TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)
# (non-Python residue from dataset concatenation; preserved as a comment)
# belokop/indico_bare | indico/MaKaC/review.py | Python | gpl-3.0 | 118820
# vim: fileencoding=UTF-8:expandtab:autoindent:ts=4:sw=4:sts=4
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# To import from calibre, some things need to be added to `sys` first. Do not import
# anything from calibre or the plugins yet.
import glob
import os
import sys
import unittest
test_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(test_dir)
test_libdir = os.path.join(
src_dir, "pylib", "python{major}".format(major=sys.version_info.major)
)
sys.path += glob.glob(os.path.join(test_libdir, "*.zip"))
try:
from unittest import mock
except ImportError:
# Python 2
import mock
from calibre_plugins.kobotouch_extended import common
from polyglot.builtins import unicode_type
# Locales exercised by the tests below.
LANGUAGES = ("en_CA", "fr_CA", "fr_FR", "de_DE", "ar_EG", "ru_RU")
# Sample strings paired with the set of encodings they are representable
# in; used to check the logger handles non-ASCII text under each
# candidate preferred encoding.
TEST_STRINGS = [
    {
        "encodings": {"UTF-8", "CP1252"},
        "test_strings": [
            unicode_type(s) for s in ["Hello, World!", "J'ai trouvé mon livre préféré"]
        ],
    },
    {
        "encodings": {"UTF-8", "CP1256"},
        "test_strings": [unicode_type(s) for s in ["مرحبا بالعالم"]],
    },
    {
        "encodings": {"UTF-8", "CP1251"},
        "test_strings": [unicode_type(s) for s in ["Привет мир"]],
    },
    {
        "encodings": {"UTF-8", "CP932"},
        "test_strings": [unicode_type(s) for s in ["こんにちは世界"]],
    },
]
# Fixed timestamp injected via mock so formatted log output is
# deterministic in the tests.
TEST_TIME = "2020-04-01 01:02:03"
def gen_lang_code():
    """Yield every encoding named in TEST_STRINGS, deduplicated.

    Order is unspecified (set iteration), matching the original behavior.
    """
    all_encodings = set()
    for case in TEST_STRINGS:
        all_encodings.update(case["encodings"])
    for encoding in all_encodings:
        yield encoding
class TestCommon(unittest.TestCase):
    """Tests for the plugin's common.Logger helper."""
    # Saved value of $LANG so each test can restore the environment.
    orig_lang = ""  # type: str
    def setUp(self): # type: () -> None
        # NOTE(review): stores None when $LANG is unset, despite the str
        # annotation above — tearDown's truthiness check relies on this.
        self.orig_lang = os.environ.get("LANG", None)
    def tearDown(self): # type: () -> None
        # Restore $LANG to its pre-test state.
        if not self.orig_lang:
            if "LANG" in os.environ:
                del os.environ["LANG"]
        else:
            os.environ["LANG"] = self.orig_lang
            self.orig_lang = ""
    def test_logger_log_level(self): # type: () -> None
        """Logger defaults to INFO; CALIBRE_DEVELOP_FROM or CALIBRE_DEBUG
        in the environment switches it to DEBUG."""
        for envvar in ("CALIBRE_DEVELOP_FROM", "CALIBRE_DEBUG"):
            if envvar in os.environ:
                del os.environ[envvar]
        logger = common.Logger()
        self.assertEqual(logger.log_level, "INFO")
        os.environ["CALIBRE_DEVELOP_FROM"] = "true"
        logger = common.Logger()
        self.assertEqual(logger.log_level, "DEBUG")
        del os.environ["CALIBRE_DEVELOP_FROM"]
        os.environ["CALIBRE_DEBUG"] = "1"
        logger = common.Logger()
        self.assertEqual(logger.log_level, "DEBUG")
        del os.environ["CALIBRE_DEBUG"]
    def _run_logger_unicode_test(self, as_bytes): # type: (bool) -> None
        """Check Logger._tag_args formatting under several preferred encodings.

        NOTE(review): the as_bytes parameter is currently unused — confirm
        whether a bytes-input variant of this check was intended.
        """
        for o in TEST_STRINGS:
            for enc in o["encodings"]:
                with mock.patch(
                    "calibre_plugins.kobotouch_extended.common.preferred_encoding", enc
                ), mock.patch(
                    "calibre_plugins.kobotouch_extended.common.time.strftime",
                    mock.MagicMock(return_value=TEST_TIME),
                ):
                    logger = common.Logger()
                    for msg in o["test_strings"]:
                        test_tagged = logger._tag_args("DEBUG", msg)
                        self.assertListEqual(
                            test_tagged,
                            [
                                "{timestr} [{level}] {msg}".format(
                                    timestr=TEST_TIME, level="DEBUG", msg=msg
                                ),
                            ],
                        )
    def test_logger_ensure_unicode_from_bytes(self): # type: () -> None
        # Run the formatting check for both parameter values (see note above
        # in _run_logger_unicode_test: the flag is currently a no-op).
        self._run_logger_unicode_test(True)
        self._run_logger_unicode_test(False)
    @mock.patch(
        "calibre_plugins.kobotouch_extended.common.Logger.print_formatted_log",
        mock.MagicMock(),
    )
    @mock.patch(
        "calibre_plugins.kobotouch_extended.common.Logger._prints", mock.MagicMock(),
    )
    @mock.patch(
        "calibre_plugins.kobotouch_extended.common.Logger._tag_args",
        mock.MagicMock(return_value="Goodbye, World"),
    )
    def test_logger_logs(self):
        """Logging calls route through print_formatted_log / _prints."""
        logger = common.Logger()
        logger.debug("Hello, World")
        logger.print_formatted_log.assert_called_with("DEBUG", "Hello, World")
        logger("Hello, World")
        logger.print_formatted_log.assert_called_with("INFO", "Hello, World")
        logger.print_formatted_log.reset_mock()
        logger._prints.reset_mock()
        logger._tag_args.reset_mock()
        logger.exception("Oh noes!")
        logger._tag_args.assert_called_with("ERROR", "Oh noes!")
        # Two prints expected — presumably message + traceback; confirm in
        # common.Logger.exception.
        self.assertEqual(logger._prints.call_count, 2)
if __name__ == "__main__":
    # Allow running this module directly: python test_common.py
    unittest.main(module="test_common", verbosity=2)
| NiLuJe/calibre-kobo-driver | tests/test_common.py | Python | gpl-3.0 | 4,914 |
import re
# \cmd{arg}: any single-argument LaTeX command (no backslash in the name).
simple_cmd_match = re.compile(r'\\([^\\]+?)\{(.*?)\}')
# \includegraphics[opts]{path}: captures the image path.
graphics_cmd_match = re.compile(r'\\includegraphics\[.*?\]?\{(.*?)\}')
# \begin{env}[opt]: captures the environment name and an optional [..] argument.
begin_cmd_match = re.compile(r'\\begin{([^}]+?)}(?:(?:\[([^\]]+?)\])|.*)')
# \newcommand-style definition: captures the command name and its body.
newcmd_match = re.compile(r'\\.+?\{(.*?)\}\{(.*)\}')
# newcmd_match_with_var = re.compile(r'\\[^\\]+?\{(.*?)\}\{(.*?)\}')
# {...}: non-greedy single braced group (not nesting-aware; see get_vars below
# in this module for the nesting-aware parser).
vars_match = re.compile(r'\{(.+?)\}')
def get_vars(line):
    """Extract the top-level {...} groups from a (stripped) LaTeX line.

    Text outside braces is discarded; nested braces are preserved verbatim
    inside their enclosing top-level group.
    """
    groups = []
    depth = 0
    current = ''
    for ch in line.strip():
        if ch == '}':
            depth -= 1
        if depth > 0:
            # Inside a top-level group: keep the character (including any
            # nested braces).
            current += ch
        elif depth == 0 and current:
            # Just closed a top-level group: flush it.
            groups.append(current)
            current = ''
        if ch == '{':
            depth += 1
    return groups
class FileIter:
    """Lazily iterate over the lines of a text file.

    The file is opened eagerly on construction and closed when the generator
    returned by get_line() is exhausted — or, with this fix, also when the
    caller abandons the generator early (generator close/finalization runs
    the finally clause).
    """
    def __init__(self, filename):
        self.fn = filename
        self.f = open(self.fn, 'r')

    def get_line(self):
        """Yield the file's lines one at a time, closing the file afterwards."""
        try:
            for line in self.f:
                yield line
        finally:
            # Runs on normal exhaustion AND on early generator close/GC,
            # so the handle can no longer leak.
            self.f.close()
| floriangeigl/arxiv_converter | tex_utils.py | Python | gpl-3.0 | 969 |
# -*- coding: utf-8 -*-
"""Common message dialogs"""
import os
from gi.repository import GLib, Gtk, Gdk, GObject
from lutris import api, pga, runtime, settings
from lutris.gui.widgets import DownloadProgressBox
from lutris.util import datapath
class GtkBuilderDialog(GObject.Object):
    """Base class for dialogs whose UI is loaded from a GtkBuilder file.

    Subclasses must define ``glade_file`` (UI file name under the data
    directory) and ``dialog_object`` (id of the dialog widget in that file).
    Keyword arguments are forwarded to the ``initialize`` hook.
    """
    def __init__(self, parent=None, **kwargs):
        super(GtkBuilderDialog, self).__init__()
        ui_filename = os.path.join(datapath.get(), 'ui',
                                   self.glade_file)
        if not os.path.exists(ui_filename):
            # Fixed grammar of the error message ("does not exists").
            raise ValueError("ui file does not exist: %s" % ui_filename)
        self.builder = Gtk.Builder()
        self.builder.add_from_file(ui_filename)
        self.dialog = self.builder.get_object(self.dialog_object)
        # Route the UI file's signal handlers to methods on this instance.
        self.builder.connect_signals(self)
        if parent:
            self.dialog.set_transient_for(parent)
        self.dialog.show_all()
        self.initialize(**kwargs)

    def initialize(self, **kwargs):
        """Hook for subclasses; called after the dialog is shown."""
        pass

    def on_close(self, *args):
        """Destroy the dialog window."""
        self.dialog.destroy()

    def on_response(self, widget, response):
        if response == Gtk.ResponseType.DELETE_EVENT:
            try:
                self.dialog.hide()
            except AttributeError:
                # Dialog may already have been torn down.
                pass
class AboutDialog(GtkBuilderDialog):
    """The application's About dialog."""
    glade_file = 'about-dialog.ui'
    dialog_object = "about_dialog"
    def initialize(self):
        # Show the running application version in the dialog.
        self.dialog.set_version(settings.VERSION)
class NoticeDialog(Gtk.MessageDialog):
    """Modal notice: display *message* and wait for the user to press OK."""
    def __init__(self, message, parent=None):
        Gtk.MessageDialog.__init__(
            self, buttons=Gtk.ButtonsType.OK, parent=parent
        )
        self.set_markup(message)
        # Block until dismissed, then tear the dialog down.
        self.run()
        self.destroy()
class ErrorDialog(Gtk.MessageDialog):
    """Modal error popup with an optional secondary detail line."""
    def __init__(self, message, secondary=None, parent=None):
        Gtk.MessageDialog.__init__(
            self, buttons=Gtk.ButtonsType.OK, parent=parent
        )
        self.set_markup(message)
        # Only show the detail line when one was provided (truthy).
        if secondary:
            self.format_secondary_text(secondary)
        self.run()
        self.destroy()
class QuestionDialog(Gtk.MessageDialog):
    """Ask the user a question.

    ``settings`` is a dict with 'question' (markup) and 'title' keys.
    NOTE(review): the parameter name shadows the module-level ``settings``
    import inside this scope; renaming it would break keyword callers.
    The user's answer (YES/NO response id) is stored in ``self.result``.
    """
    YES = Gtk.ResponseType.YES
    NO = Gtk.ResponseType.NO
    def __init__(self, settings):
        super(QuestionDialog, self).__init__(
            message_type=Gtk.MessageType.QUESTION,
            buttons=Gtk.ButtonsType.YES_NO
        )
        self.set_markup(settings['question'])
        self.set_title(settings['title'])
        self.result = self.run()
        self.destroy()
class DirectoryDialog(Gtk.FileChooserDialog):
    """Ask the user to select a directory.

    After construction, ``self.result`` holds the response id and
    ``self.folder`` the directory path.
    """
    def __init__(self, message, parent=None):
        super(DirectoryDialog, self).__init__(
            title=message,
            action=Gtk.FileChooserAction.SELECT_FOLDER,
            buttons=('_Cancel', Gtk.ResponseType.CLOSE,
                     '_OK', Gtk.ResponseType.OK),
            parent=parent
        )
        self.result = self.run()
        # NOTE(review): get_current_folder() returns the folder being browsed,
        # not necessarily the highlighted row — confirm get_filename() was not
        # intended for the user's actual selection.
        self.folder = self.get_current_folder()
        self.destroy()
class FileDialog(Gtk.FileChooserDialog):
    """Modal file chooser; the chosen path ends up in ``self.filename``
    (None when the user cancels)."""
    def __init__(self, message=None, default_path=None):
        self.filename = None
        Gtk.FileChooserDialog.__init__(
            self,
            message or "Please choose a file",
            None,
            Gtk.FileChooserAction.OPEN,
            ('_Cancel', Gtk.ResponseType.CANCEL,
             '_OK', Gtk.ResponseType.OK)
        )
        if default_path and os.path.exists(default_path):
            self.set_current_folder(default_path)
        # Allow non-local (e.g. remote GVFS) locations.
        self.set_local_only(False)
        if self.run() == Gtk.ResponseType.OK:
            self.filename = self.get_filename()
        self.destroy()
class DownloadDialog(Gtk.Dialog):
    """Dialog showing a download in progress.

    The actual transfer is delegated to a DownloadProgressBox; this dialog
    reacts to its 'complete'/'cancel' signals and to the window being closed.
    """
    def __init__(self, url=None, dest=None, title=None, label=None,
                 downloader=None):
        Gtk.Dialog.__init__(self, title or "Downloading file")
        self.set_size_request(485, 104)
        self.set_border_width(12)
        params = {'url': url,
                  'dest': dest,
                  'title': label or "Downloading %s" % url}
        self.download_box = DownloadProgressBox(params, downloader=downloader)
        # Wire progress-box outcomes and window close before starting.
        self.download_box.connect('complete', self.download_complete)
        self.download_box.connect('cancel', self.download_cancelled)
        self.connect('response', self.on_response)
        self.get_content_area().add(self.download_box)
        self.show_all()
        self.download_box.start()
    def download_complete(self, _widget, _data):
        # Transfer finished: report success and close.
        self.response(Gtk.ResponseType.OK)
        self.destroy()
    def download_cancelled(self, _widget, data):
        # User cancelled from the progress box.
        self.response(Gtk.ResponseType.CANCEL)
        self.destroy()
    def on_response(self, dialog, response):
        if response == Gtk.ResponseType.DELETE_EVENT:
            # Window closed mid-transfer: abort the download too.
            self.download_box.downloader.cancel()
            self.destroy()
class RuntimeUpdateDialog(Gtk.Dialog):
    """Dialog showing the progress of ongoing runtime update."""
    def __init__(self, parent=None):
        Gtk.Dialog.__init__(self, "Runtime updating", parent=parent)
        self.set_size_request(360, 104)
        self.set_border_width(12)
        progress_box = Gtk.Box()
        self.progressbar = Gtk.ProgressBar()
        self.progressbar.set_margin_top(40)
        self.progressbar.set_margin_bottom(40)
        self.progressbar.set_margin_right(20)
        self.progressbar.set_margin_left(20)
        progress_box.pack_start(self.progressbar, True, True, 0)
        self.get_content_area().add(progress_box)
        # Poll the runtime updater every 200 ms; pulse while it runs.
        GLib.timeout_add(200, self.on_runtime_check)
        self.show_all()
    def on_runtime_check(self, *args, **kwargs):
        self.progressbar.pulse()
        if not runtime.is_updating():
            self.response(Gtk.ResponseType.OK)
            self.destroy()
            # Returning False removes the GLib timeout source.
            return False
        return True
class PgaSourceDialog(GtkBuilderDialog):
    """Dialog to view and edit the list of PGA game sources (URIs)."""
    glade_file = 'dialog-pga-sources.ui'
    dialog_object = 'pga_dialog'

    def __init__(self):
        super(PgaSourceDialog, self).__init__()

        # GtkBuilder Objects
        self.sources_selection = self.builder.get_object("sources_selection")
        self.sources_treeview = self.builder.get_object("sources_treeview")
        self.remove_source_button = self.builder.get_object(
            "remove_source_button"
        )

        # Treeview setup: a single text column listing the source URIs.
        self.sources_liststore = Gtk.ListStore(str)
        renderer = Gtk.CellRendererText()
        renderer.set_padding(4, 10)
        uri_column = Gtk.TreeViewColumn("URI", renderer, text=0)
        self.sources_treeview.append_column(uri_column)
        self.sources_treeview.set_model(self.sources_liststore)
        # The enumerate() index was unused; iterate the sources directly.
        for source in pga.read_sources():
            self.sources_liststore.append((source, ))

        # Nothing selected yet, so removing makes no sense.
        self.remove_source_button.set_sensitive(False)
        self.dialog.show_all()

    @property
    def sources_list(self):
        """Current list of source URIs shown in the treeview."""
        return [source[0] for source in self.sources_liststore]

    def on_apply(self, widget, data=None):
        """Persist the edited source list, then close the dialog."""
        pga.write_sources(self.sources_list)
        self.on_close(widget, data)

    def on_add_source_button_clicked(self, widget, data=None):
        """Let the user pick a directory and add it as a new source."""
        chooser = Gtk.FileChooserDialog(
            "Select directory", self.dialog,
            Gtk.FileChooserAction.SELECT_FOLDER,
            ('_Cancel', Gtk.ResponseType.CANCEL,
             '_OK', Gtk.ResponseType.OK)
        )
        chooser.set_local_only(False)
        response = chooser.run()
        if response == Gtk.ResponseType.OK:
            uri = chooser.get_uri()
            # Ignore duplicates.
            if uri not in self.sources_list:
                self.sources_liststore.append((uri, ))
        chooser.destroy()

    def on_remove_source_button_clicked(self, widget, data=None):
        """Remove the selected source."""
        (model, treeiter) = self.sources_selection.get_selected()
        if treeiter:
            # TODO : Add confirmation
            model.remove(treeiter)

    def on_sources_selection_changed(self, widget, data=None):
        """Enable the remove button only when a row is selected."""
        (model, treeiter) = self.sources_selection.get_selected()
        self.remove_source_button.set_sensitive(treeiter is not None)
class ClientLoginDialog(GtkBuilderDialog):
    """Login dialog for connecting to the lutris.net account.

    Emits 'connected' with the username on successful authentication.
    """
    glade_file = 'dialog-lutris-login.ui'
    dialog_object = 'lutris-login'
    __gsignals__ = {
        'connected': (GObject.SignalFlags.RUN_LAST, None,
                      (GObject.TYPE_PYOBJECT,)),
        'cancel': (GObject.SignalFlags.RUN_LAST, None,
                   (GObject.TYPE_PYOBJECT,))
    }
    def __init__(self, parent):
        super(ClientLoginDialog, self).__init__(parent=parent)
        self.parent = parent
        self.username_entry = self.builder.get_object('username_entry')
        self.password_entry = self.builder.get_object('password_entry')
        cancel_button = self.builder.get_object('cancel_button')
        cancel_button.connect('clicked', self.on_close)
        connect_button = self.builder.get_object('connect_button')
        connect_button.connect('clicked', self.on_connect)
    def get_credentials(self):
        # (username, password) as currently typed in the form.
        username = self.username_entry.get_text()
        password = self.password_entry.get_text()
        return (username, password)
    def on_username_entry_activate(self, widget):
        # Enter in the username field: submit when both fields are filled,
        # otherwise move focus to the password field.
        if all(self.get_credentials()):
            self.on_connect(None)
        else:
            self.password_entry.grab_focus()
    def on_password_entry_activate(self, widget):
        # Enter in the password field: submit when both fields are filled,
        # otherwise jump back to the username field.
        if all(self.get_credentials()):
            self.on_connect(None)
        else:
            self.username_entry.grab_focus()
    def on_connect(self, widget):
        # Try to obtain an API token; emit 'connected' on success.
        username, password = self.get_credentials()
        token = api.connect(username, password)
        if not token:
            NoticeDialog("Login failed", parent=self.parent)
        else:
            self.emit('connected', username)
        self.dialog.destroy()
class ClientUpdateDialog(GtkBuilderDialog):
    """Dialog pointing the user at the Lutris download page."""
    glade_file = 'dialog-client-update.ui'
    dialog_object = "client_update_dialog"
    def on_open_downloads_clicked(self, _widget):
        # Open the Lutris website in the user's default browser.
        Gtk.show_uri(None, "http://lutris.net", Gdk.CURRENT_TIME)
class NoInstallerDialog(Gtk.MessageDialog):
    """Error dialog shown when a game has no installer available.

    The chosen action (MANUAL_CONF / NEW_INSTALLER / EXIT) is stored in
    ``self.result``.
    """
    MANUAL_CONF = 1
    NEW_INSTALLER = 2
    EXIT = 4
    def __init__(self, parent=None):
        Gtk.MessageDialog.__init__(self, parent, 0, Gtk.MessageType.ERROR,
                                   Gtk.ButtonsType.NONE,
                                   "Unable to install the game")
        self.format_secondary_text("No installer is available for this game")
        self.add_buttons("Configure manually", self.MANUAL_CONF,
                         "Write installer", self.NEW_INSTALLER,
                         "Close", self.EXIT)
        self.result = self.run()
        self.destroy()
| RobLoach/lutris | lutris/gui/dialogs.py | Python | gpl-3.0 | 11,016 |
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class MyuniversityscraperPipeline(object):
    """No-op item pipeline: passes every scraped item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to clean or validate yet; hand the item to the next stage.
        return item
| systemovich/scrapy-myuniversity | myuniversityscraper/pipelines.py | Python | gpl-3.0 | 273 |
# Copyright (c) 2015 Aaron Kehrer
# Licensed under the terms of the MIT License
# (see fiddle/__init__.py for details)
import os
import unicodedata
from io import StringIO
from PyQt4 import QtCore, QtGui
from fiddle.config import EDITOR_FONT, EDITOR_FONT_SIZE
class PyConsoleTextBrowser(QtGui.QTextBrowser):
    """Text browser acting as an interactive console attached to a process.

    Keystrokes are appended at the end of the document; on Return the text
    typed since the last output is written to the attached process' stdin.
    Up/Down navigate the submitted-line history.
    """
    def __init__(self, parent=None, process=None):
        super(PyConsoleTextBrowser, self).__init__(parent)
        self.process = process
        # The start position in the QTextBrowser document where new user input will be inserted
        self._input_insert_pos = -1
        # Previously submitted lines, navigated with Up/Down.
        self.history = []
        self.history_idx = 0
        self.setLineWrapMode(QtGui.QTextEdit.NoWrap)
        self.setAcceptRichText(False)
        self.setReadOnly(False)
        self.setOpenExternalLinks(False)
        self.setOpenLinks(False)
        self.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse | QtCore.Qt.TextEditorInteraction)
    def keyPressEvent(self, event):
        """Handle history navigation (Up/Down) and input submission (Return)."""
        if self.process is not None:
            # Skip keys modified with Ctrl or Alt
            if event.modifiers() != QtCore.Qt.ControlModifier and event.modifiers() != QtCore.Qt.AltModifier:
                # Get the insert cursor and make sure it's at the end of the console
                cursor = self.textCursor()
                cursor.movePosition(QtGui.QTextCursor.End)
                if self._input_insert_pos < 0:
                    # First keystroke since the last submission/output:
                    # remember where the editable input region starts.
                    self._input_insert_pos = cursor.position()
                # Scroll view to end of console
                self.setTextCursor(cursor)
                self.ensureCursorVisible()
                # Process the key event
                if event.key() == QtCore.Qt.Key_Up:
                    # Clear any previous input
                    self._clear_insert_line(cursor)
                    # Get the history
                    if len(self.history) > 0:
                        self.history_idx -= 1
                        try:
                            cursor.insertText(self.history[self.history_idx])
                        except IndexError:
                            # Walked past the oldest entry; undo and show nothing.
                            self.history_idx += 1
                            cursor.insertText('')
                elif event.key() == QtCore.Qt.Key_Down:
                    # Clear any previous input
                    self._clear_insert_line(cursor)
                    # Get the history
                    # NOTE(review): chained comparison = (history non-empty AND
                    # history_idx <= 0); asymmetric with the Key_Up branch — confirm.
                    if len(self.history) > 0 >= self.history_idx:
                        self.history_idx += 1
                        try:
                            cursor.insertText(self.history[self.history_idx])
                        except IndexError:
                            self.history_idx -= 1
                            cursor.insertText('')
                elif event.key() == QtCore.Qt.Key_Return:
                    # Send the typed line (plus newline) to the child process.
                    txt = self._select_insert_line(cursor)
                    self.process.write('{0}\n'.format(txt).encode('utf-8'))
                    # Reset the insert position
                    self._input_insert_pos = -1
                    # Update the history
                    self.history.append(txt)
                    self.history_idx = 0
        # Pass the event on to the parent for handling
        return QtGui.QTextBrowser.keyPressEvent(self, event)
    def _clear_insert_line(self, cursor):
        """
        Remove all the displayed text from the input insert line and clear the input buffer
        """
        cursor.setPosition(self._input_insert_pos, QtGui.QTextCursor.KeepAnchor)
        cursor.removeSelectedText()
    def _select_insert_line(self, cursor):
        # Select from the input start to the cursor and return the typed text.
        cursor.setPosition(self._input_insert_pos, QtGui.QTextCursor.KeepAnchor)
        txt = cursor.selectedText()
        cursor.clearSelection()
        return txt
class PyConsoleLineEdit(QtGui.QLineEdit):
    """
    Single-line console input with Tab indentation and Up/Down history.

    https://wiki.python.org/moin/PyQt/Adding%20tab-completion%20to%20a%20QLineEdit
    http://www.saltycrane.com/blog/2008/01/how-to-capture-tab-key-press-event-with/
    """
    def __init__(self):
        super(PyConsoleLineEdit, self).__init__()
        line_font = QtGui.QFont()
        line_font.setFamily(EDITOR_FONT)
        line_font.setPointSize(EDITOR_FONT_SIZE)
        self.setFont(line_font)
        # Submitted lines for Up/Down navigation; idx points one past the newest.
        self.history = []
        self.history_idx = -1
    def event(self, event):
        """Intercept Tab/Up/Down/Return before default QLineEdit handling."""
        if event.type() == QtCore.QEvent.KeyPress:
            if event.key() == QtCore.Qt.Key_Tab:
                # Append indentation to an otherwise-blank line instead of
                # letting Tab move keyboard focus.
                if self.text().strip() == '':
                    self.setText(self.text() + ' ')
                    return True
            elif event.key() == QtCore.Qt.Key_Up:
                # Step back through the history (only when not at the oldest).
                if len(self.history) > 0 and self.history_idx > 0:
                    self.history_idx -= 1
                    self.setText(self.history[self.history_idx])
                    return True
            elif event.key() == QtCore.Qt.Key_Down:
                # Chained comparison: history non-empty AND history_idx < len(history).
                if 0 < len(self.history) > self.history_idx:
                    self.history_idx += 1
                    try:
                        self.setText(self.history[self.history_idx])
                    except IndexError:
                        # Stepped past the newest entry: show an empty line.
                        self.setText('')
                    return True
            elif event.key() == QtCore.Qt.Key_Return:
                # Record the submitted line (skip consecutive duplicates).
                try:
                    if self.history[-1] != self.text():
                        self.history.append(self.text())
                except IndexError:
                    # History empty: always record the first line.
                    self.history.append(self.text())
                self.history_idx = len(self.history)
                return QtGui.QLineEdit.event(self, event)
        return QtGui.QLineEdit.event(self, event)
| akehrer/fiddle | fiddle/controllers/PyConsole.py | Python | gpl-3.0 | 5,579 |
# TODO: Implement the destructor agent
| vportascarta/UQAC-8INF844-SPHERO | agents/Destructor.py | Python | gpl-3.0 | 35 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 Thomas Jost and the Contributors
#
# This file is part of git-annex-remote-hubic.
#
# git-annex-remote-hubic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# git-annex-remote-hubic is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# git-annex-remote-hubic. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup, find_packages
# Package metadata and entry points for the hubiC git-annex special remote.
setup(name="git-annex-remote-hubic",
      version="0.3.2",
      description="A git-annex special remote for hubiC",
      # NOTE(review): this file handle is never closed explicitly; harmless
      # at install time, but a context manager would be cleaner.
      long_description=open("README.md", "r").read(),
      author="Thomas Jost",
      author_email="schnouki@schnouki.net",
      url="https://github.com/Schnouki/git-annex-remote-hubic",
      packages=find_packages(),
      # Runtime dependencies.
      install_requires=[
          "python-dateutil",
          "python-swiftclient>=2.1.0",
          "rauth>=0.7",
      ],
      # Console scripts: the special remote itself plus a migration helper.
      entry_points={
          "console_scripts": [
              "git-annex-remote-hubic = hubic_remote.main:main",
              "git-annex-remote-hubic-migrate = hubic_remote.migrate:main",
          ],
      },
      classifiers=[
          "Development Status :: 4 - Beta",
          "Environment :: Plugins",
          "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
          "Programming Language :: Python :: 2",
          "Topic :: System :: Archiving",
      ],
      )
| Schnouki/git-annex-remote-hubic | setup.py | Python | gpl-3.0 | 1,813 |
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to
# make it easier to submit large numbers of jobs on supercomputers. It
# provides a python interface to physical input, such as crystal structures,
# as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs.
# It is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# PyLaDa. If not, see <http://www.gnu.org/licenses/>.
###############################
from pytest import fixture
@fixture
def shell():
    """Pytest fixture: an IPython shell with the pylada extension loaded."""
    from IPython.core.interactiveshell import InteractiveShell
    shell = InteractiveShell.instance()
    shell.magic("load_ext pylada")
    return shell
def Extract(outdir=None):
    """Fake extraction object mimicking a Pylada functional's Extract.

    Reads the pickled (individual, value) pair from ``outdir/OUTCAR`` and
    returns a namedtuple with ``success``, ``directory``, ``indiv`` and
    ``functional`` fields.  The ``functional`` field resolves the
    module-level name of that fixture at call time.
    """
    from os.path import exists
    from os import getcwd
    from collections import namedtuple
    from pickle import load
    from pylada.misc import chdir

    # Idiomatic identity test instead of ``== None``.
    if outdir is None:
        outdir = getcwd()
    Extract = namedtuple("Extract", ["success", "directory", "indiv", "functional"])
    if not exists(outdir):
        return Extract(False, outdir, None, functional)
    with chdir(outdir):
        if not exists("OUTCAR"):
            return Extract(False, outdir, None, functional)
        with open("OUTCAR", "rb") as file:
            indiv, value = load(file)
        return Extract(True, outdir, indiv, functional)
def call_functional(indiv, outdir=None, value=False, **kwargs):
    """Fake functional: pickle (indiv, value) into outdir/OUTCAR and return
    the corresponding Extract result."""
    from pylada.misc import local_path
    from pickle import dump
    path = local_path(outdir)
    # Make sure the output directory exists before writing.
    path.ensure(dir=True)
    dump((indiv, value), path.join("OUTCAR").open("wb"))
    return Extract(outdir)
# Mimic the real functional API, which exposes its Extract class as an attribute.
call_functional.Extract = Extract
@fixture
def functional():
    """Pytest fixture returning the fake functional used by these tests."""
    return call_functional
#!/usr/bin/python2
import collections
import os
from loranode import RN2483Controller
# from ../_examplify.py import Examplify
import os
os.sys.path.append(os.path.dirname(os.path.abspath('.')))
from _examplify import Examplify
import lora, pmt, osmosdr
from gnuradio import gr, blocks
class ReceiveWhitening:
    """Decode LoRa captures and accumulate the recovered whitening sequence.

    A GNU Radio flowgraph replays a captured cfile through the lora receiver,
    which writes the whitening sequence to a temp file; that sequence is then
    appended to a CSV output file.
    """
    def __init__(self, sf = 7, output_file = './test_out.csv'):
        self.target_freq = 868.1e6
        self.sf = sf  # spreading factor
        self.samp_rate = 1e6
        self.capture_freq = 868.0e6
        # Frequency offset between capture centre and the LoRa channel.
        self.offset = -(self.capture_freq - self.target_freq)
        self.inputFile = './'
        self.outputFile = output_file
        # Path the lora receiver writes the recovered sequence to.
        self.tempFile = '/tmp/whitening_out'
        self.tb = None

    def captureSequence(self, inputFile):
        """Replay *inputFile* through the LoRa receiver and append the
        recovered whitening sequence to the output file.

        Raises Exception when the input, temp or output file is missing.
        """
        self.inputFile = inputFile
        if not os.path.isfile(self.inputFile):
            raise Exception("[ReceiveWhitening] Inputfile '" + self.inputFile + "' does not exist!")

        # Build and run the flowgraph: file source -> throttle -> lora receiver.
        self.tb = gr.top_block()
        self.file_source = blocks.file_source(gr.sizeof_gr_complex*1, self.inputFile, False) # Repeat input: True/False
        self.lora_lora_receiver_0 = lora.lora_receiver(self.samp_rate, self.capture_freq, self.offset, self.sf, self.samp_rate)
        self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, self.samp_rate, True)
        self.tb.connect( (self.file_source, 0), (self.blocks_throttle_0, 0))
        self.tb.connect( (self.blocks_throttle_0, 0), (self.lora_lora_receiver_0, 0))
        self.tb.run()
        self.tb = None

        # Same check order as before: temp file first, then output file.
        if not os.path.isfile(self.tempFile):
            raise Exception("[ReceiveWhitening] Tempfile '" + self.tempFile + "' does not exist!")
        if not os.path.isfile(self.outputFile):
            raise Exception("[ReceiveWhitening] Outputfile '" + self.outputFile + "' does not exist!")
        # Context managers guarantee both handles close even if I/O fails.
        with open(self.tempFile, 'r') as inf:
            seq = inf.read()
        with open(self.outputFile, 'a') as out:
            out.write(seq)
if __name__ == '__main__':
    ofile = '/tmp/tmp_whitening.cfile'
    # (spreading factor, coding rate) combinations to collect data for.
    testset = [ (7, "4/6"), (7, "4/7"), (8, "4/5"), (12, "4/6"), (9, "4/5"), (10, "4/5"), (11, "4/5"), (6, "4/5")]
    for settings in testset:
        # One CSV per (SF, CR) pair, e.g. test_out_SF7_CR4-6.csv
        dataf = './test_out_SF{0:d}_CR{1:s}.csv'.format(settings[0], '-'.join(settings[1].split('/')))
        # Touch the output file so appends below cannot fail on a missing file.
        out = open(dataf, 'a')
        out.close()
        examplifr = Examplify(settings[0], settings[1], gains = [32, 38, 38])
        whitening = ReceiveWhitening(settings[0], dataf)
        # 8 samples of 4 frames each, then 8 samples of 8 frames each.
        for i in range(8):
            print("Sample {0:d} of 16".format(i))
            examplifr.transmitToFile(['0' * 256] * 4, ofile)
            whitening.captureSequence(ofile)
        for i in range(8):
            print("Sample {0:d} of 16".format(i + 8))
            examplifr.transmitToFile(['0' * 256] * 8, ofile)
            whitening.captureSequence(ofile)
        # Drop references so flowgraph resources can be reclaimed.
        examplifr = None
        whitening = None
from vadvisor.store.event import InMemoryStore
import pytest
from freezegun import freeze_time
from datetime import datetime, timedelta
@pytest.fixture
@freeze_time("2012-01-14 03:00:00")
def expired_store():
    """Store with a 60 s retention window holding three already-old events."""
    store = InMemoryStore(60)
    # Insert old data
    store.put('old')
    store.put('old')
    store.put('old')
    return store
@pytest.fixture
@freeze_time("2012-01-14 03:01:30")
def new_store(expired_store):
    """expired_store plus three 'new' events added 90 s after the old ones."""
    # Insert newer data
    expired_store.put('new')
    expired_store.put('new')
    expired_store.put('new')
    return expired_store
@pytest.fixture
@freeze_time("2012-01-14 03:01:50")
def newest_store(new_store):
    """new_store plus three 'newest' events added 20 s after the 'new' ones."""
    # Insert newer data
    new_store.put('newest')
    new_store.put('newest')
    new_store.put('newest')
    return new_store
def test_empty_store():
    """A freshly created store yields no events."""
    store = InMemoryStore()
    assert store.get() == []
@freeze_time("2012-01-14 03:02:00")
def test_expire_on_get(expired_store):
    """Events older than the 60 s window are dropped on access."""
    # The first get() triggers expiry of the stale entries.
    expired_store.get()
    assert expired_store.get() == []
@freeze_time("2012-01-14 03:02:00")
def test_get_all_new(new_store):
    """Only the events inside the retention window are returned."""
    assert new_store.get() == ['new', 'new', 'new']
@freeze_time("2012-01-14 03:02:00")
def test_get_two_new(new_store):
    """The elements parameter caps how many events are returned."""
    assert new_store.get(elements=2) == ['new', 'new']
@freeze_time("2012-01-14 03:02:00")
def test_get_not_older_than(newest_store):
    """start_time filters out events older than the given timestamp."""
    events = newest_store.get(
        elements=2,
        start_time=datetime.utcnow() - timedelta(seconds=20)
    )
    assert events == ['newest', 'newest']
@freeze_time("2012-01-14 03:02:00")
def test_get_not_newer_than(newest_store):
    """stop_time filters out events newer than the given timestamp."""
    events = newest_store.get(
        elements=2,
        stop_time=datetime.utcnow() - timedelta(seconds=20)
    )
    assert events == ['new', 'new']
| kubevirt/vAdvisor | tests/store/test_event.py | Python | gpl-3.0 | 1,720 |
"""
__TMWQuestDragonEggActions_MDL.py_____________________________________________________
Automatically generated AToM3 Model File (Do not modify directly)
Author: bogdan
Modified: Wed May 2 00:27:03 2018
______________________________________________________________________________________
"""
from stickylink import *
from widthXfillXdecoration import *
from OrgUnit import *
from Role import *
from Action import *
from IndividualKnArt import *
from Objective import *
from isPartOfOrgUnit import *
from canHaveRole import *
from hasActions import *
from canAccessKnArt import *
from isPartOfObjective import *
from hasObjective import *
from precedentTo import *
from graph_canHaveRole import *
from graph_canAccessKnArt import *
from graph_isPartOfOrgUnit import *
from graph_Action import *
from graph_precedentTo import *
from graph_Objective import *
from graph_hasObjective import *
from graph_Role import *
from graph_OrgUnit import *
from graph_IndividualKnArt import *
from graph_isPartOfObjective import *
from graph_hasActions import *
from ATOM3Enum import *
from ATOM3String import *
from ATOM3BottomType import *
from ATOM3Constraint import *
from ATOM3Attribute import *
from ATOM3Float import *
from ATOM3List import *
from ATOM3Link import *
from ATOM3Connection import *
from ATOM3Boolean import *
from ATOM3Appearance import *
from ATOM3Text import *
from ATOM3Action import *
from ATOM3Integer import *
from ATOM3Port import *
from ATOM3MSEnum import *
def TMWQuestDragonEggActions_MDL(self, rootNode, LSMASOMMRootNode=None):
# --- Generating attributes code for ASG LSMASOMM ---
if( LSMASOMMRootNode ):
# agentImplementation
LSMASOMMRootNode.agentImplementation.setValue( (['SPADE', 'Enmasse', 'EveJS'], 0) )
LSMASOMMRootNode.agentImplementation.config = 0
# author
LSMASOMMRootNode.author.setValue('Annonymous')
# description
LSMASOMMRootNode.description.setValue('\n')
LSMASOMMRootNode.description.setHeight(15)
# name
LSMASOMMRootNode.name.setValue('TMW')
# title
LSMASOMMRootNode.title.setValue('QuestDragonEgg')
# --- ASG attributes over ---
self.obj118=OrgUnit(self)
self.obj118.isGraphObjectVisual = True
if(hasattr(self.obj118, '_setHierarchicalLink')):
self.obj118._setHierarchicalLink(False)
# Individual
self.obj118.Individual.setValue(('1', 0))
self.obj118.Individual.config = 0
# hasActions
self.obj118.hasActions.setActionFlags([ 1, 1, 1, 0])
lcobj2 =[]
cobj2=ATOM3String('ChangeRole', 20)
lcobj2.append(cobj2)
self.obj118.hasActions.setValue(lcobj2)
# ID
self.obj118.ID.setValue('OU|0')
# name
self.obj118.name.setValue('Avatar')
# UnitSize
self.obj118.UnitSize.setValue('Individual')
self.obj118.graphClass_= graph_OrgUnit
if self.genGraphics:
new_obj = graph_OrgUnit(530.0,890.0,self.obj118)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("OrgUnit", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj118.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj118)
self.globalAndLocalPostcondition(self.obj118, rootNode)
self.obj118.postAction( rootNode.CREATE )
self.obj119=OrgUnit(self)
self.obj119.isGraphObjectVisual = True
if(hasattr(self.obj119, '_setHierarchicalLink')):
self.obj119._setHierarchicalLink(False)
# Individual
self.obj119.Individual.setValue(('1', 0))
self.obj119.Individual.config = 0
# hasActions
self.obj119.hasActions.setActionFlags([ 1, 1, 1, 0])
lcobj2 =[]
cobj2=ATOM3String('ChangeRole', 20)
lcobj2.append(cobj2)
self.obj119.hasActions.setValue(lcobj2)
# ID
self.obj119.ID.setValue('OU|1')
# name
self.obj119.name.setValue('Party')
# UnitSize
self.obj119.UnitSize.setValue('Group')
self.obj119.graphClass_= graph_OrgUnit
if self.genGraphics:
new_obj = graph_OrgUnit(370.0,890.0,self.obj119)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("OrgUnit", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj119.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj119)
self.globalAndLocalPostcondition(self.obj119, rootNode)
self.obj119.postAction( rootNode.CREATE )
self.obj104=Role(self)
self.obj104.isGraphObjectVisual = True
if(hasattr(self.obj104, '_setHierarchicalLink')):
self.obj104._setHierarchicalLink(False)
# isMetaRole
self.obj104.isMetaRole.setValue((None, 0))
self.obj104.isMetaRole.config = 0
# hasActions
self.obj104.hasActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
cobj2=ATOM3String('move', 20)
lcobj2.append(cobj2)
self.obj104.hasActions.setValue(lcobj2)
# ID
self.obj104.ID.setValue('R|0')
# name
self.obj104.name.setValue('Scout')
self.obj104.graphClass_= graph_Role
if self.genGraphics:
new_obj = graph_Role(190.0,730.0,self.obj104)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Role", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj104.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj104)
self.globalAndLocalPostcondition(self.obj104, rootNode)
self.obj104.postAction( rootNode.CREATE )
self.obj105=Role(self)
self.obj105.isGraphObjectVisual = True
if(hasattr(self.obj105, '_setHierarchicalLink')):
self.obj105._setHierarchicalLink(False)
# isMetaRole
self.obj105.isMetaRole.setValue((None, 0))
self.obj105.isMetaRole.config = 0
# hasActions
self.obj105.hasActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
cobj2=ATOM3String('harvestItem', 20)
lcobj2.append(cobj2)
cobj2=ATOM3String('craftItem', 20)
lcobj2.append(cobj2)
self.obj105.hasActions.setValue(lcobj2)
# ID
self.obj105.ID.setValue('R|1')
# name
self.obj105.name.setValue('Maker')
self.obj105.graphClass_= graph_Role
if self.genGraphics:
new_obj = graph_Role(340.0,730.0,self.obj105)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Role", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj105.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj105)
self.globalAndLocalPostcondition(self.obj105, rootNode)
self.obj105.postAction( rootNode.CREATE )
self.obj106=Role(self)
self.obj106.isGraphObjectVisual = True
if(hasattr(self.obj106, '_setHierarchicalLink')):
self.obj106._setHierarchicalLink(False)
# isMetaRole
self.obj106.isMetaRole.setValue((None, 0))
self.obj106.isMetaRole.config = 0
# hasActions
self.obj106.hasActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
cobj2=ATOM3String('learnSpell', 20)
lcobj2.append(cobj2)
self.obj106.hasActions.setValue(lcobj2)
# ID
self.obj106.ID.setValue('R|2')
# name
self.obj106.name.setValue('Wizard')
self.obj106.graphClass_= graph_Role
if self.genGraphics:
new_obj = graph_Role(790.0,730.0,self.obj106)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Role", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj106.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj106)
self.globalAndLocalPostcondition(self.obj106, rootNode)
self.obj106.postAction( rootNode.CREATE )
self.obj125=Role(self)
self.obj125.isGraphObjectVisual = True
if(hasattr(self.obj125, '_setHierarchicalLink')):
self.obj125._setHierarchicalLink(False)
# isMetaRole
self.obj125.isMetaRole.setValue((None, 0))
self.obj125.isMetaRole.config = 0
# hasActions
self.obj125.hasActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
self.obj125.hasActions.setValue(lcobj2)
# ID
self.obj125.ID.setValue('R|3')
# name
self.obj125.name.setValue('PartyFounder')
self.obj125.graphClass_= graph_Role
if self.genGraphics:
new_obj = graph_Role(790.0,860.0,self.obj125)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Role", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj125.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj125)
self.globalAndLocalPostcondition(self.obj125, rootNode)
self.obj125.postAction( rootNode.CREATE )
self.obj126=Role(self)
self.obj126.isGraphObjectVisual = True
if(hasattr(self.obj126, '_setHierarchicalLink')):
self.obj126._setHierarchicalLink(False)
# isMetaRole
self.obj126.isMetaRole.setValue((None, 0))
self.obj126.isMetaRole.config = 0
# hasActions
self.obj126.hasActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
self.obj126.hasActions.setValue(lcobj2)
# ID
self.obj126.ID.setValue('R|4')
# name
self.obj126.name.setValue('PartyMember')
self.obj126.graphClass_= graph_Role
if self.genGraphics:
new_obj = graph_Role(790.0,940.0,self.obj126)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Role", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj126.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj126)
self.globalAndLocalPostcondition(self.obj126, rootNode)
self.obj126.postAction( rootNode.CREATE )
self.obj86=Action(self)
self.obj86.isGraphObjectVisual = True
if(hasattr(self.obj86, '_setHierarchicalLink')):
self.obj86._setHierarchicalLink(False)
# ID
self.obj86.ID.setValue('A|0')
# name
self.obj86.name.setValue('move')
# ActionCode
self.obj86.ActionCode.setValue('#action code template\nclass BehaviourNamePlaceholder(spade.Behaviour.OneShotBehaviour):\n """Behaviour available to agents."""\n def _process(self):\n pass\n')
self.obj86.ActionCode.setHeight(15)
self.obj86.graphClass_= graph_Action
if self.genGraphics:
new_obj = graph_Action(180.0,590.0,self.obj86)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Action", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['Text Scale'] = 1.0
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj86.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj86)
self.globalAndLocalPostcondition(self.obj86, rootNode)
self.obj86.postAction( rootNode.CREATE )
self.obj87=Action(self)
self.obj87.isGraphObjectVisual = True
if(hasattr(self.obj87, '_setHierarchicalLink')):
self.obj87._setHierarchicalLink(False)
# ID
self.obj87.ID.setValue('A|1')
# name
self.obj87.name.setValue('harvestItem')
# ActionCode
self.obj87.ActionCode.setValue('#action code template\nclass BehaviourNamePlaceholder(spade.Behaviour.OneShotBehaviour):\n """Behaviour available to agents."""\n def _process(self):\n pass\n')
self.obj87.ActionCode.setHeight(15)
self.obj87.graphClass_= graph_Action
if self.genGraphics:
new_obj = graph_Action(280.0,590.0,self.obj87)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Action", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj87.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj87)
self.globalAndLocalPostcondition(self.obj87, rootNode)
self.obj87.postAction( rootNode.CREATE )
self.obj88=Action(self)
self.obj88.isGraphObjectVisual = True
if(hasattr(self.obj88, '_setHierarchicalLink')):
self.obj88._setHierarchicalLink(False)
# ID
self.obj88.ID.setValue('A|2')
# name
self.obj88.name.setValue('craftItem')
# ActionCode
self.obj88.ActionCode.setValue('#action code template\nclass BehaviourNamePlaceholder(spade.Behaviour.OneShotBehaviour):\n """Behaviour available to agents."""\n def _process(self):\n pass\n')
self.obj88.ActionCode.setHeight(15)
self.obj88.graphClass_= graph_Action
if self.genGraphics:
new_obj = graph_Action(380.0,590.0,self.obj88)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Action", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj88.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj88)
self.globalAndLocalPostcondition(self.obj88, rootNode)
self.obj88.postAction( rootNode.CREATE )
self.obj89=Action(self)
self.obj89.isGraphObjectVisual = True
if(hasattr(self.obj89, '_setHierarchicalLink')):
self.obj89._setHierarchicalLink(False)
# ID
self.obj89.ID.setValue('A|3')
# name
self.obj89.name.setValue('learnSpell')
# ActionCode
self.obj89.ActionCode.setValue('#action code template\nclass BehaviourNamePlaceholder(spade.Behaviour.OneShotBehaviour):\n """Behaviour available to agents."""\n def _process(self):\n pass\n')
self.obj89.ActionCode.setHeight(15)
self.obj89.graphClass_= graph_Action
if self.genGraphics:
new_obj = graph_Action(780.0,590.0,self.obj89)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Action", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['Text Scale'] = 1.01
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj89.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj89)
self.globalAndLocalPostcondition(self.obj89, rootNode)
self.obj89.postAction( rootNode.CREATE )
self.obj133=IndividualKnArt(self)
self.obj133.isGraphObjectVisual = True
if(hasattr(self.obj133, '_setHierarchicalLink')):
self.obj133._setHierarchicalLink(False)
# description
self.obj133.description.setValue('AvatarInventory')
# ID
self.obj133.ID.setValue('AvatarInventory')
# name
self.obj133.name.setValue('AvatarInventory')
# KnArtContent
self.obj133.KnArtContent.setValue('#content of the artifact\n')
self.obj133.KnArtContent.setHeight(15)
self.obj133.graphClass_= graph_IndividualKnArt
if self.genGraphics:
new_obj = graph_IndividualKnArt(480.0,1040.0,self.obj133)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("IndividualKnArt", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj133.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj133)
self.globalAndLocalPostcondition(self.obj133, rootNode)
self.obj133.postAction( rootNode.CREATE )
self.obj134=IndividualKnArt(self)
self.obj134.isGraphObjectVisual = True
if(hasattr(self.obj134, '_setHierarchicalLink')):
self.obj134._setHierarchicalLink(False)
# description
self.obj134.description.setValue('KnArtDesc')
# ID
self.obj134.ID.setValue('AvatarAttributes')
# name
self.obj134.name.setValue('AvatarAttributes')
# KnArtContent
self.obj134.KnArtContent.setValue('#content of the artifact\n')
self.obj134.KnArtContent.setHeight(15)
self.obj134.graphClass_= graph_IndividualKnArt
if self.genGraphics:
new_obj = graph_IndividualKnArt(600.0,1040.0,self.obj134)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("IndividualKnArt", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj134.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj134)
self.globalAndLocalPostcondition(self.obj134, rootNode)
self.obj134.postAction( rootNode.CREATE )
self.obj44=Objective(self)
self.obj44.isGraphObjectVisual = True
if(hasattr(self.obj44, '_setHierarchicalLink')):
self.obj44._setHierarchicalLink(False)
# description
self.obj44.description.setValue('\n')
self.obj44.description.setHeight(4)
# ofActions
self.obj44.ofActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
self.obj44.ofActions.setValue(lcobj2)
# Measurement
self.obj44.Measurement.setValue('\n')
self.obj44.Measurement.setHeight(4)
# Reward
self.obj44.Reward.setValue('\n')
self.obj44.Reward.setHeight(4)
# ID
self.obj44.ID.setValue('O|0')
# name
self.obj44.name.setValue('FinishQuestForTheDragonEgg')
self.obj44.graphClass_= graph_Objective
if self.genGraphics:
new_obj = graph_Objective(544.0,31.0,self.obj44)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Objective", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['Text Scale'] = 1.1
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj44.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj44)
self.globalAndLocalPostcondition(self.obj44, rootNode)
self.obj44.postAction( rootNode.CREATE )
self.obj45=Objective(self)
self.obj45.isGraphObjectVisual = True
if(hasattr(self.obj45, '_setHierarchicalLink')):
self.obj45._setHierarchicalLink(False)
# description
self.obj45.description.setValue('\n')
self.obj45.description.setHeight(4)
# ofActions
self.obj45.ofActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
self.obj45.ofActions.setValue(lcobj2)
# Measurement
self.obj45.Measurement.setValue('\n')
self.obj45.Measurement.setHeight(4)
# Reward
self.obj45.Reward.setValue('\n')
self.obj45.Reward.setHeight(4)
# ID
self.obj45.ID.setValue('O|1')
# name
self.obj45.name.setValue('HatchDragonEgg')
self.obj45.graphClass_= graph_Objective
if self.genGraphics:
new_obj = graph_Objective(464.0,131.0,self.obj45)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Objective", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj45.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj45)
self.globalAndLocalPostcondition(self.obj45, rootNode)
self.obj45.postAction( rootNode.CREATE )
self.obj46=Objective(self)
self.obj46.isGraphObjectVisual = True
if(hasattr(self.obj46, '_setHierarchicalLink')):
self.obj46._setHierarchicalLink(False)
# description
self.obj46.description.setValue('\n')
self.obj46.description.setHeight(4)
# ofActions
self.obj46.ofActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
cobj2=ATOM3String('learn', 20)
lcobj2.append(cobj2)
self.obj46.ofActions.setValue(lcobj2)
# Measurement
self.obj46.Measurement.setValue('\n')
self.obj46.Measurement.setHeight(4)
# Reward
self.obj46.Reward.setValue('\n')
self.obj46.Reward.setHeight(4)
# ID
self.obj46.ID.setValue('O|2')
# name
self.obj46.name.setValue('LearnSpell')
self.obj46.graphClass_= graph_Objective
if self.genGraphics:
new_obj = graph_Objective(694.0,131.0,self.obj46)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Objective", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj46.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj46)
self.globalAndLocalPostcondition(self.obj46, rootNode)
self.obj46.postAction( rootNode.CREATE )
self.obj47=Objective(self)
self.obj47.isGraphObjectVisual = True
if(hasattr(self.obj47, '_setHierarchicalLink')):
self.obj47._setHierarchicalLink(False)
# description
self.obj47.description.setValue('\n')
self.obj47.description.setHeight(4)
# ofActions
self.obj47.ofActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
cobj2=ATOM3String('craftItem', 20)
lcobj2.append(cobj2)
self.obj47.ofActions.setValue(lcobj2)
# Measurement
self.obj47.Measurement.setValue('\n')
self.obj47.Measurement.setHeight(4)
# Reward
self.obj47.Reward.setValue('\n')
self.obj47.Reward.setHeight(4)
# ID
self.obj47.ID.setValue('O|3')
# name
self.obj47.name.setValue('BrewHatchingPotion')
self.obj47.graphClass_= graph_Objective
if self.genGraphics:
new_obj = graph_Objective(220.0,280.0,self.obj47)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Objective", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj47.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj47)
self.globalAndLocalPostcondition(self.obj47, rootNode)
self.obj47.postAction( rootNode.CREATE )
self.obj48=Objective(self)
self.obj48.isGraphObjectVisual = True
if(hasattr(self.obj48, '_setHierarchicalLink')):
self.obj48._setHierarchicalLink(False)
# description
self.obj48.description.setValue('\n')
self.obj48.description.setHeight(4)
# ofActions
self.obj48.ofActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
cobj2=ATOM3String('move', 20)
lcobj2.append(cobj2)
self.obj48.ofActions.setValue(lcobj2)
# Measurement
self.obj48.Measurement.setValue('\n')
self.obj48.Measurement.setHeight(4)
# Reward
self.obj48.Reward.setValue('\n')
self.obj48.Reward.setHeight(4)
# ID
self.obj48.ID.setValue('O|4')
# name
self.obj48.name.setValue('TransportDragonEgg')
self.obj48.graphClass_= graph_Objective
if self.genGraphics:
new_obj = graph_Objective(520.0,280.0,self.obj48)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Objective", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj48.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj48)
self.globalAndLocalPostcondition(self.obj48, rootNode)
self.obj48.postAction( rootNode.CREATE )
self.obj49=Objective(self)
self.obj49.isGraphObjectVisual = True
if(hasattr(self.obj49, '_setHierarchicalLink')):
self.obj49._setHierarchicalLink(False)
# description
self.obj49.description.setValue('\n')
self.obj49.description.setHeight(4)
# ofActions
self.obj49.ofActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
cobj2=ATOM3String('move', 20)
lcobj2.append(cobj2)
self.obj49.ofActions.setValue(lcobj2)
# Measurement
self.obj49.Measurement.setValue('\n')
self.obj49.Measurement.setHeight(4)
# Reward
self.obj49.Reward.setValue('\n')
self.obj49.Reward.setHeight(4)
# ID
self.obj49.ID.setValue('O|5')
# name
self.obj49.name.setValue('FindEggHermit')
self.obj49.graphClass_= graph_Objective
if self.genGraphics:
new_obj = graph_Objective(680.0,280.0,self.obj49)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Objective", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj49.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj49)
self.globalAndLocalPostcondition(self.obj49, rootNode)
self.obj49.postAction( rootNode.CREATE )
self.obj50=Objective(self)
self.obj50.isGraphObjectVisual = True
if(hasattr(self.obj50, '_setHierarchicalLink')):
self.obj50._setHierarchicalLink(False)
# description
self.obj50.description.setValue('\n')
self.obj50.description.setHeight(4)
# ofActions
self.obj50.ofActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
cobj2=ATOM3String('move', 20)
lcobj2.append(cobj2)
self.obj50.ofActions.setValue(lcobj2)
# Measurement
self.obj50.Measurement.setValue('\n')
self.obj50.Measurement.setHeight(4)
# Reward
self.obj50.Reward.setValue('\n')
self.obj50.Reward.setHeight(4)
# ID
self.obj50.ID.setValue('O|6')
# name
self.obj50.name.setValue('FindDragonEgg')
self.obj50.graphClass_= graph_Objective
if self.genGraphics:
new_obj = graph_Objective(380.0,280.0,self.obj50)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Objective", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj50.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj50)
self.globalAndLocalPostcondition(self.obj50, rootNode)
self.obj50.postAction( rootNode.CREATE )
self.obj51=Objective(self)
self.obj51.isGraphObjectVisual = True
if(hasattr(self.obj51, '_setHierarchicalLink')):
self.obj51._setHierarchicalLink(False)
# description
self.obj51.description.setValue('\n')
self.obj51.description.setHeight(4)
# ofActions
self.obj51.ofActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
self.obj51.ofActions.setValue(lcobj2)
# Measurement
self.obj51.Measurement.setValue('\n')
self.obj51.Measurement.setHeight(4)
# Reward
self.obj51.Reward.setValue('\n')
self.obj51.Reward.setHeight(4)
# ID
self.obj51.ID.setValue('O|7')
# name
self.obj51.name.setValue('GatherPotionIngredients')
self.obj51.graphClass_= graph_Objective
if self.genGraphics:
new_obj = graph_Objective(60.0,280.0,self.obj51)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Objective", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj51.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj51)
self.globalAndLocalPostcondition(self.obj51, rootNode)
self.obj51.postAction( rootNode.CREATE )
self.obj52=Objective(self)
self.obj52.isGraphObjectVisual = True
if(hasattr(self.obj52, '_setHierarchicalLink')):
self.obj52._setHierarchicalLink(False)
# description
self.obj52.description.setValue('\n')
self.obj52.description.setHeight(4)
# ofActions
self.obj52.ofActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
cobj2=ATOM3String('move', 20)
lcobj2.append(cobj2)
self.obj52.ofActions.setValue(lcobj2)
# Measurement
self.obj52.Measurement.setValue('\n')
self.obj52.Measurement.setHeight(4)
# Reward
self.obj52.Reward.setValue('\n')
self.obj52.Reward.setHeight(4)
# ID
self.obj52.ID.setValue('O|8')
# name
self.obj52.name.setValue('FindItemIngredient')
self.obj52.graphClass_= graph_Objective
if self.genGraphics:
new_obj = graph_Objective(50.0,430.0,self.obj52)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Objective", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj52.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj52)
self.globalAndLocalPostcondition(self.obj52, rootNode)
self.obj52.postAction( rootNode.CREATE )
self.obj53=Objective(self)
self.obj53.isGraphObjectVisual = True
if(hasattr(self.obj53, '_setHierarchicalLink')):
self.obj53._setHierarchicalLink(False)
# description
self.obj53.description.setValue('\n')
self.obj53.description.setHeight(4)
# ofActions
self.obj53.ofActions.setActionFlags([ 0, 0, 1, 0])
lcobj2 =[]
cobj2=ATOM3String('harvestItem', 20)
lcobj2.append(cobj2)
self.obj53.ofActions.setValue(lcobj2)
# Measurement
self.obj53.Measurement.setValue('\n')
self.obj53.Measurement.setHeight(4)
# Reward
self.obj53.Reward.setValue('\n')
self.obj53.Reward.setHeight(4)
# ID
self.obj53.ID.setValue('O|9')
# name
self.obj53.name.setValue('HarvestItemIngredient')
self.obj53.graphClass_= graph_Objective
if self.genGraphics:
new_obj = graph_Objective(240.0,430.0,self.obj53)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("Objective", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj53.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj53)
self.globalAndLocalPostcondition(self.obj53, rootNode)
self.obj53.postAction( rootNode.CREATE )
self.obj132=isPartOfOrgUnit(self)
self.obj132.isGraphObjectVisual = True
if(hasattr(self.obj132, '_setHierarchicalLink')):
self.obj132._setHierarchicalLink(True)
# ID
self.obj132.ID.setValue('pOU|0')
self.obj132.graphClass_= graph_isPartOfOrgUnit
if self.genGraphics:
new_obj = graph_isPartOfOrgUnit(486.0,954.0,self.obj132)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("isPartOfOrgUnit", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj132.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj132)
self.globalAndLocalPostcondition(self.obj132, rootNode)
self.obj132.postAction( rootNode.CREATE )
self.obj124=canHaveRole(self)
self.obj124.isGraphObjectVisual = True
if(hasattr(self.obj124, '_setHierarchicalLink')):
self.obj124._setHierarchicalLink(True)
# ID
self.obj124.ID.setValue('OUR|0')
self.obj124.graphClass_= graph_canHaveRole
if self.genGraphics:
new_obj = graph_canHaveRole(601.5,788.5,self.obj124)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("canHaveRole", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj124.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj124)
self.globalAndLocalPostcondition(self.obj124, rootNode)
self.obj124.postAction( rootNode.CREATE )
self.obj131=canHaveRole(self)
self.obj131.isGraphObjectVisual = True
if(hasattr(self.obj131, '_setHierarchicalLink')):
self.obj131._setHierarchicalLink(True)
# ID
self.obj131.ID.setValue('OUR|1')
self.obj131.graphClass_= graph_canHaveRole
if self.genGraphics:
new_obj = graph_canHaveRole(678.5,952.5,self.obj131)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("canHaveRole", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj131.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj131)
self.globalAndLocalPostcondition(self.obj131, rootNode)
self.obj131.postAction( rootNode.CREATE )
self.obj113=hasActions(self)
self.obj113.isGraphObjectVisual = True
if(hasattr(self.obj113, '_setHierarchicalLink')):
self.obj113._setHierarchicalLink(False)
# ID
self.obj113.ID.setValue('aR|0')
self.obj113.graphClass_= graph_hasActions
if self.genGraphics:
new_obj = graph_hasActions(226.0,699.0,self.obj113)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("hasActions", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj113.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj113)
self.globalAndLocalPostcondition(self.obj113, rootNode)
self.obj113.postAction( rootNode.CREATE )
self.obj114=hasActions(self)
self.obj114.isGraphObjectVisual = True
if(hasattr(self.obj114, '_setHierarchicalLink')):
self.obj114._setHierarchicalLink(False)
# ID
self.obj114.ID.setValue('aR|1')
self.obj114.graphClass_= graph_hasActions
if self.genGraphics:
new_obj = graph_hasActions(374.0,701.0,self.obj114)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("hasActions", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj114.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj114)
self.globalAndLocalPostcondition(self.obj114, rootNode)
self.obj114.postAction( rootNode.CREATE )
self.obj115=hasActions(self)
self.obj115.isGraphObjectVisual = True
if(hasattr(self.obj115, '_setHierarchicalLink')):
self.obj115._setHierarchicalLink(False)
# ID
self.obj115.ID.setValue('aR|2')
self.obj115.graphClass_= graph_hasActions
if self.genGraphics:
new_obj = graph_hasActions(827.0,698.0,self.obj115)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("hasActions", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj115.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj115)
self.globalAndLocalPostcondition(self.obj115, rootNode)
self.obj115.postAction( rootNode.CREATE )
self.obj145=canAccessKnArt(self)
self.obj145.isGraphObjectVisual = True
if(hasattr(self.obj145, '_setHierarchicalLink')):
self.obj145._setHierarchicalLink(False)
# ID
self.obj145.ID.setValue('accKA|0')
self.obj145.graphClass_= graph_canAccessKnArt
if self.genGraphics:
new_obj = graph_canAccessKnArt(563.5,1042.5,self.obj145)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("canAccessKnArt", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj145.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj145)
self.globalAndLocalPostcondition(self.obj145, rootNode)
self.obj145.postAction( rootNode.CREATE )
self.obj75=isPartOfObjective(self)
self.obj75.isGraphObjectVisual = True
if(hasattr(self.obj75, '_setHierarchicalLink')):
self.obj75._setHierarchicalLink(True)
# ID
self.obj75.ID.setValue('pO|0')
self.obj75.graphClass_= graph_isPartOfObjective
if self.genGraphics:
new_obj = graph_isPartOfObjective(177.0,398.0,self.obj75)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("isPartOfObjective", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj75.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj75)
self.globalAndLocalPostcondition(self.obj75, rootNode)
self.obj75.postAction( rootNode.CREATE )
self.obj80=isPartOfObjective(self)
self.obj80.isGraphObjectVisual = True
if(hasattr(self.obj80, '_setHierarchicalLink')):
self.obj80._setHierarchicalLink(True)
# ID
self.obj80.ID.setValue('pO|1')
self.obj80.graphClass_= graph_isPartOfObjective
if self.genGraphics:
new_obj = graph_isPartOfObjective(503.0,249.0,self.obj80)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("isPartOfObjective", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj80.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj80)
self.globalAndLocalPostcondition(self.obj80, rootNode)
self.obj80.postAction( rootNode.CREATE )
self.obj82=isPartOfObjective(self)
self.obj82.isGraphObjectVisual = True
if(hasattr(self.obj82, '_setHierarchicalLink')):
self.obj82._setHierarchicalLink(True)
# ID
self.obj82.ID.setValue('pO|2')
self.obj82.graphClass_= graph_isPartOfObjective
if self.genGraphics:
new_obj = graph_isPartOfObjective(609.0,137.0,self.obj82)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("isPartOfObjective", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj82.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj82)
self.globalAndLocalPostcondition(self.obj82, rootNode)
self.obj82.postAction( rootNode.CREATE )
self.obj98=hasObjective(self)
self.obj98.isGraphObjectVisual = True
if(hasattr(self.obj98, '_setHierarchicalLink')):
self.obj98._setHierarchicalLink(False)
# ID
self.obj98.ID.setValue('RPO|0')
self.obj98.graphClass_= graph_hasObjective
if self.genGraphics:
new_obj = graph_hasObjective(225.354103175,548.534028917,self.obj98)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("hasObjective", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj98.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj98)
self.globalAndLocalPostcondition(self.obj98, rootNode)
self.obj98.postAction( rootNode.CREATE )
self.obj99=hasObjective(self)
self.obj99.isGraphObjectVisual = True
if(hasattr(self.obj99, '_setHierarchicalLink')):
self.obj99._setHierarchicalLink(False)
# ID
self.obj99.ID.setValue('RPO|1')
self.obj99.graphClass_= graph_hasObjective
if self.genGraphics:
new_obj = graph_hasObjective(325.492909804,550.264185836,self.obj99)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("hasObjective", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj99.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj99)
self.globalAndLocalPostcondition(self.obj99, rootNode)
self.obj99.postAction( rootNode.CREATE )
self.obj100=hasObjective(self)
self.obj100.isGraphObjectVisual = True
if(hasattr(self.obj100, '_setHierarchicalLink')):
self.obj100._setHierarchicalLink(False)
# ID
self.obj100.ID.setValue('RPO|2')
self.obj100.graphClass_= graph_hasObjective
if self.genGraphics:
new_obj = graph_hasObjective(424.752042781,549.309380895,self.obj100)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("hasObjective", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj100.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj100)
self.globalAndLocalPostcondition(self.obj100, rootNode)
self.obj100.postAction( rootNode.CREATE )
self.obj101=hasObjective(self)
self.obj101.isGraphObjectVisual = True
if(hasattr(self.obj101, '_setHierarchicalLink')):
self.obj101._setHierarchicalLink(False)
# ID
self.obj101.ID.setValue('RPO|3')
self.obj101.graphClass_= graph_hasObjective
if self.genGraphics:
new_obj = graph_hasObjective(824.680362868,550.007372124,self.obj101)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("hasObjective", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj101.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj101)
self.globalAndLocalPostcondition(self.obj101, rootNode)
self.obj101.postAction( rootNode.CREATE )
self.obj74=precedentTo(self)
self.obj74.isGraphObjectVisual = True
if(hasattr(self.obj74, '_setHierarchicalLink')):
self.obj74._setHierarchicalLink(True)
# ID
self.obj74.ID.setValue('OpO|0')
self.obj74.graphClass_= graph_precedentTo
if self.genGraphics:
new_obj = graph_precedentTo(203.489587602,474.039234071,self.obj74)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("precedentTo", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj74.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj74)
self.globalAndLocalPostcondition(self.obj74, rootNode)
self.obj74.postAction( rootNode.CREATE )
self.obj76=precedentTo(self)
self.obj76.isGraphObjectVisual = True
if(hasattr(self.obj76, '_setHierarchicalLink')):
self.obj76._setHierarchicalLink(True)
# ID
self.obj76.ID.setValue('OpO|1')
self.obj76.graphClass_= graph_precedentTo
if self.genGraphics:
new_obj = graph_precedentTo(202.120938013,322.2623221,self.obj76)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("precedentTo", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj76.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj76)
self.globalAndLocalPostcondition(self.obj76, rootNode)
self.obj76.postAction( rootNode.CREATE )
self.obj77=precedentTo(self)
self.obj77.isGraphObjectVisual = True
if(hasattr(self.obj77, '_setHierarchicalLink')):
self.obj77._setHierarchicalLink(True)
# ID
self.obj77.ID.setValue('OpO|2')
self.obj77.graphClass_= graph_precedentTo
if self.genGraphics:
new_obj = graph_precedentTo(353.454745276,323.455638382,self.obj77)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("precedentTo", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj77.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj77)
self.globalAndLocalPostcondition(self.obj77, rootNode)
self.obj77.postAction( rootNode.CREATE )
self.obj78=precedentTo(self)
self.obj78.isGraphObjectVisual = True
if(hasattr(self.obj78, '_setHierarchicalLink')):
self.obj78._setHierarchicalLink(True)
# ID
self.obj78.ID.setValue('OpO|3')
self.obj78.graphClass_= graph_precedentTo
if self.genGraphics:
new_obj = graph_precedentTo(501.991811703,335.007768111,self.obj78)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("precedentTo", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj78.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj78)
self.globalAndLocalPostcondition(self.obj78, rootNode)
self.obj78.postAction( rootNode.CREATE )
self.obj79=precedentTo(self)
self.obj79.isGraphObjectVisual = True
if(hasattr(self.obj79, '_setHierarchicalLink')):
self.obj79._setHierarchicalLink(True)
# ID
self.obj79.ID.setValue('OpO|4')
self.obj79.graphClass_= graph_precedentTo
if self.genGraphics:
new_obj = graph_precedentTo(653.376888985,330.342413802,self.obj79)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("precedentTo", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj79.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj79)
self.globalAndLocalPostcondition(self.obj79, rootNode)
self.obj79.postAction( rootNode.CREATE )
self.obj81=precedentTo(self)
self.obj81.isGraphObjectVisual = True
if(hasattr(self.obj81, '_setHierarchicalLink')):
self.obj81._setHierarchicalLink(True)
# ID
self.obj81.ID.setValue('OpO|5')
self.obj81.graphClass_= graph_precedentTo
if self.genGraphics:
new_obj = graph_precedentTo(609.357627945,190.638564425,self.obj81)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("precedentTo", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj81.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj81)
self.globalAndLocalPostcondition(self.obj81, rootNode)
self.obj81.postAction( rootNode.CREATE )
# Connections for obj118 (graphObject_: Obj49) named Avatar
self.drawConnections(
(self.obj118,self.obj124,[561.0, 953.0, 624.0, 934.0, 601.5, 788.5],"true", 3),
(self.obj118,self.obj131,[561.0, 953.0, 678.5, 952.5],"true", 2),
(self.obj118,self.obj132,[561.0, 953.0, 486.0, 954.0],"true", 2),
(self.obj118,self.obj145,[561.0, 953.0, 489.0, 990.0, 563.5, 1042.5],"true", 3) )
# Connections for obj119 (graphObject_: Obj50) named Party
self.drawConnections(
)
# Connections for obj104 (graphObject_: Obj40) named Scout
self.drawConnections(
(self.obj104,self.obj113,[226.0, 782.0, 174.0, 739.0, 226.0, 699.0],"true", 3) )
# Connections for obj105 (graphObject_: Obj41) named Maker
self.drawConnections(
(self.obj105,self.obj114,[376.0, 782.0, 320.0, 747.0, 374.0, 701.0],"true", 3) )
# Connections for obj106 (graphObject_: Obj42) named Wizard
self.drawConnections(
(self.obj106,self.obj115,[826.0, 782.0, 763.0, 744.0, 827.0, 698.0],"true", 3) )
# Connections for obj125 (graphObject_: Obj53) named PartyFounder
self.drawConnections(
)
# Connections for obj126 (graphObject_: Obj54) named PartyMember
self.drawConnections(
)
# Connections for obj86 (graphObject_: Obj28) named move
self.drawConnections(
(self.obj86,self.obj98,[224.0, 628.0, 225.35410317499964, 548.5340289174825],"true", 2) )
# Connections for obj87 (graphObject_: Obj29) named harvestItem
self.drawConnections(
(self.obj87,self.obj99,[324.0, 628.0, 325.49290980409205, 550.2641858357085],"true", 2) )
# Connections for obj88 (graphObject_: Obj30) named craftItem
self.drawConnections(
(self.obj88,self.obj100,[424.0, 628.0, 424.7520427813149, 549.3093808947315],"true", 2) )
# Connections for obj89 (graphObject_: Obj31) named learnSpell
self.drawConnections(
(self.obj89,self.obj101,[824.0, 628.0, 824.6803628684763, 550.0073721236759],"true", 2) )
# Connections for obj133 (graphObject_: Obj59) named AvatarInventory
self.drawConnections(
)
# Connections for obj134 (graphObject_: Obj60) named AvatarAttributes
self.drawConnections(
)
# Connections for obj44 (graphObject_: Obj0) named FinishQuestForTheDragonEgg
self.drawConnections(
)
# Connections for obj45 (graphObject_: Obj1) named HatchDragonEgg
self.drawConnections(
(self.obj45,self.obj81,[713.5040855626298, 414.61876178946295, 609.3576279446208, 190.6385644254878],"true", 0),
(self.obj45,self.obj82,[713.5040855626298, 414.61876178946295, 609.0, 137.0],"true", 0) )
# Connections for obj46 (graphObject_: Obj2) named LearnSpell
self.drawConnections(
(self.obj46,self.obj82,[1197.9501491946323, 297.20372094102413, 609.0, 137.0],"true", 0) )
# Connections for obj47 (graphObject_: Obj3) named BrewHatchingPotion
self.drawConnections(
(self.obj47,self.obj77,[380.61803230937585, 245.72047989466682, 353.4547452756782, 323.45563838240116],"true", 0),
(self.obj47,self.obj80,[380.61803230937585, 245.72047989466682, 503.0, 249.0],"true", 0) )
# Connections for obj48 (graphObject_: Obj4) named TransportDragonEgg
self.drawConnections(
(self.obj48,self.obj79,[768.1966051352533, 463.7319071577135, 653.3768889850651, 330.34241380246567],"true", 0),
(self.obj48,self.obj80,[768.1966051352533, 463.7319071577135, 503.0, 249.0],"true", 0) )
# Connections for obj49 (graphObject_: Obj5) named FindEggHermit
self.drawConnections(
(self.obj49,self.obj80,[961.6229558403763, 493.8694299102599, 503.0, 249.0],"true", 0) )
# Connections for obj50 (graphObject_: Obj6) named FindDragonEgg
self.drawConnections(
(self.obj50,self.obj78,[356.5040855626298, 465.61876178946295, 501.9918117034965, 335.0077681105661],"true", 0),
(self.obj50,self.obj80,[356.5040855626298, 465.61876178946295, 503.0, 249.0],"true", 0) )
# Connections for obj51 (graphObject_: Obj7) named GatherPotionIngredients
self.drawConnections(
(self.obj51,self.obj76,[174.70820634999927, 412.068057834965, 202.1209380131943, 322.26232210038927],"true", 0) )
# Connections for obj52 (graphObject_: Obj8) named FindItemIngredient
self.drawConnections(
(self.obj52,self.obj74,[-235.0, 141.0, 203.48958760238037, 474.03923407073523],"true", 0),
(self.obj52,self.obj75,[-235.0, 141.0, 177.0, 398.0],"true", 0) )
# Connections for obj53 (graphObject_: Obj9) named HarvestItemIngredient
self.drawConnections(
(self.obj53,self.obj75,[607.7082063499993, 576.068057834965, 177.0, 398.0],"true", 0) )
# Connections for obj132 (graphObject_: Obj57) of type isPartOfOrgUnit
self.drawConnections(
(self.obj132,self.obj119,[486.0, 954.0, 401.0, 953.0],"true", 2) )
# Connections for obj124 (graphObject_: Obj51) of type canHaveRole
self.drawConnections(
(self.obj124,self.obj105,[601.5, 788.5, 487.0, 756.0, 376.0, 782.0],"true", 3),
(self.obj124,self.obj104,[601.5, 788.5, 376.0, 897.0, 226.0, 782.0],"true", 3),
(self.obj124,self.obj106,[601.5, 788.5, 714.0, 803.0, 826.0, 782.0],"true", 3) )
# Connections for obj131 (graphObject_: Obj55) of type canHaveRole
self.drawConnections(
(self.obj131,self.obj125,[678.5, 952.5, 826.0, 912.0],"true", 2),
(self.obj131,self.obj126,[678.5, 952.5, 826.0, 992.0],"true", 2) )
# Connections for obj113 (graphObject_: Obj43) of type hasActions
self.drawConnections(
(self.obj113,self.obj86,[226.0, 699.0, 272.0, 664.0, 224.0, 628.0],"true", 3) )
# Connections for obj114 (graphObject_: Obj45) of type hasActions
self.drawConnections(
(self.obj114,self.obj87,[374.0, 701.0, 377.0, 640.0, 324.0, 628.0],"true", 3),
(self.obj114,self.obj88,[374.0, 701.0, 389.0, 639.0, 424.0, 628.0],"true", 3) )
# Connections for obj115 (graphObject_: Obj47) of type hasActions
self.drawConnections(
(self.obj115,self.obj89,[827.0, 698.0, 906.0, 660.0, 824.0, 628.0],"true", 3) )
# Connections for obj145 (graphObject_: Obj61) of type canAccessKnArt
self.drawConnections(
(self.obj145,self.obj133,[563.5, 1042.5, 500.0, 1060.0],"true", 2),
(self.obj145,self.obj134,[563.5, 1042.5, 620.0, 1060.0],"true", 2) )
# Connections for obj75 (graphObject_: Obj12) of type isPartOfObjective
self.drawConnections(
(self.obj75,self.obj51,[177.0, 398.0, 178.2629049361234, 364.86034626012236, 130.61803230937585, 324.7204798946668],"true", 3) )
# Connections for obj80 (graphObject_: Obj22) of type isPartOfObjective
self.drawConnections(
(self.obj80,self.obj45,[503.0, 249.0, 474.52482250400703, 207.10198421398871, 514.9501491946326, 176.20372094102413],"true", 3) )
# Connections for obj82 (graphObject_: Obj26) of type isPartOfObjective
self.drawConnections(
(self.obj82,self.obj44,[609.0, 137.0, 597.5812361835442, 111.09174255367876, 633.208815266094, 76.1835387990368],"true", 3) )
# Connections for obj98 (graphObject_: Obj32) of type hasObjective
self.drawConnections(
(self.obj98,self.obj52,[225.35410317499964, 548.5340289174825, 158.0, 542.0, 104.70820634999927, 475.068057834965],"true", 3),
(self.obj98,self.obj50,[225.35410317499964, 548.5340289174825, 504.0, 452.0, 426.1966051352533, 324.7319071577135],"true", 3),
(self.obj98,self.obj48,[225.35410317499964, 548.5340289174825, 542.0, 459.0, 580.6229558403768, 324.8694299102599],"true", 3),
(self.obj98,self.obj49,[225.35410317499964, 548.5340289174825, 665.0, 464.0, 723.8107339701214, 324.7970978180615],"true", 3) )
# Connections for obj99 (graphObject_: Obj34) of type hasObjective
self.drawConnections(
(self.obj99,self.obj53,[325.49290980409205, 550.2641858357085, 326.0, 496.0, 304.98581960818404, 474.52837167141706],"true", 3) )
# Connections for obj100 (graphObject_: Obj36) of type hasObjective
self.drawConnections(
(self.obj100,self.obj47,[424.7520427813149, 549.3093808947315, 423.0, 422.0, 279.5040855626298, 324.61876178946295],"true", 3) )
# Connections for obj101 (graphObject_: Obj38) of type hasObjective
self.drawConnections(
(self.obj101,self.obj46,[824.6803628684763, 550.0073721236759, 825.0, 204.0, 725.3607257369526, 176.0147442473517],"true", 3) )
# Connections for obj74 (graphObject_: Obj10) of type precedentTo
self.drawConnections(
(self.obj74,self.obj53,[203.48958760238037, 474.03923407073523, 233.80899091692658, 491.6543125298483, 304.98581960818404, 474.52837167141706],"true", 3) )
# Connections for obj76 (graphObject_: Obj14) of type precedentTo
self.drawConnections(
(self.obj76,self.obj47,[202.1209380131943, 322.26232210038927, 223.34245132650778, 311.9868925740883, 279.5040855626298, 324.61876178946295],"true", 3) )
# Connections for obj77 (graphObject_: Obj16) of type precedentTo
self.drawConnections(
(self.obj77,self.obj50,[353.4547452756782, 323.45563838240116, 398.37787516883407, 298.2339247244638, 426.1966051352533, 324.7319071577135],"true", 3) )
# Connections for obj78 (graphObject_: Obj18) of type precedentTo
self.drawConnections(
(self.obj78,self.obj48,[501.9918117034965, 335.0077681105661, 536.3483993797774, 326.79214879870267, 580.6229558403768, 324.8694299102599],"true", 3) )
# Connections for obj79 (graphObject_: Obj20) of type precedentTo
self.drawConnections(
(self.obj79,self.obj49,[653.3768889850651, 330.34241380246567, 690.9238335175013, 329.0743307794161, 723.8107339701214, 324.7970978180615],"true", 3) )
# Connections for obj81 (graphObject_: Obj24) of type precedentTo
self.drawConnections(
(self.obj81,self.obj46,[609.3576279446208, 190.6385644254878, 669.2102720802009, 192.3413202520697, 725.3607257369526, 176.0147442473517],"true", 3) )
# AToM3 entry points for this generated model file: `newfunction` is the
# callable AToM3 invokes to load the model; the metamodel name and tool
# version below identify the formalism the file was generated from.
newfunction = TMWQuestDragonEggActions_MDL
loadedMMName = 'LSMASOMM_META'
atom3version = '0.3'
| Balannen/LSMASOMM | atom3/Models/TMWQuestDragonEggActions_MDL.py | Python | gpl-3.0 | 56,827 |
# -*- coding: utf-8 -*-
'''
Subtitles.gr Addon
Author Twilight0
SPDX-License-Identifier: GPL-3.0-only
See LICENSES/GPL-3.0-only for more information.
'''
import re, unicodedata
from shutil import copy
from os.path import splitext, exists, split as os_split
from resources.lib import subtitlesgr, xsubstv, podnapisi, vipsubs
from tulip.fuzzywuzzy.fuzz import ratio
from tulip import control
from tulip.compat import urlencode, py3_dec, concurrent_futures
from tulip.log import log_debug
# Read infolabels from the active player while a video is playing,
# otherwise from the currently focused list item.
infolabel_prefix = 'VideoPlayer' if control.condVisibility('Player.HasVideo') else 'ListItem'
class Search:
    """Search handler for the subtitle service.

    Queries the supported Greek subtitle providers (subtitles.gr,
    xsubs.tv, podnapisi, vipsubs) concurrently and lists the merged,
    sorted results as Kodi directory items on `syshandle`.
    """

    def __init__(self, syshandle, sysaddon, langs, action):
        # syshandle: integer Kodi plugin handle; sysaddon: plugin:// base
        # url used to build item urls; langs: languages requested by the
        # player; action: the invoked plugin action (stored for reference).
        self.list = []
        self.query = None
        self.syshandle = syshandle
        self.sysaddon = sysaddon
        self.langs = langs
        self.action = action

    def run(self, query=None):
        """Perform the search and populate the Kodi directory.

        When `query` is None the search terms are derived from the
        current item's infolabels; otherwise `query` is a manual search
        string supplied by the user.
        """
        # This add-on only serves Greek subtitles; bail out early when
        # Greek is not among the requested languages.
        if 'Greek' not in str(self.langs).split(','):
            control.directory(self.syshandle)
            control.infoDialog(control.lang(30002))
            return
        dup_removal = False
        if not query:
            title = match_title = control.infoLabel('{0}.Title'.format(infolabel_prefix))
            with concurrent_futures.ThreadPoolExecutor(5) as executor:
                # Non-ASCII title: fall back to the original title,
                # reduced to its closest ASCII representation.
                if re.search(r'[^\x00-\x7F]+', title) is not None:
                    title = control.infoLabel('{0}.OriginalTitle'.format(infolabel_prefix))
                    title = unicodedata.normalize('NFKD', title).encode('ascii', 'ignore')
                    title = py3_dec(title)
                year = control.infoLabel('{0}.Year'.format(infolabel_prefix))
                tvshowtitle = control.infoLabel('{0}.TVshowtitle'.format(infolabel_prefix))
                season = control.infoLabel('{0}.Season'.format(infolabel_prefix))
                # Zero-pad season/episode numbers to two digits.
                if len(season) == 1:
                    season = '0' + season
                episode = control.infoLabel('{0}.Episode'.format(infolabel_prefix))
                if len(episode) == 1:
                    episode = '0' + episode
                # Specials come through as e.g. "S2": treat as season 0
                # and keep only the trailing digit.
                if 's' in episode.lower():
                    season, episode = '0', episode[-1:]
                if tvshowtitle != '':  # episode
                    title_query = '{0} {1}'.format(tvshowtitle, title)
                    season_episode_query = '{0} S{1} E{2}'.format(tvshowtitle, season, episode)
                    season_episode_query_nospace = '{0} S{1}E{2}'.format(tvshowtitle, season, episode)
                    threads = [
                        executor.submit(self.subtitlesgr, season_episode_query_nospace),
                        executor.submit(self.xsubstv, season_episode_query),
                        executor.submit(self.podnapisi, season_episode_query),
                        executor.submit(self.vipsubs, season_episode_query)
                    ]
                    # Several query variants can return overlapping
                    # results, so de-duplicate later.
                    dup_removal = True
                    log_debug('Dual query used for subtitles search: ' + title_query + ' / ' + season_episode_query)
                    if control.setting('queries') == 'true':
                        threads.extend(
                            [
                                executor.submit(self.subtitlesgr, title_query),
                                executor.submit(self.vipsubs, title_query),
                                executor.submit(self.podnapisi, title_query),
                                executor.submit(self.subtitlesgr, season_episode_query)
                            ]
                        )
                elif year != '':  # movie
                    query = '{0} ({1})'.format(title, year)
                    threads = [
                        executor.submit(self.subtitlesgr, query), executor.submit(self.xsubstv, query),
                        executor.submit(self.vipsubs, query), executor.submit(self.podnapisi, query)
                    ]
                else:  # file
                    # No usable infolabels; derive title/year from the
                    # file name instead.
                    query, year = control.cleanmovietitle(title)
                    if year != '':
                        query = '{0} ({1})'.format(query, year)
                    threads = [
                        executor.submit(self.subtitlesgr, query), executor.submit(self.xsubstv, query),
                        executor.submit(self.vipsubs, query), executor.submit(self.podnapisi, query)
                    ]
                # Collect provider results as they finish; None results
                # (disabled providers) are skipped.
                for future in concurrent_futures.as_completed(threads):
                    item = future.result()
                    if not item:
                        continue
                    self.list.extend(item)
                if not dup_removal:
                    log_debug('Query used for subtitles search: ' + query)
                self.query = query
                self.query = py3_dec(self.query)
        else:  # Manual query
            with concurrent_futures.ThreadPoolExecutor(5) as executor:
                query = match_title = py3_dec(query)
                threads = [
                    executor.submit(self.subtitlesgr, query), executor.submit(self.xsubstv, query),
                    executor.submit(self.vipsubs, query), executor.submit(self.podnapisi, query)
                ]
                for future in concurrent_futures.as_completed(threads):
                    item = future.result()
                    if not item:
                        continue
                    self.list.extend(item)
        if len(self.list) == 0:
            control.directory(self.syshandle)
            return
        # Fixed provider ordering: xsubstv, subtitlesgr, podnapisi, vipsubs.
        f = []
        # noinspection PyUnresolvedReferences
        f += [i for i in self.list if i['source'] == 'xsubstv']
        f += [i for i in self.list if i['source'] == 'subtitlesgr']
        f += [i for i in self.list if i['source'] == 'podnapisi']
        f += [i for i in self.list if i['source'] == 'vipsubs']
        self.list = f
        if dup_removal:
            # Drop exact duplicates produced by the multiple query variants.
            self.list = [dict(t) for t in {tuple(d.items()) for d in self.list}]
        # Prefix each non-subtitles.gr entry with its provider name.
        for i in self.list:
            try:
                if i['source'] == 'xsubstv':
                    i['name'] = u'[xsubstv] {0}'.format(i['name'])
                elif i['source'] == 'podnapisi':
                    i['name'] = u'[podnapisi] {0}'.format(i['name'])
                elif i['source'] == 'vipsubs':
                    i['name'] = u'[vipsubs] {0}'.format(i['name'])
            except Exception:
                pass
        # Sort key selected by the "sorting" setting; any other value
        # sorts ascending by title.
        if control.setting('sorting') == '1':
            key = 'source'
        elif control.setting('sorting') == '2':
            key = 'downloads'
        elif control.setting('sorting') == '3':
            key = 'rating'
        else:
            key = 'title'
        self.list = sorted(self.list, key=lambda k: k[key].lower(), reverse=control.setting('sorting') in ['1', '2', '3'])
        for i in self.list:
            u = {'action': 'download', 'url': i['url'], 'source': i['source']}
            u = '{0}?{1}'.format(self.sysaddon, urlencode(u))
            item = control.item(label='Greek', label2=i['name'])
            item.setArt({'icon': str(i['rating'])[:1], 'thumb': 'el'})
            # Flag the entry as "synced" when the subtitle file name is
            # fuzzy-close enough to the playing item's name.
            if ratio(splitext(i['title'].lower())[0], splitext(match_title)[0]) >= int(control.setting('sync_probability')):
                item.setProperty('sync', 'true')
            else:
                item.setProperty('sync', 'false')
            item.setProperty('hearing_imp', 'false')
            control.addItem(handle=self.syshandle, url=u, listitem=item, isFolder=False)
        control.directory(self.syshandle)

    def subtitlesgr(self, query=None):
        """Query subtitles.gr; returns a result list, or None when the
        provider is disabled in the settings."""
        if not query:
            query = self.query
        try:
            # Raising TypeError is used as a cheap skip when the
            # provider is switched off.
            if control.setting('subtitles') == 'false':
                raise TypeError
            result = subtitlesgr.Subtitlesgr().get(query)
            return result
        except TypeError:
            pass

    def podnapisi(self, query=None):
        """Query podnapisi; returns a result list, or None when the
        provider is disabled in the settings."""
        if not query:
            query = self.query
        try:
            if control.setting('podnapisi') == 'false':
                raise TypeError
            result = podnapisi.Podnapisi().get(query)
            return result
        except TypeError:
            pass

    def vipsubs(self, query=None):
        """Query vipsubs; returns a result list, or None when the
        provider is disabled in the settings."""
        if not query:
            query = self.query
        try:
            if control.setting('vipsubs') == 'false':
                raise TypeError
            result = vipsubs.Vipsubs().get(query)
            return result
        except TypeError:
            pass

    def xsubstv(self, query=None):
        """Query xsubs.tv.

        NOTE(review): unlike the other providers this method appends
        directly to self.list and implicitly returns None, so its
        results bypass the common future-result handling in run();
        confirm intent before unifying.
        """
        if not query:
            query = self.query
        try:
            if control.setting('xsubs') == 'false':
                raise TypeError
            result = xsubstv.Xsubstv().get(query)
            self.list.extend(result)
        except TypeError:
            pass
class Download:
    """Downloads a selected subtitle into the add-on temp directory and
    optionally keeps a copy in a user-configured location."""

    def __init__(self, syshandle, sysaddon):
        # syshandle: integer Kodi plugin handle; sysaddon: plugin:// base url.
        self.syshandle = syshandle
        self.sysaddon = sysaddon

    # noinspection PyUnboundLocalVariable
    def run(self, url, source):
        """Fetch `url` from provider `source` and register the resulting
        subtitle file with Kodi as the directory result."""
        log_debug('Source selected: {0}'.format(source))
        # Work inside <addon data>/temp, recreated from scratch each run.
        path = control.join(control.dataPath, 'temp')
        try:
            # .decode() only exists on Python 2 byte-string paths; the
            # AttributeError on Python 3 is deliberately swallowed.
            path = path.decode('utf-8')
        except Exception:
            pass
        control.deleteDir(control.join(path, ''), force=True)
        control.makeFile(control.dataPath)
        control.makeFile(path)
        # Resolve the keep-destination folder when the user keeps
        # subtitles and/or their zip archives.
        if control.setting('keep_subs') == 'true' or control.setting('keep_zips') == 'true':
            # destination '0': next to the video (only for local files).
            if not control.get_info_label('ListItem.Path').startswith('plugin://') and control.setting('destination') == '0':
                output_path = control.get_info_label('Container.FolderPath')
            elif control.setting('output_folder').startswith('special://'):
                output_path = control.transPath(control.setting('output_folder'))
            else:
                output_path = control.setting('output_folder')
            if not exists(output_path):
                control.makeFile(output_path)
        # Dispatch the download to the matching provider backend.
        if source == 'subtitlesgr':
            subtitle = subtitlesgr.Subtitlesgr().download(path, url)
        elif source == 'xsubstv':
            subtitle = xsubstv.Xsubstv().download(path, url)
        elif source == 'podnapisi':
            subtitle = podnapisi.Podnapisi().download(path, url)
        elif source == 'vipsubs':
            subtitle = vipsubs.Vipsubs().download(path, url)
        else:
            subtitle = None
        if subtitle is not None:
            if control.setting('keep_subs') == 'true':
                # noinspection PyUnboundLocalVariable
                try:
                    if control.setting('destination') in ['0', '2']:
                        if control.infoLabel('{0}.Title'.format(infolabel_prefix)).startswith('plugin://'):
                            copy(subtitle, control.join(output_path, os_split(subtitle)[1]))
                            log_debug('Item currently selected is not a local file, cannot save subtitle next to it')
                        else:
                            # Name the kept copy after the video file,
                            # keeping the subtitle's own extension.
                            output_filename = control.join(
                                output_path, ''.join(
                                    [
                                        splitext(control.infoLabel('ListItem.FileName'))[0],
                                        splitext(os_split(subtitle)[1])[1]
                                    ]
                                )
                            )
                            # Ask before overwriting an existing file.
                            if exists(output_filename):
                                yesno = control.yesnoDialog(control.lang(30015))
                                if yesno:
                                    copy(subtitle, output_filename)
                            else:
                                copy(subtitle, output_filename)
                            # destination '2' additionally keeps a copy
                            # in the configured output folder.
                            if control.setting('destination') == '2':
                                if control.setting('output_folder').startswith('special://'):
                                    output_path = control.transPath(control.setting('output_folder'))
                                else:
                                    output_path = control.setting('output_folder')
                                copy(subtitle, control.join(output_path, os_split(subtitle)[1]))
                    else:
                        copy(subtitle, control.join(output_path, os_split(subtitle)[1]))
                    control.infoDialog(control.lang(30008))
                except Exception:
                    control.infoDialog(control.lang(30013))
            # Hand the downloaded file back to Kodi as the result item.
            item = control.item(label=subtitle)
            control.addItem(handle=self.syshandle, url=subtitle, listitem=item, isFolder=False)
            control.directory(self.syshandle)
| Twilight0/service.subtitles.subtitles.gr | resources/lib/addon.py | Python | gpl-3.0 | 12,802 |
#!/usr/bin/env python
import numpy as np
import mirheo as mir
import argparse
# Command line: ellipsoid semi-axes, frozen-particle coordinate file,
# optional visual dumps, optional drag on the rod particles.
parser = argparse.ArgumentParser()
parser.add_argument('--axes', type=float, nargs=3)
parser.add_argument('--coords', type=str)
parser.add_argument('--vis', action='store_true', default=False)
parser.add_argument('--drag', type=float, default=0.0)
args = parser.parse_args()

# Single-rank run on a cubic domain.
ranks = (1, 1, 1)
domain = [16, 16, 16]

# Time step, total simulated time, dump period.
dt = 1e-3
t_end = 10.0
t_dump_every = 1.0

# Rod: total length, number of segments, particle mass.
L = 14.0
num_segments = 10
mass = 1.0

u = mir.Mirheo(ranks, tuple(domain), debug_level=3, log_filename='log', no_splash=True)

# rod
# Initial center of mass (placed below the domain center) followed by
# the orientation quaternion (1, 0, 0, 0 — identity).
com_q_rod = [[ 0.5 * domain[0],
               0.5 * domain[1],
               0.5 * domain[2] - L/2,
               1.0, 0.0, 0.0, 0.0]]
def center_line(s):
    """Straight-rod parameterization: map s in [0, 1] onto the z axis."""
    z_coord = (0.5 - s) * L
    return (0, 0, z_coord)
def torsion(s):
    """Torsion along the rod: identically zero (untwisted rod)."""
    return 0.0
def length(a, b):
    """Euclidean distance between points `a` and `b`.

    Generalized from the original hard-coded 3-component form to
    sequences of any (equal) dimension; identical results for 3-D input.
    """
    return np.sqrt(sum((ai - bi) ** 2 for ai, bi in zip(a, b)))
# Discretization: parameter step per segment, initial segment length,
# and a0 — a rod width parameter set to half the segment length
# (presumably the material-frame spacing; see Mirheo rod docs).
h = 1.0 / num_segments
l0 = length(center_line(h), center_line(0))
a0 = l0/2

pv_rod = mir.ParticleVectors.RodVector('rod', mass, num_segments)
ic_rod = mir.InitialConditions.Rod(com_q_rod, center_line, torsion, a0)

# ellipsoid
axes = tuple(args.axes)
# Center of mass one z-semi-axis above the domain center, identity orientation.
com_q_ell = [[0.5 * domain[0],
              0.5 * domain[1],
              0.5 * domain[2] + axes[2],
              1., 0, 0, 0]]
# Frozen-particle template of the rigid body, read from file.
coords = np.loadtxt(args.coords).tolist()

if args.vis:
    # Attach a triangle mesh (icosphere scaled by the semi-axes) so the
    # body can be dumped as a mesh.
    import trimesh
    ell = trimesh.creation.icosphere(subdivisions=2, radius = 1.0)
    for i in range(3):
        ell.vertices[:,i] *= axes[i]
    mesh = mir.ParticleVectors.Mesh(ell.vertices.tolist(), ell.faces.tolist())
    pv_ell = mir.ParticleVectors.RigidEllipsoidVector('ellipsoid', mass, object_size=len(coords), semi_axes=axes, mesh=mesh)
else:
    pv_ell = mir.ParticleVectors.RigidEllipsoidVector('ellipsoid', mass, object_size=len(coords), semi_axes=axes)
ic_ell = mir.InitialConditions.Rigid(com_q_ell, coords)
vv_ell = mir.Integrators.RigidVelocityVerlet("vv_ell")

u.registerParticleVector(pv_ell, ic_ell)
u.registerParticleVector(pv_rod, ic_rod)
u.registerIntegrator(vv_ell)
u.setIntegrator(vv_ell, pv_ell)

# interactions
# Rod internal elasticity: stretching (center/frame), bending and twist
# stiffnesses around a straight rest shape (tau0 = 0, kappa0 = 0).
prms = {
    "a0" : a0,
    "l0" : l0,
    "k_s_center" : 100.0,
    "k_s_frame" : 100.0,
    "k_bending" : (10.0, 0.0, 10.0),
    "k_twist" : 10.0,
    "tau0" : 0,
    "kappa0" : (0., 0.)
}
int_rod = mir.Interactions.RodForces("rod_forces", **prms);
u.registerInteraction(int_rod)
u.setInteraction(int_rod, pv_rod, pv_rod)

# Bind the rod to the ellipsoid: anchor at the -z pole of the body,
# with an applied torque and a binding stiffness k_bound.
anchor=(0.0, 0.0, -axes[2])
torque = 0.1
k_bound = 100.0
int_bind = mir.Interactions.ObjRodBinding("binding", torque, anchor, k_bound);
u.registerInteraction(int_bind)
u.setInteraction(int_bind, pv_ell, pv_rod)

vv_rod = mir.Integrators.VelocityVerlet('vv_rod')
u.registerIntegrator(vv_rod)
u.setIntegrator(vv_rod, pv_rod)

# Optional viscous drag on the rod particles.
if args.drag > 0.0:
    u.registerPlugins(mir.Plugins.createParticleDrag('rod_drag', pv_rod, args.drag))

# Optional output: particle dumps for the rod, mesh dumps for the body.
if args.vis:
    dump_every = int (t_dump_every/dt)
    u.registerPlugins(mir.Plugins.createDumpParticles('rod_dump', pv_rod, dump_every, [], 'h5/rod_particles-'))
    u.registerPlugins(mir.Plugins.createDumpMesh("mesh_dump", pv_ell, dump_every, path="ply/"))

u.run(int(t_end / dt), dt=dt)

# Save the final coordinates of both objects for the regression test below.
if pv_rod is not None:
    pos_rod = pv_rod.getCoordinates()
    pos_ell = pv_ell.getCoordinates()
    np.savetxt("pos.txt", np.vstack((pos_rod, pos_ell)))

del u

# nTEST: bindings.obj_rod.one
# cd bindings
# rm -rf h5 pos*txt
# f="pos.txt"
# rho=8.0; ax=2.0; ay=1.0; az=1.0
# cp ../../data/ellipsoid_coords_${rho}_${ax}_${ay}_${az}.txt $f
# mir.run --runargs "-n 2" ./obj_rod.py --axes $ax $ay $az --coords $f --vis
# cat pos.txt > pos.out.txt
| dimaleks/uDeviceX | tests/bindings/obj_rod.py | Python | gpl-3.0 | 3,627 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Enrique Henestroza Anguiano
#
"""
Classifier wrapper for LIBSVM, SVM-light and (optionally) LIBLINEAR.
Agnostic about features or labels. Supports plain classification as well as
ranking ('structured', 'percrank') with dynamic classes.
"""
import sys
import os
import codecs
import svmlight
from libsvm import svmutil, svm
# Optional LIBLinear
# Record availability in `liblinear_found`; presumably checked before any
# LIBLINEAR use elsewhere in this module (not visible in this chunk).
try:
    from liblinear import liblinearutil, liblinear
    liblinear_found = True
except ImportError:
    liblinear_found = False
from ctypes import *
from dtbutils import *
from perceptron import KernelLBRankPerceptron, polynomial_kernel
import numpy as np
import cPickle
class Classifier(object):
def __init__(self, param={'model':'', 'pref':'def', 'verbose':False,\
'classtype':'classifier'}):
self._param = param
# Model
self._classtype = param['classtype']
self._modelname = param['model']
self._pref = param['pref']
self._model = None
self._numex_train = 0
self._numex_dev = 0
self._max_feat = 0
self._labs = {}
self._svmclassp = "-s 0 -t 1 -d 3 -g 0.1 -r 0.0 -e 1.0 -c 1.0 -q"
self._svmstrucp = "-z p -t 1 -d 3 -s 0.1 -r 0.0 -e 1.0 -c 0.05 -b 0"
#
# Make a decision using a model on an example, A feature vector is:
# ((feat1,val1), (feat3,val3), ...)
# Where each index corresponds to a feature in the model alphabet. Output
# a list of tuples (class/idx, score) sorted by decending score.
#
    def score(self, feats):
        """Score an example with the loaded model.

        For 'classifier' mode, `feats` is one sparse feature vector
        ((feat, val), ...) and the predicted class label (int) is
        returned. For 'structured' and 'percrank' modes, `feats` is a
        sequence of candidate feature vectors and the index of the
        best-scoring candidate is returned.
        """
        m = self._model
        if self._classtype == "classifier":
            # LIBSVM prediction on the sparse vector.
            x,_ = svm.gen_svm_nodearray(dict(feats))
            return int(svm.libsvm.svm_predict(m, x))
        elif self._classtype == "structured":
            # SVM-light ranking: score each candidate separately and
            # keep the argmax of the decision values.
            maxscore = -sys.maxint  # Python 2 only (sys.maxsize on Python 3)
            maxidx = None
            for idx in range(len(feats)):
                dec_val = svmlight.classify(m, [(0, feats[idx])])
                if dec_val > maxscore:
                    maxscore = dec_val
                    maxidx = idx
            return maxidx
        elif self._classtype == "percrank":
            # Kernel perceptron ranker: project all candidates at once.
            # Xisd flags the first candidate (matching the *isdef
            # bookkeeping in read_rank_problem — presumably "is default").
            X = [None]*len(feats)
            Xisd = [0]*len(feats)
            Xisd[0] = 1
            for idx in range(len(feats)):
                # Binary features: keep only the feature ids.
                X[idx] = set([f for f,v in feats[idx]])
            dec_vals = m.project(X, Xisd)
            return dec_vals.index(max(dec_vals))
#
# Reads a ranking problem.
#
    def read_rank_problem(self, ef):
        """Read an SVM-light-style ranking file and build constraints.

        Each line is "<label> qid:<n> <feat>:<val> ..."; consecutive
        lines sharing a qid form one ranking example. Feature values are
        discarded (binary features, ids only).

        Returns (X1, X1isdef, X2, X2isdef, Xidx, bline): X1 holds the
        gold (highest-label) candidate of each example, X2 all other
        candidates, Xidx maps each example to its indices into X2, the
        *isdef lists flag candidates that appeared first in their
        example, and bline counts examples whose first candidate has
        gold label 1 (appears to be a first-candidate baseline count).
        """
        efile = codecs.open(ef, 'r', 'ascii')
        qid = None
        allex = []
        rex = []
        print >> sys.stderr, "Reading ranking problem..."
        for line in efile:
            fields = line.rstrip().split(' ')
            glab = int(fields.pop(0))
            cqid = int(fields.pop(0).split(":")[1])
            feats = []
            for field in fields:
                f,v = field.split(":")
                #feats.append((int(f),float(v)))
                feats.append(int(f))
            feats = set(feats)
            # Group consecutive lines by qid into one example (rex).
            if qid == None:
                qid = cqid
                rex = [(glab, feats)]
            elif qid == cqid:
                rex.append((glab, feats))
            else:
                allex.append(rex)
                qid = cqid
                rex = [(glab, feats)]
        allex.append(rex)
        efile.close()
        # Only supports a one-vs-all ranking (highest glab over rest)
        print >> sys.stderr, "Generating ranking constraints...",
        X1 = []
        X2 = []
        X2cnt = 0
        Xidx = []
        X1isdef = []
        X2isdef = []
        bline = 0
        for rex in allex:
            # gidx: index of the gold candidate (highest label).
            glabs = [glab for glab,_ in rex]
            gidx = glabs.index(max(glabs))
            cidx = []
            for i in range(len(rex)):
                glab,feats = rex[i]
                if i == 0 and glab == 1:
                    bline += 1
                if i == gidx:
                    X1.append(feats)
                    if i == 0:
                        X1isdef.append(1)
                    else:
                        X1isdef.append(0)
                else:
                    # Non-gold candidate: remember its global X2 index.
                    cidx.append(X2cnt)
                    X2.append(feats)
                    if i == 0:
                        X2isdef.append(1)
                    else:
                        X2isdef.append(0)
                    X2cnt += 1
            Xidx.append(tuple(cidx))
        print >> sys.stderr, X2cnt
        return X1, X1isdef, X2, X2isdef, Xidx, bline
#
# Append stream of examples to file. Feature vectors are as follows:
# [(feat1, val1), (feat3, val3), ..., (featn, valn)]
#
def write_examples(self, examples, mode="train"):
exstream = codecs.open(self._modelname+"/"+self._pref+"."+mode,\
'a', 'ascii')
# Classification examples over a single line. Label and feature vector:
# 2 0:1 2:1 5:1
# 5 1:1 2:1 4:1
if self._classtype == "classifier":
for glab,feats in examples:
if mode == 'train':
self._numex_train += 1
self._max_feat = max(self._max_feat, feats[-1][0])
self._labs[glab] = True
else:
self._numex_dev += 1
print >> exstream, glab, \
" ".join([str(f)+":"+str(v) for f,v in feats])
# Structured binary examples.
# 1 qid:1 1:1 2:-1 5:-1
# 0 qid:1 1:-1 2:1 4:-1
elif self._classtype in ["structured", "percrank"]:
for idxg,ex in examples:
if mode == 'train':
self._numex_train += 1
qid = self._numex_train
else:
self._numex_dev += 1
qid = self._numex_dev
for idx in range(len(ex)):
feats = ex[idx]
if mode == 'train':
self._max_feat = max(self._max_feat, feats[-1][0])
if idxg == idx:
glab = 1
else:
glab = 0
print >> exstream, glab, 'qid:'+str(qid),\
" ".join([str(f)+":"+str(v) \
for f,v in feats])
exstream.close()
#
# Train model.
#
def train_model(self):
if self._classtype in ["structured", "percrank"]:
self._labs = {1:True}
print >> sys.stderr, "Training model with",\
self._numex_train,"examples,", self._max_feat+1, "features and",\
len(self._labs), "labels."
if self._numex_dev:
print >> sys.stderr, "Also with", self._numex_dev,"dev examples."
ef = self._modelname+"/"+self._pref+".train"
df = self._modelname+"/"+self._pref+".dev"
mf = self._modelname+"/"+self._pref+".model"
if self._classtype == "classifier":
os.system("$LIBSVM/svm-train "+self._svmclassp+" "+ef+" "+mf)
elif self._classtype == "structured":
os.system("$SVMLIGHT/svm_learn "+self._svmstrucp+" "+ef+" "+mf)
elif self._classtype == "percrank":
X1,X1isdef,X2,X2isdef,Xidx,bline = self.read_rank_problem(ef)
X1dev,X1devisdef,X2dev,X2devisdef,Xdevidx,devbline = \
self.read_rank_problem(df)
m = KernelLBRankPerceptron(kernel=polynomial_kernel, T=10, B=0)
m.fit(X1, X1isdef, X2, X2isdef, Xidx, X1dev, X1devisdef, X2dev,\
X2devisdef, Xdevidx, gm=False, bl=devbline)
mfile = open(mf, 'wb')
cPickle.dump([m.sv_a,m.sv_1,m.sv_2,m.bias], mfile, -1)
mfile.close()
#
# Load model.
#
def load_model(self):
if not os.path.isfile(self._modelname+"/"+self._pref+".model"):
return False
if self._classtype == "classifier":
self._model = svmutil.svm_load_model(self._modelname+\
"/"+self._pref+".model")
elif self._classtype == "structured":
self._model = svmlight.read_model(self._modelname+\
"/"+self._pref+".model")
elif self._classtype == "percrank":
m = KernelLBRankPerceptron(kernel=polynomial_kernel)
mfile = open(self._modelname+"/"+self._pref+".model", 'rb')
m.sv_a,m.sv_1,m.sv_2,m.bias = cPickle.load(mfile)
mfile.close()
self._model = m
return True
| enhean/fredist | src/fredist/classifier.py | Python | gpl-3.0 | 8,580 |
#!/usr/bin/python3
""" message_gds.py:
"""
# Import Required Libraries (Standard, Third Party, Local) ********************
import datetime
import logging
if __name__ == "__main__":
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from rpihome_v3.helpers.ipv4_help import check_ipv4
from rpihome_v3.helpers.field_checkers import in_int_range
from rpihome_v3.helpers.field_checkers import is_valid_datetime
# Authorship Info *************************************************************
__author__ = "Christopher Maue"
__copyright__ = "Copyright 2017, The RPi-Home Project"
__credits__ = ["Christopher Maue"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Christopher Maue"
__email__ = "csmaue@gmail.com"
__status__ = "Development"
# Message Class Definition ****************************************************
class GetDeviceStateMessage(object):
    """ Log Status Update message class and methods

    Represents a "get device state" message as ten comma-separated fields:
    ref, dest_addr, dest_port, source_addr, source_port, msg_type,
    dev_name, dev_addr, dev_status, dev_last_seen.  Each field is exposed
    as a validating property; the ``complete`` property encodes/decodes the
    whole message string.  Invalid values are logged and ignored (the field
    keeps its previous value).
    """
    def __init__(self, log=None, **kwargs):
        # Configure logger
        self.log = log or logging.getLogger(__name__)
        # Backing fields; all stored as strings.
        self._ref = str()
        self._dest_addr = str()
        self._dest_port = str()
        self._source_addr = str()
        self._source_port = str()
        self._msg_type = str()
        self._dev_name = str()
        self._dev_addr = str()
        self._dev_status = str()
        self._dev_last_seen = str()
        self.temp_list = []  # scratch list used by the `complete` setter
        # Process input variables if present.  Each recognized keyword is
        # routed through its property setter so validation/logging applies.
        # NOTE(review): `kwargs` is always a dict, so the None check is
        # vestigial; kept for byte-compatibility.
        if kwargs is not None:
            for key, value in kwargs.items():
                if key == "ref":
                    self.ref = value
                    self.log.debug('Ref Number value set during '
                                   '__init__ to: %s', self.ref)
                if key == "dest_addr":
                    self.dest_addr = value
                    self.log.debug('Destination address value set during __init__ '
                                   'to: %s', self.dest_addr)
                if key == "dest_port":
                    self.dest_port = value
                    self.log.debug('Destination port value set during __init__ '
                                   'to: %s', self.dest_port)
                if key == "source_addr":
                    self.source_addr = value
                    self.log.debug('Source address value set during __init__ '
                                   'to: %s', self.source_addr)
                if key == "source_port":
                    self.source_port = value
                    self.log.debug('Source port value set during __init__ to: '
                                   '%s', self.source_port)
                if key == "msg_type":
                    self.msg_type = value
                    self.log.debug('Message type value set during __init__ to: '
                                   '%s', self.msg_type)
                if key == "dev_name":
                    self.dev_name = value
                    self.log.debug('Device name value set during __init__ to: '
                                   '%s', self.dev_name)
                if key == "dev_addr":
                    self.dev_addr = value
                    self.log.debug('Device Address value set during __init__ '
                                   'to: %s', self.dev_addr)
                if key == "dev_status":
                    self.dev_status = value
                    self.log.debug('Device Status value set during __init__ '
                                   'to: %s', self.dev_status)
                if key == "dev_last_seen":
                    self.dev_last_seen = value
                    self.log.debug('Device last seen value set during __init__ '
                                   'to: %s', self.dev_last_seen)
    # ref number field ********************************************************
    @property
    def ref(self):
        self.log.debug('Returning current value of ref number: %s', self._ref)
        return self._ref
    @ref.setter
    def ref(self, value):
        # Message reference numbers must be 3-digit integers (100-999).
        if in_int_range(self.log, value, 100, 999) is True:
            self._ref = str(value)
            self.log.debug('Ref number updated to: %s', self._ref)
        else:
            self.log.debug('Ref number update failed with input value: '
                           '%s', value)
    # destination address *****************************************************
    @property
    def dest_addr(self):
        self.log.debug('Returning current value of destination address: '
                       '%s', self._dest_addr)
        return self._dest_addr
    @dest_addr.setter
    def dest_addr(self, value):
        # Must be a valid dotted-quad IPv4 address.
        if check_ipv4(value) is True:
            self._dest_addr = str(value)
            self.log.debug('Destination address updated to: '
                           '%s', self._dest_addr)
        else:
            self.log.warning('Destination address update failed with input value: '
                             '%s', value)
    # destination port ********************************************************
    @property
    def dest_port(self):
        self.log.debug('Returning current value of destination port: '
                       '%s', self._dest_port)
        return self._dest_port
    @dest_port.setter
    def dest_port(self, value):
        # Ports restricted to the application's 10000-60000 range.
        if in_int_range(self.log, value, 10000, 60000) is True:
            self._dest_port = str(value)
            self.log.debug('Destination port updated to: %s', self._dest_port)
        else:
            self.log.debug('Destination port update failed with input value: '
                           '%s', value)
    # source address field ****************************************************
    @property
    def source_addr(self):
        self.log.debug('Returning current value of source address: '
                       '%s', self._source_addr)
        return self._source_addr
    @source_addr.setter
    def source_addr(self, value):
        if check_ipv4(value) is True:
            self._source_addr = value
            self.log.debug('source address updated to: '
                           '%s', self._source_addr)
        else:
            self.log.warning('Source address update failed with input value: '
                             '%s', value)
    # source port field *******************************************************
    @property
    def source_port(self):
        self.log.debug('Returning current value of source port: '
                       '%s', self._source_port)
        return self._source_port
    @source_port.setter
    def source_port(self, value):
        if in_int_range(self.log, value, 10000, 60000) is True:
            self._source_port = str(value)
            self.log.debug('Source port updated to: %s', self._source_port)
        else:
            self.log.debug('Source port update failed with input value: '
                           '%s', value)
    # message type field ******************************************************
    @property
    def msg_type(self):
        self.log.debug('Returning current value of message type: '
                       '%s', self._msg_type)
        return self._msg_type
    @msg_type.setter
    def msg_type(self, value):
        # Message types share the 3-digit code space used by `ref`.
        if in_int_range(self.log, value, 100, 999) is True:
            self._msg_type = str(value)
            self.log.debug('Message type updated to: %s', self._msg_type)
        else:
            self.log.debug('Message type update failed with input value: '
                           '%s', value)
    # device name field *******************************************************
    @property
    def dev_name(self):
        self.log.debug('Returning current value of device name: '
                       '%s', self._dev_name)
        return self._dev_name
    @dev_name.setter
    def dev_name(self, value):
        # Any value accepted; non-strings are coerced with str().
        if isinstance(value, str):
            self._dev_name = value
        else:
            self._dev_name = str(value)
        self.log.debug('Device name value updated to: '
                       '%s', self._dev_name)
    # device address field ****************************************************
    @property
    def dev_addr(self):
        self.log.debug('Returning current value of device address: '
                       '%s', self._dev_addr)
        return self._dev_addr
    @dev_addr.setter
    def dev_addr(self, value):
        if check_ipv4(value) is True:
            self._dev_addr = value
            self.log.debug('Device address updated to: '
                           '%s', self._dev_addr)
        else:
            self.log.warning('Device address update failed with input value: '
                             '%s', value)
    # device status field *****************************************************
    @property
    def dev_status(self):
        self.log.debug('Returning current value of device status: '
                       '%s', self._dev_status)
        return self._dev_status
    @dev_status.setter
    def dev_status(self, value):
        # Status strings are normalized to lower case.
        if isinstance(value, str):
            self._dev_status = value.lower()
        else:
            self._dev_status = (str(value)).lower()
        self.log.debug('Device status value updated to: '
                       '%s', self._dev_status)
    # device last seen field **************************************************
    @property
    def dev_last_seen(self):
        self.log.debug('Returning current value of device last seen: '
                       '%s', self._dev_last_seen)
        return self._dev_last_seen
    @dev_last_seen.setter
    def dev_last_seen(self, value):
        # is_valid_datetime returns the validated value, or the current one
        # (third argument) when the input cannot be parsed.
        self._dev_last_seen = is_valid_datetime(
            self.log,
            value,
            self._dev_last_seen)
        self.log.debug('Device last seen updated to: %s', self._dev_last_seen)
    # complete message encode/decode methods **********************************
    @property
    def complete(self):
        """Encode the message as a single comma-separated string."""
        self.log.debug('Returning current value of complete message: '
                       '%s,%s,%s,%s,%s,%s,%s,%s,%s,%s',
                       self._ref, self._dest_addr, self._dest_port,
                       self._source_addr, self._source_port,
                       self._msg_type, self._dev_name, self._dev_addr,
                       self._dev_status, self._dev_last_seen)
        return '%s,%s,%s,%s,%s,%s,%s,%s,%s,%s' % (
            self._ref, self._dest_addr, self._dest_port,
            self._source_addr, self._source_port,
            self._msg_type, self._dev_name, self._dev_addr,
            self._dev_status, self._dev_last_seen)
    @complete.setter
    def complete(self, value):
        """Decode a comma-separated message string into the ten fields.

        Silently ignores inputs that are not strings or have fewer than
        ten fields; extra fields beyond the tenth are discarded.
        """
        if isinstance(value, str):
            self.temp_list = value.split(',')
            if len(self.temp_list) >= 10:
                self.log.debug('Message was properly formatted for decoding')
                self.ref = self.temp_list[0]
                self.dest_addr = self.temp_list[1]
                self.dest_port = self.temp_list[2]
                self.source_addr = self.temp_list[3]
                self.source_port = self.temp_list[4]
                self.msg_type = self.temp_list[5]
                self.dev_name = self.temp_list[6]
                self.dev_addr = self.temp_list[7]
                self.dev_status = self.temp_list[8]
                self.dev_last_seen = self.temp_list[9]
| csm0042/rpihome_v3 | rpihome_v3/messages/get_device_state.py | Python | gpl-3.0 | 11,430 |
"""
Test functions in iwords.py. To run with pytest.
"""
import os
from tempfile import NamedTemporaryFile
import iwords
def test_get_freqs():
    """A one-file corpus of 'Hello world!' yields equal 0.5 frequencies."""
    with NamedTemporaryFile() as tmp:
        tmp.write(b'Hello world!')
        tmp.flush()
        freqs = iwords.get_freqs([tmp.name])
        assert freqs == {'hello': 0.5, 'world': 0.5}
def test_clean():
    """clean() lowercases text and strips diacritics."""
    cleaned = iwords.clean('Cómo estás')
    assert cleaned == 'como estas'
def test_run():
    """The CLI exits with status 0 for --help and common option combos."""
    assert os.system('./iwords.py --help') == 0
    with NamedTemporaryFile() as tmp:
        tmp.write(b'Hello world!')
        tmp.flush()
        # Same invocations as before, just driven from a list.
        for cmd in (
            f'./iwords.py {tmp.name}',
            f'./iwords.py {tmp.name} --show-score',
            f'./iwords.py {tmp.name} --limit 1',
            f'./iwords.py {tmp.name} --learn /usr/share/dict/words',
        ):
            assert os.system(cmd) == 0
| jordibc/destaca_palabras | test_iwords.py | Python | gpl-3.0 | 823 |
import ujson as json
import pytz
from datetime import datetime
import requests
from django.http import HttpResponse, Http404
from django.shortcuts import render
def index(request):
    """Render the timetable landing page."""
    context = {}
    return render(request, 'timetable/index.html', context)
def query(request, term):
    """Return up to six station-name suggestions for *term* as a JSON list.

    Terms of length <= 1 return an empty list without hitting the API.
    """
    suggestions = []
    if len(term) > 1:
        response = requests.get(
            "https://transport.opendata.ch/v1/locations?query=" + term)
        payload = response.json()
        if 'stations' in payload:
            suggestions = [station['name']
                           for station in payload['stations'][:6]]
    return HttpResponse(json.dumps(suggestions), content_type="application/json")
def connection(request, departure, selected_time, start, to):
    """Query transport.opendata.ch for connections between *start* and *to*.

    ``start``/``to`` arrive with '/' encoded as '$.$' and are decoded here.
    ``selected_time`` is a unix timestamp interpreted in Europe/Zurich.
    Returns a JSON payload with a simplified connection list plus
    next/prev pagination timestamps, or raises Http404 when either
    endpoint is empty.  KeyError/AttributeError from an unexpected API
    payload propagate to the caller, exactly as before (the previous
    try/except re-raised them bare; the Http404 statements after those
    bare raises were unreachable dead code and have been removed).

    NOTE(review): ``departure`` is forwarded as ``isArrivalTime`` -- the
    naming suggests an inverted flag; confirm against the client code.
    """
    start = start.replace("$.$", "/")
    to = to.replace("$.$", "/")
    if start and to:
        tz = pytz.timezone('Europe/Zurich')
        dt = datetime.fromtimestamp(int(selected_time), tz=tz)
        params = {
            "from": start,
            "to": to,
            "time": str(dt.time())[:5],  # HH:MM
            "date": str(dt.date()),      # YYYY-MM-DD
            "isArrivalTime": departure,
        }
        r = requests.get("https://transport.opendata.ch/v1/connections", params=params)
        data = r.json()
        connections = []
        for con in data["connections"]:
            sections = []
            # Human-readable name: first section that is a vehicle journey
            # (walking sections have journey == None).
            name = ""
            for section in con["sections"]:
                if not name and section["journey"]:
                    name = "{} nach {}".format(section["journey"]["name"], section["arrival"]["location"]["name"])
                sections.append(
                    {
                        "from": section["departure"]["location"]["name"],
                        "from_platform": section["departure"]["platform"],
                        "from_time": section["departure"]["departureTimestamp"],
                        "to": section["arrival"]["location"]["name"],
                        "to_platform": section["arrival"]["platform"],
                        "to_time": section["arrival"]["arrivalTimestamp"],
                        "route": "{} nach {}".format(section["journey"]["name"],
                                                     section["arrival"]["location"]["name"]) if section[
                            "journey"] else "Fussweg",
                        "capacity1st": section["journey"]["capacity1st"] if section["journey"] else None,
                        "capacity2nd": section["journey"]["capacity2nd"] if section["journey"] else None,
                    }
                )
            connections.append(
                {
                    "transfers": con["transfers"],
                    "arrivalTimestamp": con["to"]["arrivalTimestamp"],
                    "departureTimestamp": con["from"]["departureTimestamp"],
                    "sections": sections,
                    "name": name,
                    "platform": con["from"]["platform"],
                    "from": con["from"]["station"]["name"],
                    "to": con["to"]["station"]["name"],
                    "capacity1st": con["capacity1st"],
                    "capacity2nd": con["capacity2nd"],
                }
            )
        # Pagination anchors: one minute before the first arrival / after
        # the last departure of the current result page.
        prev_time = 0 if not connections else connections[0]["arrivalTimestamp"] - 60
        next_time = selected_time if not connections else connections[-1]["departureTimestamp"] + 60
        return HttpResponse(json.dumps({"connections": connections, "nextTime": next_time, "prevTime": prev_time}),
                            content_type="application/json")
    raise Http404("No connections found")
| drbeni/fahrplan | timetable/views.py | Python | gpl-3.0 | 3,943 |
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import datetime
import time
import threading
import traceback
from sickbeard import logger
from sickbeard.exceptions import ex
class Scheduler:
    """Runs *action* (an object exposing run() and amActive) every
    *cycleTime* on a worker thread.

    The thread object is created by initThread() but must be started by
    the caller; runAction() polls once per second and fires the action
    when cycleTime has elapsed since lastRun.  Setting ``abort`` to True
    makes the loop exit after the current poll.
    """
    def __init__(self, action, cycleTime=datetime.timedelta(minutes=10), runImmediately=True, threadName="ScheduledThread", silent=False):
        if runImmediately:
            # Pretend the last run was in year 1 so the first poll fires.
            self.lastRun = datetime.datetime.fromordinal(1)
        else:
            self.lastRun = datetime.datetime.now()
        self.action = action
        self.cycleTime = cycleTime
        self.thread = None
        self.threadName = threadName
        self.silent = silent  # suppress the "Starting new thread" log line
        self.initThread()
        self.abort = False
    def initThread(self):
        # (Re)create the worker thread if it is missing or has died.
        if self.thread == None or not self.thread.isAlive():
            self.thread = threading.Thread(None, self.runAction, self.threadName)
    def timeLeft(self):
        """Time remaining until the next scheduled run (may be negative)."""
        return self.cycleTime - (datetime.datetime.now() - self.lastRun)
    def forceRun(self):
        """Schedule an immediate run unless the action is already active.

        Returns True if the run was scheduled, False otherwise.
        """
        if not self.action.amActive:
            self.lastRun = datetime.datetime.fromordinal(1)
            return True
        return False
    def runAction(self):
        """Thread main loop: poll every second, run the action when due,
        log (not propagate) any exception it raises, and exit -- clearing
        self.thread -- once abort is requested."""
        while True:
            currentTime = datetime.datetime.now()
            if currentTime - self.lastRun > self.cycleTime:
                self.lastRun = currentTime
                try:
                    if not self.silent:
                        logger.log(u"Starting new thread: "+self.threadName, logger.DEBUG)
                    self.action.run()
                except Exception, e:
                    logger.log(u"Exception generated in thread "+self.threadName+": " + ex(e), logger.ERROR)
                    logger.log(traceback.format_exc(), logger.DEBUG)
            if self.abort:
                self.abort = False
                self.thread = None
                return
            time.sleep(1)
| stephanehenry27/Sickbeard-anime | sickbeard/scheduler.py | Python | gpl-3.0 | 2,593 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=W0403,R0902,R0903,R0904,W0212
from __future__ import (absolute_import, division, print_function)
from HFIR_4Circle_Reduction import mpl2dgraphicsview
import numpy as np
import os
from qtpy.QtCore import Signal as pyqtSignal
class Detector2DView(mpl2dgraphicsview.Mpl2dGraphicsView):
    """
    Customized 2D detector view with mouse-driven region-of-interest (ROI)
    selection.  The ROI is drawn as a white rectangle (matplotlib Polygon)
    and its corners are broadcast via newROIDefinedSignal when the user
    releases the mouse button.
    """
    class MousePress(object):
        # Mouse-button state; LEFT/RIGHT mirror matplotlib's event.button
        # values (1 = left, 3 = right).
        RELEASED = 0
        LEFT = 1
        RIGHT = 3
    # Emitted with the ROI corner coordinates (start x, start y, end x, end y).
    newROIDefinedSignal = pyqtSignal(int, int, int, int)
    def __init__(self, parent):
        """
        :param parent: parent Qt widget
        :return:
        """
        mpl2dgraphicsview.Mpl2dGraphicsView.__init__(self, parent)
        # connect the mouse motion to interact with the canvas
        self._myCanvas.mpl_connect('button_press_event', self.on_mouse_press_event)
        self._myCanvas.mpl_connect('button_release_event', self.on_mouse_release_event)
        self._myCanvas.mpl_connect('motion_notify_event', self.on_mouse_motion)
        # class variables
        self._myPolygon = None  # matplotlib.patches.Polygon
        # class status variables
        self._roiSelectMode = False
        # region of interest. None or 2 tuple of 2-tuple for upper left corner and lower right corner
        # mouse positions as start and end
        self._roiStart = None
        self._roiEnd = None
        # mouse
        self._mousePressed = Detector2DView.MousePress.RELEASED
        # mouse position and resolution (resolution = fraction of the axis
        # span below which a mouse move is ignored)
        self._currX = 0.
        self._currY = 0.
        self._resolutionX = 0.005
        self._resolutionY = 0.005
        # parent window
        self._myParentWindow = None
        return
    def clear_canvas(self):
        """
        clear canvas (override base class); also drops the ROI polygon and
        the stored ROI corners before delegating to the base class
        :return:
        """
        # clear the current record
        self._myPolygon = None
        # reset mouse selection ROI
        # set
        self._roiStart = None
        self._roiEnd = None
        # call base class
        super(Detector2DView, self).clear_canvas()
        return
    def enter_roi_mode(self, roi_state):
        """
        Enter or leave the region of interest (ROI) selection mode
        :param roi_state: True to enter selection mode (clears any existing
            ROI), False to leave it (resets the stored corners)
        :return:
        """
        assert isinstance(roi_state, bool), 'ROI mode state {} must be a boolean but not a {}.' \
                                            ''.format(roi_state, type(roi_state))
        # set
        self._roiSelectMode = roi_state
        if roi_state:
            # new in add-ROI mode
            self.remove_roi()
        else:
            # reset roi start and roi end
            self._roiStart = None
            self._roiEnd = None
        return
    def integrate_roi_linear(self, exp_number, scan_number, pt_number, output_dir):
        """
        integrate the 2D data inside region of interest along both axis-0 and axis-1 individually.
        and the result (as 1D data) will be saved to ascii file.
        the X values will be the corresponding pixel index either along axis-0 or axis-1
        :return: status message naming the output file base
        """
        def save_to_file(base_file_name, axis, array1d, start_index):
            """
            save the result (1D data) to an ASCII file named
            '<base>_axis_<axis>.dat', one 'index<TAB>value' pair per line
            :param base_file_name:
            :param axis:
            :param array1d:
            :param start_index: pixel index of the first element
            :return:
            """
            file_name = '{0}_axis_{1}.dat'.format(base_file_name, axis)
            wbuf = ''
            vec_x = np.arange(len(array1d)) + start_index
            for x, d in zip(vec_x, array1d):
                wbuf += '{0} \t{1}\n'.format(x, d)
            ofile = open(file_name, 'w')
            ofile.write(wbuf)
            ofile.close()
            return
        matrix = self.array2d
        assert isinstance(matrix, np.ndarray), 'A matrix must be an ndarray but not {0}.'.format(type(matrix))
        # get region of interest; default to the whole detector image
        if self._roiStart is None:
            self._roiStart = (0, 0)
        if self._roiEnd is None:
            self._roiEnd = matrix.shape
        # normalize corners so ll <= ur regardless of drag direction
        ll_row = min(self._roiStart[0], self._roiEnd[0])
        ll_col = min(self._roiStart[1], self._roiEnd[1])
        ur_row = max(self._roiStart[0], self._roiEnd[0])
        ur_col = max(self._roiStart[1], self._roiEnd[1])
        #roi_matrix = matrix[ll_col:ur_col, ll_row:ur_row]
        #sum_0 = roi_matrix.sum(0)
        #sum_1 = roi_matrix.sum(1)
        # NOTE(review): rows index axis 1 and cols axis 0 here -- looks
        # transposed but matches the commented-out original; confirm.
        roi_matrix = matrix[ll_col:ur_col, ll_row:ur_row]
        sum_0 = roi_matrix.sum(0)
        sum_1 = roi_matrix.sum(1)
        # write to file
        base_name = os.path.join(output_dir, 'Exp{0}_Scan{1}_Pt{2}'.format(exp_number, scan_number, pt_number))
        save_to_file(base_name, 0, sum_0, ll_row)
        save_to_file(base_name, 1, sum_1, ll_col)
        message = 'Integrated values are saved to {0}...'.format(base_name)
        return message
    @property
    def is_roi_selection_drawn(self):
        """
        whether ROI is drawn (i.e. the polygon patch currently exists)
        :return:
        """
        is_drawn = not (self._myPolygon is None)
        return is_drawn
    def get_roi(self):
        """
        :return: 2-tuple of (lower-left corner, upper-right corner), each a
            2-tuple, normalized regardless of drag direction
        """
        assert self._roiStart is not None
        assert self._roiEnd is not None
        # rio start is upper left, roi end is lower right
        lower_left_x = min(self._roiStart[0], self._roiEnd[0])
        lower_left_y = min(self._roiStart[1], self._roiEnd[1])
        lower_left = lower_left_x, lower_left_y
        # ROI upper right
        upper_right_x = max(self._roiStart[0], self._roiEnd[0])
        upper_right_y = max(self._roiStart[1], self._roiEnd[1])
        upper_right = upper_right_x, upper_right_y
        return lower_left, upper_right
    def plot_detector_counts(self, raw_det_data, title=None):
        """
        plot detector counts as 2D plot; re-draws the ROI polygon on top if
        one exists
        :param raw_det_data: 2D array of counts
        :param title: plot title (defaults to 'No Title')
        :return:
        """
        x_min = 0
        x_max = raw_det_data.shape[0]
        y_min = 0
        y_max = raw_det_data.shape[1]
        count_plot = self.add_plot_2d(raw_det_data, x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max,
                                      hold_prev_image=False)
        if title is None:
            title = 'No Title'
        self.set_title(title)
        if self._myPolygon is not None:
            print ('[DB...BAT...] Add PATCH')
            self._myCanvas.add_patch(self._myPolygon)
        else:
            print ('[DB...BAT...] NO PATCH')
        print ('[DB...BAT...AFTER] ROI Rect: {0}. 2D plot: {1}'.format(self._myPolygon, count_plot))
        return
    def plot_roi(self):
        """ Plot region of interest (as rectangular) to the canvas from the region set from
        _roiStart/_roiEnd; replaces any previously drawn polygon
        :return:
        """
        # check
        assert self._roiStart is not None, 'Starting point of region-of-interest cannot be None'
        assert self._roiEnd is not None, 'Ending point of region-of-interest cannot be None'
        # create a vertex list of a rectangular
        vertex_array = np.ndarray(shape=(4, 2))
        # upper left corner
        vertex_array[0][0] = self._roiStart[0]
        vertex_array[0][1] = self._roiStart[1]
        # lower right corner
        vertex_array[2][0] = self._roiEnd[0]
        vertex_array[2][1] = self._roiEnd[1]
        # upper right corner
        vertex_array[1][0] = self._roiEnd[0]
        vertex_array[1][1] = self._roiStart[1]
        # lower left corner
        vertex_array[3][0] = self._roiStart[0]
        vertex_array[3][1] = self._roiEnd[1]
        # register
        if self._myPolygon is not None:
            self._myPolygon.remove()
            self._myPolygon = None
        self._myPolygon = self._myCanvas.plot_polygon(vertex_array, fill=False, color='w')
        return
    def remove_roi(self):
        """
        Remove the rectangular for region of interest and reset the stored
        corners; no-op (with a console note) when no polygon exists
        :return:
        """
        print ('[DB...BAT] Try to remove ROI {0}'.format(self._myPolygon))
        if self._myPolygon is not None:
            # polygon is of type matplotlib.patches.Polygon
            self._myPolygon.remove()
            self._myPolygon = None
            # FUTURE-TO-DO: this should be replaced by some update() method of canvas
            self._myCanvas._flush()
            self._roiStart = None
            self._roiEnd = None
        else:
            print ('[NOTICE] Polygon is None. Nothing to remove')
        return
    def on_mouse_motion(self, event):
        """
        Event handing as mouse is moving: while the left button is held in
        ROI-select mode, grow/shrink the ROI rectangle to follow the cursor
        :param event: matplotlib motion event
        :return:
        """
        # skip if the mouse cursor is still outside of the canvas
        if event.xdata is None or event.ydata is None:
            return
        # check: _currX and _currY must be specified
        assert self._currX is not None and self._currY is not None
        # no operation if the displacement is too small
        if abs(event.xdata - self._currX) < self.resolutionX() and abs(event.ydata - self._currY) < self.resolutionY():
            return
        if self._mousePressed == Detector2DView.MousePress.RELEASED:
            # No operation if mouse is not pressed
            pass
        elif self._mousePressed == Detector2DView.MousePress.RIGHT:
            # No operation if mouse' right button is pressed
            pass
        elif self._mousePressed == Detector2DView.MousePress.LEFT:
            if self._roiSelectMode is True:
                # in ROI selection mode, update the size
                self.update_roi_poly(event.xdata, event.ydata)
        # update current mouse' position
        self._currX = event.xdata
        self._currY = event.ydata
        return
    def on_mouse_press_event(self, event):
        """
        Record the press position/button; a left press in ROI-select mode
        anchors the ROI start corner
        :param event: matplotlib button-press event
        :return:
        """
        # return if the cursor position is out of canvas
        if event.xdata is None or event.ydata is None:
            return
        # update mouse' position
        self._currX = event.xdata
        self._currY = event.ydata
        # update mouse' pressed state
        if event.button == 1:
            self._mousePressed = Detector2DView.MousePress.LEFT
        elif event.button == 3:
            self._mousePressed = Detector2DView.MousePress.RIGHT
        # do something?
        if self._roiSelectMode is True and self._mousePressed == Detector2DView.MousePress.LEFT:
            # start to select a region
            self._roiStart = (self._currX, self._currY)
        return
    def on_mouse_release_event(self, event):
        """
        Finish an ROI drag: finalize the rectangle and notify the parent
        window via newROIDefinedSignal
        :param event: matplotlib button-release event
        :return:
        """
        # return without any operation if mouse cursor is out side of canvas
        if event.xdata is None or event.ydata is None:
            return
        # update mouse' position
        self._currX = event.xdata
        self._currY = event.ydata
        # update button
        prev_mouse_pressed = self._mousePressed
        self._mousePressed = Detector2DView.MousePress.RELEASED
        # do something
        if self._roiSelectMode and prev_mouse_pressed == Detector2DView.MousePress.LEFT:
            # end the ROI selection mode
            self.update_roi_poly(self._currX, self._currY)
            # send a signal to parent such that a rew ROI is defined
            self.newROIDefinedSignal.emit(self._roiStart[0], self._roiStart[1], self._roiEnd[0], self._roiEnd[1])
        # END-IF
        return
    def resolutionX(self):
        """
        :return: minimum significant mouse displacement along X (data units)
        """
        return (self.x_max - self.x_min) * self._resolutionX
    def resolutionY(self):
        """
        :return: minimum significant mouse displacement along Y (data units)
        """
        return (self.y_max - self.y_min) * self._resolutionY
    def set_parent_window(self, parent_window):
        """
        Set the parent window for synchronizing the operation; connects
        newROIDefinedSignal to the parent's evt_new_roi slot
        :param parent_window:
        :return:
        """
        assert parent_window is not None, 'Parent window cannot be None'
        self._myParentWindow = parent_window
        self.newROIDefinedSignal.connect(self._myParentWindow.evt_new_roi)
        return
    def set_roi(self, lower_left_corner, upper_right_corner, plot=True):
        """
        set ROI to class variables
        :param lower_left_corner: 2-item row/col coordinate
        :param upper_right_corner: 2-item row/col coordinate
        :param plot: if True, then plot ROI
        :return:
        """
        # check inputs
        assert len(lower_left_corner) == 2, 'Lower left corner row/col coordinate {0} must have 2 items.' \
                                            ''.format(lower_left_corner)
        assert len(upper_right_corner) == 2, 'Upper right corner row/col coordinate {0} must have 2 items.' \
                                             ''.format(upper_right_corner)
        # set lower left corner and upper right corner
        self._roiStart = lower_left_corner
        self._roiEnd = upper_right_corner
        # plot
        if plot:
            self.plot_roi()
        return
    def update_roi_poly(self, cursor_x, cursor_y):
        """Update region of interest. It is to
        (1) remove the original polygon
        (2) draw a new polygon with the ROI end moved to the cursor
        :param cursor_x: cursor X in data coordinates
        :param cursor_y: cursor Y in data coordinates
        :return:
        """
        # check
        assert isinstance(cursor_x, float), 'Cursor x coordination {0} must be a float.'.format(cursor_x)
        assert isinstance(cursor_y, float), 'Cursor y coordination {0} must be a float.'.format(cursor_y)
        # remove the original polygon
        if self._myPolygon is not None:
            self._myPolygon.remove()
            self._myPolygon = None
            # self.canvas._flush()
        # set RIO end
        self._roiEnd = [cursor_x, cursor_y]
        # plot the new polygon
        self.plot_roi()
        # # update: no need to do this!
        # if self._myPolygon is not None:
        #     self._myParentWindow.do_apply_roi()
        return
| mganeva/mantid | scripts/HFIR_4Circle_Reduction/detector2dview.py | Python | gpl-3.0 | 14,054 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
from ansible import constants as C
from ansible.module_utils.six import iteritems
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.playbook.block import Block
from ansible.playbook.task import Task
from ansible.utils.display import Display
display = Display()
__all__ = ['PlayIterator']
class HostState:
    """Per-host iteration cursor used by PlayIterator.

    Tracks the list of blocks for the host, the current block index, the
    task indices within the tasks/rescue/always sections, the run/fail
    states (PlayIterator.ITERATING_* / FAILED_* constants), and recursive
    child states for nested blocks.
    """
    def __init__(self, blocks):
        self._blocks = blocks[:]  # shallow copy so callers can't mutate us
        self.cur_block = 0
        self.cur_regular_task = 0
        self.cur_rescue_task = 0
        self.cur_always_task = 0
        self.cur_dep_chain = None
        self.run_state = PlayIterator.ITERATING_SETUP
        self.fail_state = PlayIterator.FAILED_NONE
        self.pending_setup = False
        # child HostStates for nested blocks in each section
        self.tasks_child_state = None
        self.rescue_child_state = None
        self.always_child_state = None
        self.did_rescue = False
        self.did_start_at_task = False
    def __repr__(self):
        return "HostState(%r)" % self._blocks
    def __str__(self):
        def _run_state_to_string(n):
            # run_state values index directly into this list
            states = ["ITERATING_SETUP", "ITERATING_TASKS", "ITERATING_RESCUE", "ITERATING_ALWAYS", "ITERATING_COMPLETE"]
            try:
                return states[n]
            except IndexError:
                return "UNKNOWN STATE"
        def _failed_state_to_string(n):
            # fail states are a bit mask (powers of two); several may be set
            states = {1: "FAILED_SETUP", 2: "FAILED_TASKS", 4: "FAILED_RESCUE", 8: "FAILED_ALWAYS"}
            if n == 0:
                return "FAILED_NONE"
            else:
                ret = []
                for i in (1, 2, 4, 8):
                    if n & i:
                        ret.append(states[i])
                return "|".join(ret)
        return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? (%s), "
                "rescue child state? (%s), always child state? (%s), did rescue? %s, did start at task? %s" % (
                    self.cur_block,
                    self.cur_regular_task,
                    self.cur_rescue_task,
                    self.cur_always_task,
                    _run_state_to_string(self.run_state),
                    _failed_state_to_string(self.fail_state),
                    self.pending_setup,
                    self.tasks_child_state,
                    self.rescue_child_state,
                    self.always_child_state,
                    self.did_rescue,
                    self.did_start_at_task,
                ))
    def __eq__(self, other):
        # Field-wise equality over all cursor/state attributes.
        if not isinstance(other, HostState):
            return False
        for attr in ('_blocks', 'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task',
                     'run_state', 'fail_state', 'pending_setup', 'cur_dep_chain',
                     'tasks_child_state', 'rescue_child_state', 'always_child_state'):
            if getattr(self, attr) != getattr(other, attr):
                return False
        return True
    def get_current_block(self):
        """Return the block the cursor currently points at."""
        return self._blocks[self.cur_block]
    def copy(self):
        """Return a copy of this state; child states and the dep chain are
        copied recursively, the block list is shared by reference."""
        new_state = HostState(self._blocks)
        new_state.cur_block = self.cur_block
        new_state.cur_regular_task = self.cur_regular_task
        new_state.cur_rescue_task = self.cur_rescue_task
        new_state.cur_always_task = self.cur_always_task
        new_state.run_state = self.run_state
        new_state.fail_state = self.fail_state
        new_state.pending_setup = self.pending_setup
        new_state.did_rescue = self.did_rescue
        new_state.did_start_at_task = self.did_start_at_task
        if self.cur_dep_chain is not None:
            new_state.cur_dep_chain = self.cur_dep_chain[:]
        if self.tasks_child_state is not None:
            new_state.tasks_child_state = self.tasks_child_state.copy()
        if self.rescue_child_state is not None:
            new_state.rescue_child_state = self.rescue_child_state.copy()
        if self.always_child_state is not None:
            new_state.always_child_state = self.always_child_state.copy()
        return new_state
class PlayIterator:
    """Per-host iterator over the compiled blocks of a single play.

    Hands out one task at a time per host via get_next_task_for_host(),
    keeping a HostState for each host that records block/section cursors
    and accumulated failure flags. A synthetic fact-gathering block is
    injected as self._blocks[0] during construction.
    """

    # the primary running states for the play iteration
    ITERATING_SETUP = 0
    ITERATING_TASKS = 1
    ITERATING_RESCUE = 2
    ITERATING_ALWAYS = 3
    ITERATING_COMPLETE = 4

    # the failure states for the play iteration, which are powers
    # of 2 as they may be or'ed together in certain circumstances
    FAILED_NONE = 0
    FAILED_SETUP = 1
    FAILED_TASKS = 2
    FAILED_RESCUE = 4
    FAILED_ALWAYS = 8

    def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
        """Compile the play's blocks, prepend a gather_facts block, and build
        an initial HostState per host (optionally fast-forwarded to
        play_context.start_at_task)."""
        self._play = play
        self._blocks = []
        self._variable_manager = variable_manager

        # Default options to gather
        gather_subset = self._play.gather_subset
        gather_timeout = self._play.gather_timeout
        fact_path = self._play.fact_path

        setup_block = Block(play=self._play)
        # Gathering facts with run_once would copy the facts from one host to
        # the others.
        setup_block.run_once = False
        setup_task = Task(block=setup_block)
        setup_task.action = 'gather_facts'
        setup_task.name = 'Gathering Facts'
        setup_task.args = {
            'gather_subset': gather_subset,
        }

        # Unless play is specifically tagged, gathering should 'always' run
        if not self._play.tags:
            setup_task.tags = ['always']

        if gather_timeout:
            setup_task.args['gather_timeout'] = gather_timeout
        if fact_path:
            setup_task.args['fact_path'] = fact_path
        setup_task.set_loader(self._play._loader)
        # short circuit fact gathering if the entire playbook is conditional
        if self._play._included_conditional is not None:
            setup_task.when = self._play._included_conditional[:]
        setup_block.block = [setup_task]

        setup_block = setup_block.filter_tagged_tasks(all_vars)
        self._blocks.append(setup_block)

        # compile the play's real blocks, dropping any that end up empty
        # after tag filtering
        for block in self._play.compile():
            new_block = block.filter_tagged_tasks(all_vars)
            if new_block.has_tasks():
                self._blocks.append(new_block)

        self._host_states = {}
        start_at_matched = False
        batch = inventory.get_hosts(self._play.hosts, order=self._play.order)
        self.batch_size = len(batch)
        for host in batch:
            self._host_states[host.name] = HostState(blocks=self._blocks)
            # if we're looking to start at a specific task, iterate through
            # the tasks for this host until we find the specified task
            if play_context.start_at_task is not None and not start_at_done:
                while True:
                    (s, task) = self.get_next_task_for_host(host, peek=True)
                    if s.run_state == self.ITERATING_COMPLETE:
                        break
                    # match either the bare task name or the fully qualified
                    # name, allowing shell-style wildcards
                    if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
                            task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
                        start_at_matched = True
                        break
                    else:
                        self.get_next_task_for_host(host)

                # finally, reset the host's state to ITERATING_SETUP
                if start_at_matched:
                    self._host_states[host.name].did_start_at_task = True
                    self._host_states[host.name].run_state = self.ITERATING_SETUP

        if start_at_matched:
            # we have our match, so clear the start_at_task field on the
            # play context to flag that we've started at a task (and future
            # plays won't try to advance)
            play_context.start_at_task = None

    def get_host_state(self, host):
        """Return a COPY of the given host's HostState (stub if unknown)."""
        # Since we're using the PlayIterator to carry forward failed hosts,
        # in the event that a previous host was not in the current inventory
        # we create a stub state for it now
        if host.name not in self._host_states:
            self._host_states[host.name] = HostState(blocks=[])

        return self._host_states[host.name].copy()

    def cache_block_tasks(self, block):
        """Deprecated no-op kept for third-party strategy compatibility."""
        # now a noop, we've changed the way we do caching and finding of
        # original task entries, but just in case any 3rd party strategies
        # are using this we're leaving it here for now
        return

    def get_next_task_for_host(self, host, peek=False):
        """Return ``(state, task)`` for the host's next task; ``task`` is
        None when iteration is complete. With ``peek=True`` the stored state
        is not advanced."""
        display.debug("getting the next task for host %s" % host.name)
        s = self.get_host_state(host)

        task = None
        if s.run_state == self.ITERATING_COMPLETE:
            display.debug("host %s is done iterating, returning" % host.name)
            return (s, None)

        (s, task) = self._get_next_task_from_state(s, host=host, peek=peek)

        if not peek:
            self._host_states[host.name] = s

        display.debug("done getting next task for host %s" % host.name)
        display.debug(" ^ task is: %s" % task)
        display.debug(" ^ state is: %s" % s)
        return (s, task)

    def _get_next_task_from_state(self, state, host, peek, in_child=False):
        """Advance ``state`` through the setup/tasks/rescue/always state
        machine until a task is produced or iteration completes. Recurses
        into child states for nested blocks (``in_child=True``)."""
        task = None

        # try and find the next task, given the current state.
        while True:
            # try to get the current block from the list of blocks, and
            # if we run past the end of the list we know we're done with
            # this block
            try:
                block = state._blocks[state.cur_block]
            except IndexError:
                state.run_state = self.ITERATING_COMPLETE
                return (state, None)

            if state.run_state == self.ITERATING_SETUP:
                # First, we check to see if we were pending setup. If not, this is
                # the first trip through ITERATING_SETUP, so we set the pending_setup
                # flag and try to determine if we do in fact want to gather facts for
                # the specified host.
                if not state.pending_setup:
                    state.pending_setup = True

                    # Gather facts if the default is 'smart' and we have not yet
                    # done it for this host; or if 'explicit' and the play sets
                    # gather_facts to True; or if 'implicit' and the play does
                    # NOT explicitly set gather_facts to False.

                    gathering = C.DEFAULT_GATHERING
                    implied = self._play.gather_facts is None or boolean(self._play.gather_facts, strict=False)

                    if (gathering == 'implicit' and implied) or \
                            (gathering == 'explicit' and boolean(self._play.gather_facts, strict=False)) or \
                            (gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('_ansible_facts_gathered', False))):
                        # The setup block is always self._blocks[0], as we inject it
                        # during the play compilation in __init__ above.
                        setup_block = self._blocks[0]
                        if setup_block.has_tasks() and len(setup_block.block) > 0:
                            task = setup_block.block[0]
                else:
                    # This is the second trip through ITERATING_SETUP, so we clear
                    # the flag and move onto the next block in the list while setting
                    # the run state to ITERATING_TASKS
                    state.pending_setup = False

                    state.run_state = self.ITERATING_TASKS
                    if not state.did_start_at_task:
                        state.cur_block += 1
                        state.cur_regular_task = 0
                        state.cur_rescue_task = 0
                        state.cur_always_task = 0
                        state.tasks_child_state = None
                        state.rescue_child_state = None
                        state.always_child_state = None

            elif state.run_state == self.ITERATING_TASKS:
                # clear the pending setup flag, since we're past that and it didn't fail
                if state.pending_setup:
                    state.pending_setup = False

                # First, we check for a child task state that is not failed, and if we
                # have one recurse into it for the next task. If we're done with the child
                # state, we clear it and drop back to getting the next task from the list.
                if state.tasks_child_state:
                    (state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host, peek=peek, in_child=True)
                    if self._check_failed_state(state.tasks_child_state):
                        # failed child state, so clear it and move into the rescue portion
                        state.tasks_child_state = None
                        self._set_failed_state(state)
                    else:
                        # get the next task recursively
                        if task is None or state.tasks_child_state.run_state == self.ITERATING_COMPLETE:
                            # we're done with the child state, so clear it and continue
                            # back to the top of the loop to get the next task
                            state.tasks_child_state = None
                            continue
                else:
                    # First here, we check to see if we've failed anywhere down the chain
                    # of states we have, and if so we move onto the rescue portion. Otherwise,
                    # we check to see if we've moved past the end of the list of tasks. If so,
                    # we move into the always portion of the block, otherwise we get the next
                    # task from the list.
                    if self._check_failed_state(state):
                        state.run_state = self.ITERATING_RESCUE
                    elif state.cur_regular_task >= len(block.block):
                        state.run_state = self.ITERATING_ALWAYS
                    else:
                        task = block.block[state.cur_regular_task]
                        # if the current task is actually a child block, create a child
                        # state for us to recurse into on the next pass
                        if isinstance(task, Block):
                            state.tasks_child_state = HostState(blocks=[task])
                            state.tasks_child_state.run_state = self.ITERATING_TASKS
                            # since we've created the child state, clear the task
                            # so we can pick up the child state on the next pass
                            task = None
                        state.cur_regular_task += 1

            elif state.run_state == self.ITERATING_RESCUE:
                # The process here is identical to ITERATING_TASKS, except instead
                # we move into the always portion of the block.
                if host.name in self._play._removed_hosts:
                    self._play._removed_hosts.remove(host.name)

                if state.rescue_child_state:
                    (state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek, in_child=True)
                    if self._check_failed_state(state.rescue_child_state):
                        state.rescue_child_state = None
                        self._set_failed_state(state)
                    else:
                        if task is None or state.rescue_child_state.run_state == self.ITERATING_COMPLETE:
                            state.rescue_child_state = None
                            continue
                else:
                    if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
                        state.run_state = self.ITERATING_ALWAYS
                    elif state.cur_rescue_task >= len(block.rescue):
                        # a non-empty rescue section that ran to completion
                        # clears the accumulated failure state
                        if len(block.rescue) > 0:
                            state.fail_state = self.FAILED_NONE
                        state.run_state = self.ITERATING_ALWAYS
                        state.did_rescue = True
                    else:
                        task = block.rescue[state.cur_rescue_task]
                        if isinstance(task, Block):
                            state.rescue_child_state = HostState(blocks=[task])
                            state.rescue_child_state.run_state = self.ITERATING_TASKS
                            task = None
                        state.cur_rescue_task += 1

            elif state.run_state == self.ITERATING_ALWAYS:
                # And again, the process here is identical to ITERATING_TASKS, except
                # instead we either move onto the next block in the list, or we set the
                # run state to ITERATING_COMPLETE in the event of any errors, or when we
                # have hit the end of the list of blocks.
                if state.always_child_state:
                    (state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek, in_child=True)
                    if self._check_failed_state(state.always_child_state):
                        state.always_child_state = None
                        self._set_failed_state(state)
                    else:
                        if task is None or state.always_child_state.run_state == self.ITERATING_COMPLETE:
                            state.always_child_state = None
                            continue
                else:
                    if state.cur_always_task >= len(block.always):
                        if state.fail_state != self.FAILED_NONE:
                            state.run_state = self.ITERATING_COMPLETE
                        else:
                            state.cur_block += 1
                            state.cur_regular_task = 0
                            state.cur_rescue_task = 0
                            state.cur_always_task = 0
                            state.run_state = self.ITERATING_TASKS
                            state.tasks_child_state = None
                            state.rescue_child_state = None
                            state.always_child_state = None
                            state.did_rescue = False

                            # we're advancing blocks, so if this was an end-of-role block we
                            # mark the current role complete
                            if block._eor and host.name in block._role._had_task_run and not in_child and not peek:
                                block._role._completed[host.name] = True
                    else:
                        task = block.always[state.cur_always_task]
                        if isinstance(task, Block):
                            state.always_child_state = HostState(blocks=[task])
                            state.always_child_state.run_state = self.ITERATING_TASKS
                            task = None
                        state.cur_always_task += 1

            elif state.run_state == self.ITERATING_COMPLETE:
                return (state, None)

            # if something above set the task, break out of the loop now
            if task:
                break

        return (state, task)

    def _set_failed_state(self, state):
        """Mark ``state`` failed for its current section, recursing into the
        active child state, and route the run state to rescue/always/complete
        as appropriate. Returns the (mutated) state."""
        if state.run_state == self.ITERATING_SETUP:
            state.fail_state |= self.FAILED_SETUP
            state.run_state = self.ITERATING_COMPLETE
        elif state.run_state == self.ITERATING_TASKS:
            if state.tasks_child_state is not None:
                state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
            else:
                state.fail_state |= self.FAILED_TASKS
                if state._blocks[state.cur_block].rescue:
                    state.run_state = self.ITERATING_RESCUE
                elif state._blocks[state.cur_block].always:
                    state.run_state = self.ITERATING_ALWAYS
                else:
                    state.run_state = self.ITERATING_COMPLETE
        elif state.run_state == self.ITERATING_RESCUE:
            if state.rescue_child_state is not None:
                state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
            else:
                state.fail_state |= self.FAILED_RESCUE
                if state._blocks[state.cur_block].always:
                    state.run_state = self.ITERATING_ALWAYS
                else:
                    state.run_state = self.ITERATING_COMPLETE
        elif state.run_state == self.ITERATING_ALWAYS:
            if state.always_child_state is not None:
                state.always_child_state = self._set_failed_state(state.always_child_state)
            else:
                state.fail_state |= self.FAILED_ALWAYS
                state.run_state = self.ITERATING_COMPLETE
        return state

    def mark_host_failed(self, host):
        """Record a failure for ``host`` and add it to the play's removed hosts."""
        s = self.get_host_state(host)
        display.debug("marking host %s failed, current state: %s" % (host, s))
        s = self._set_failed_state(s)
        display.debug("^ failed state is now: %s" % s)
        self._host_states[host.name] = s
        self._play._removed_hosts.append(host.name)

    def get_failed_hosts(self):
        """Return ``{host_name: True}`` for every host whose state is failed."""
        return dict((host, True) for (host, state) in iteritems(self._host_states) if self._check_failed_state(state))

    def _check_failed_state(self, state):
        """Return True if ``state`` (or the relevant child state) represents a
        failure that will not be recovered by a pending rescue/always section."""
        if state is None:
            return False
        elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
            return True
        elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
            return True
        elif state.fail_state != self.FAILED_NONE:
            # a failure flag is set, but rescue/always sections still to run
            # may yet recover it
            if state.run_state == self.ITERATING_RESCUE and state.fail_state & self.FAILED_RESCUE == 0:
                return False
            elif state.run_state == self.ITERATING_ALWAYS and state.fail_state & self.FAILED_ALWAYS == 0:
                return False
            else:
                return not (state.did_rescue and state.fail_state & self.FAILED_ALWAYS == 0)
        elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state):
            cur_block = state._blocks[state.cur_block]
            if len(cur_block.rescue) > 0 and state.fail_state & self.FAILED_RESCUE == 0:
                return False
            else:
                return True
        return False

    def is_failed(self, host):
        """True if the host's current state counts as failed."""
        s = self.get_host_state(host)
        return self._check_failed_state(s)

    def get_active_state(self, state):
        '''
        Finds the active state, recursively if necessary when there are child states.
        '''
        if state.run_state == self.ITERATING_TASKS and state.tasks_child_state is not None:
            return self.get_active_state(state.tasks_child_state)
        elif state.run_state == self.ITERATING_RESCUE and state.rescue_child_state is not None:
            return self.get_active_state(state.rescue_child_state)
        elif state.run_state == self.ITERATING_ALWAYS and state.always_child_state is not None:
            return self.get_active_state(state.always_child_state)
        return state

    def is_any_block_rescuing(self, state):
        '''
        Given the current HostState state, determines if the current block, or any child blocks,
        are in rescue mode.

        NOTE(review): only tasks_child_state is inspected here; nested blocks
        inside rescue/always sections are not checked -- confirm intended.
        '''
        if state.run_state == self.ITERATING_RESCUE:
            return True
        if state.tasks_child_state is not None:
            return self.is_any_block_rescuing(state.tasks_child_state)
        return False

    def get_original_task(self, host, task):
        """Deprecated no-op kept for third-party strategy compatibility."""
        # now a noop because we've changed the way we do caching
        return (None, None)

    def _insert_tasks_into_state(self, state, task_list):
        """Splice ``task_list`` into the active section of ``state`` at the
        current cursor position (recursing into child states), by replacing
        the current block with a modified copy. Returns the state."""
        # if we've failed at all, or if the task list is empty, just return the current state
        if state.fail_state != self.FAILED_NONE and state.run_state not in (self.ITERATING_RESCUE, self.ITERATING_ALWAYS) or not task_list:
            return state

        if state.run_state == self.ITERATING_TASKS:
            if state.tasks_child_state:
                state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list)
            else:
                target_block = state._blocks[state.cur_block].copy()
                before = target_block.block[:state.cur_regular_task]
                after = target_block.block[state.cur_regular_task:]
                target_block.block = before + task_list + after
                state._blocks[state.cur_block] = target_block
        elif state.run_state == self.ITERATING_RESCUE:
            if state.rescue_child_state:
                state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list)
            else:
                target_block = state._blocks[state.cur_block].copy()
                before = target_block.rescue[:state.cur_rescue_task]
                after = target_block.rescue[state.cur_rescue_task:]
                target_block.rescue = before + task_list + after
                state._blocks[state.cur_block] = target_block
        elif state.run_state == self.ITERATING_ALWAYS:
            if state.always_child_state:
                state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list)
            else:
                target_block = state._blocks[state.cur_block].copy()
                before = target_block.always[:state.cur_always_task]
                after = target_block.always[state.cur_always_task:]
                target_block.always = before + task_list + after
                state._blocks[state.cur_block] = target_block

        return state

    def add_tasks(self, host, task_list):
        """Insert ``task_list`` into the host's state at its current position."""
        self._host_states[host.name] = self._insert_tasks_into_state(self.get_host_state(host), task_list)
| 2ndQuadrant/ansible | lib/ansible/executor/play_iterator.py | Python | gpl-3.0 | 27,052 |
import unittest
from vsvbp.container import Item, Bin, Instance
from vsvbp.generator import generator
class ItemBinTestCase(unittest.TestCase):
    """Sanity checks for Item/Bin/Instance construction and printing."""

    @staticmethod
    def _make(factory, vector, size):
        """Build an Item or Bin from *vector* and stamp its size attribute."""
        obj = factory(vector)
        obj.size = size
        return obj

    def setUp(self):
        """Create four sized items, three sized bins, and an Instance over them."""
        item_specs = [([1, 2, 9], 1), ([4, 5, 3], 2), ([0, 1, 0], 3), ([9, 8, 7], 0)]
        self.i1, self.i2, self.i3, self.i4 = [self._make(Item, req, sz) for req, sz in item_specs]
        self.items = [self.i4, self.i3, self.i2, self.i1]

        bin_specs = [([5, 8, 4], 1), ([100, 0, 100], 2), ([1, 2, 9], 3)]
        self.b1, self.b2, self.b3 = [self._make(Bin, cap, sz) for cap, sz in bin_specs]
        self.bins = [self.b1, self.b2, self.b3]

        self.ins = Instance(self.items, self.bins)

    def testInstance(self):
        """__str__ of an Instance lists the items, then the bins."""
        expected = "Items:\n" + str(self.items) + "\nBins:\n" + str(self.bins)
        assert str(self.ins) == expected
# def testGenerator(self):
# iss=generator(2,2,.5,seed=0)
# assert iss.items[1].requirements==[356, 197]
# assert iss.bins[1].capacities == [516,411] | FreshetDMS/FDCapacityPlanner | tests/test_generator.py | Python | gpl-3.0 | 941 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.test import TestCase
class TasksTest(TestCase):
    """
    Tests geonode.messaging
    """

    def setUp(self):
        """Provision the admin credentials used by the messaging tests."""
        self.adm_un, self.adm_pw = "admin", "admin"
| kartoza/geonode | geonode/tasks/tests.py | Python | gpl-3.0 | 1,025 |
# -*- coding: utf-8 -*-
from Components.Language import language
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
import gettext
# gettext domain and locale directory (relative to the enigma2 plugin root)
PluginLanguageDomain = "OpenWebif"
PluginLanguagePath = "Extensions/OpenWebif/locale"
def localeInit():
    """Bind the plugin's gettext domain to its on-disk locale directory."""
    locale_dir = resolveFilename(SCOPE_PLUGINS, PluginLanguagePath)
    gettext.bindtextdomain(PluginLanguageDomain, locale_dir)
def _(txt):
    """Translate *txt* via the plugin domain; fall back to the global catalogue
    when the plugin catalogue has no translation (dgettext returns the input
    unchanged in that case)."""
    translated = gettext.dgettext(PluginLanguageDomain, txt)
    if translated == txt:
        translated = gettext.gettext(txt)
    return translated
# Bind the domain now and re-bind whenever the active enigma2 language changes.
localeInit()
language.addCallback(localeInit)
| pr2git/e2openplugin-OpenWebif | plugin/__init__.py | Python | gpl-3.0 | 509 |
import math
from scipy import integrate, optimize
import numpy as np
import matplotlib.pyplot as plt
import numpy. polynomial.chebyshev as cheb
from matplotlib import rc
def iphi(x):
    """Integrand used by phi(): sqrt(9.8/8) * (1 - cos x) / sqrt((x - sin x) * sin(x/2)).

    Singular at x = 0 (both factors under the square root vanish), which is
    why phi() integrates from a small positive cutoff instead of 0.

    Accepts scalars or numpy arrays (elementwise).
    """
    # def instead of a lambda assignment (PEP 8 E731); behavior is identical.
    return np.sqrt(9.8 / 8.) * (1. - np.cos(x)) / \
        np.sqrt((x - np.sin(x)) * np.sin(x / 2.))
def getChebNodes(N):
    """Return the N+1 Chebyshev-Gauss-Lobatto nodes -cos(k*pi/N), k = 0..N,
    in ascending order over [-1, 1]."""
    angles = np.pi * np.arange(N + 1) / N
    return -np.cos(angles)
def phi(t):
    """Evaluate c*tol + integral of iphi from tol to each abscissa in *t*,
    where c = sqrt(9.8*3/8).

    The integrand iphi is singular at 0, so a small cutoff tol is used; for
    abscissae at or below the cutoff the value is approximated linearly as
    c * t. Returns a numpy array the same length as *t*.
    """
    tol = 1e-5  # ~ (machine epsilon)^(1/3)
    c = np.sqrt(9.8 * 3. / 8.)
    out = np.zeros(len(t))
    for idx, ti in enumerate(t):
        if ti <= tol:
            out[idx] = c * ti
        else:
            tail, _ = integrate.quad(iphi, tol, ti, epsabs=1e-10)
            out[idx] = c * tol + tail
    return out
def getTheta(x):
    """Invert A = (theta - sin(theta)) / 8 for each A in *x*.

    Uses Ridder's method on the bracket [0, 2*pi], where the left endpoint
    gives -A and the right gives pi/4 - A, so any A in [0, pi/4] is bracketed.
    Returns a list of theta values.
    """
    roots = []
    for target in x:
        residual = lambda th: 1. / 8. * (th - np.sin(th)) - target
        roots.append(optimize.ridder(residual, 0, 2 * np.pi))
    return roots
def fX(a, p, a0, af):
    """Map a in [a0, af] onto [-1, 1] through the power law ((a-a0)/(af-a0))**p.

    Inverse of fA with the same (p, a0, af)."""
    frac = (a - a0) / (af - a0)
    return 2 * pow(frac, p) - 1
def fA(x, p, a0, af):
return (af - a0) * pow((x + 1.) / 2., 1. / p) + a0
def main():
    """Experiment driver (Python 2 print syntax throughout).

    Sweeps the mapping exponent alpha, fits degree-N Chebyshev approximations
    to phi on two sub-intervals ([0, pi/8] and [pi/8, pi/4] in A), records the
    last Chebyshev coefficient and the approximation error per alpha, plots
    both on a semilog axis, and writes sampled values/errors to 'blah.txt'.
    """
    M = 3 * pow(2, 5)
    N = 15  # Chebyshev degree
    # good values
    a1 = np.pi / 8.
    a2 = np.pi / 4.
    alphas = np.array([i / float(M) for i in range(1, M + 3)])
    # per-alpha records: last Chebyshev coefficient (r) and fit error (err),
    # one column per sub-interval
    r = np.zeros((len(alphas), 2))
    err = np.zeros((len(alphas), 2))
    # reference values of phi on a fine grid, used to measure fit error
    A1 = np.linspace(0, a1, 50)
    T1 = getTheta(A1)
    Phitrue1 = phi(T1)
    A2 = np.linspace(a1, a2, 50)
    T2 = getTheta(A2)
    Phitrue2 = phi(T2)
    for k in range(len(alphas)):
        p = alphas[k]
        x = getChebNodes(N)
        print x
        #ax1 = [np.pi/8.*pow((xi+1.)/2,1./p) for xi in x]
        # map the Chebyshev nodes into each A sub-interval with exponent p
        ax1 = fA(x, p, 0, a1)
        ax2 = fA(x, p, a2, a1)
        #ax2 = [np.pi/8.+np.pi/8.*pow((xi+1.)/2,1./p) for xi in x]
        theta1 = getTheta(ax1)
        theta2 = getTheta(ax2)
        # print "ax2"
        #print (ax2-1./8.*(theta2-np.sin(theta2)))
        # print "Theta"
        # print theta
        #print (1./8*(theta-np.sin(theta))-ax)
        phi1 = phi(theta1)
        phi2 = phi(theta2)
        fp1 = cheb.chebfit(x, phi1, N)
        fp2 = cheb.chebfit(x, phi2, N)
        print "Chebyshev Coeffs"
        print fp2
        # magnitude of the highest-order coefficient: proxy for convergence
        r[k, 0] = abs(fp1[-1])
        r[k, 1] = abs(fp2[-1])
        err[k, 0] = np.linalg.norm(
            Phitrue1 - cheb.chebval(fX(A1, p, 0, a1), fp1))
        err[k, 1] = np.linalg.norm(
            Phitrue2 - cheb.chebval(fX(A2, p, a2, a1), fp2))
    print "Nth coefficient"
    print r
    print "Error"
    print err
    # print alphas
    # refit once with the hand-picked "good" exponents for each sub-interval
    p1 = 1. / 3.
    p2 = 5. / 12.
    ax1 = fA(x, p1, 0., a1)
    ax2 = fA(x, p2, a2, a1)
    theta1 = getTheta(ax1)
    theta2 = getTheta(ax2)
    phi1 = phi(theta1)
    phi2 = phi(theta2)
    fp1 = cheb.chebfit(x, phi1, N)
    fp2 = cheb.chebfit(x, phi2, N)
    print fp1
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    plt.subplots_adjust(hspace=0.4)
    plt.subplot(211)
    plt.semilogy(alphas, r)
    plt.legend([r"$\phi_1(A)$", r"$\phi_2(A)$"])
    xlab = ['1/6', '1/4', '1/3', '5/12', '1/2', '2/3', '3/4', '5/6', '1']
    ticks = [1. / 6., 1. / 4., 1. / 3., 5. / 12.,
             1. / 2., 2. / 3., 3. / 4., 5. / 6., 1.]
    plt.xticks(ticks, xlab)
    plt.grid(True)
    plt.title(r'$\phi_i(x) \approx \sum_k a_k T_k(cx^{\alpha}-1)$')
    plt.ylabel(r'a_N')
    plt.xlabel(r'$\alpha$')
    plt.subplot(212)
    plt.xlabel(r'$\alpha$')
    plt.ylabel(r'$\phi_i(x)-\phi_i^N(x)$')
    plt.semilogy(alphas, err)
    plt.xticks(ticks, xlab)
    plt.grid(True)
    plt.title('Error')
    plt.legend([r"$\phi_1(A)$", r"$\phi_2(A)$"])
    # dump (A, error, approximation) samples across [0, 2*pi] in theta
    f = open("blah.txt", "w")
    MM = 200
    t = np.linspace(0, 2 * np.pi, MM)
    aa = 1. / 8. * (t - np.sin(t))
    phit = phi(t)
    phit1 = cheb.chebval(fX(aa[0:MM / 2], p1, 0., a1), fp1)
    phit2 = cheb.chebval(fX(aa[MM / 2:], p2, a2, a1), fp2)
    for k in range(len(t)):
        if k < MM / 2:
            ha = phit1[k]
        else:
            ha = phit2[k - MM / 2]
        f.write("%s %s %s\n" % (aa[k], phit[k] - ha, ha))
    f.close()
    # plt.show()
    print phi([2 * np.pi])
if __name__ == "__main__":
main()
| liebannam/pipes | interpolation_tests/intphisucks.py | Python | gpl-3.0 | 3,989 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-27 16:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.1: adds the optional ``best_response``
    # foreign key (Question -> Response). Applied migrations should not be
    # hand-edited.

    dependencies = [
        ('reddit', '0009_auto_20170527_1814'),
    ]

    operations = [
        migrations.AddField(
            model_name='question',
            name='best_response',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='best_response', to='reddit.Response', verbose_name='Mejor Respuesta'),
        ),
    ]
| hcosta/escueladevideojuegos.net-backend-django | edv/reddit/migrations/0010_question_best_response.py | Python | gpl-3.0 | 628 |
"""Some eastereggs just for fun"""
from utils import Filehandler
class EasterEggFunctions(object):
    """Easter Egg functions: post canned quote lines or stored URLs to a channel.

    All commands share the bot command signature
    ``(channel, callback, msg, nck, hq, keys, pb)``; only ``channel`` and
    ``callback`` (an object with a ``say(channel, text)`` method) are used.
    """

    def __init__(self):
        # File access is delegated to a helper so it can be stubbed in tests.
        self.fhandler = Filehandler()

    def _say_random_line(self, channel, callback, filename):
        """Pick a random line from *filename* and post it to *channel*.

        Shared implementation for the quote commands below (previously
        duplicated in each of them).
        """
        callback.say(channel, self.fhandler.getrandomline(filename))

    def darkwing(self, channel, callback, msg=None, nck=None, hq=None, keys=None, pb=None):
        """Post a random line"""
        self._say_random_line(channel, callback, "./mylines/darkwing.txt")

    def balu(self, channel, callback, msg=None, nck=None, hq=None, keys=None, pb=None):
        """Post a random line"""
        self._say_random_line(channel, callback, "./mylines/balu.txt")

    def raspel(self, channel, callback, msg=None, nck=None, hq=None, keys=None, pb=None):
        """Post url to raspel"""
        # whole-file content, not a random line
        callback.say(channel, self.fhandler.getcontent("./myurls/raspel.url"))
| fast90/christian | commands/eggfunctions.py | Python | gpl-3.0 | 964 |
import helpers
from config import render, db
class Keyword(object):
    """web.py handler: per-keyword counts of installed packages and reporting hosts."""

    def GET(self):
        """Serve keyword statistics as JSON, or as an HTML page with a bar chart.

        NOTE(review): table/column names suggest a gentoostats schema -- confirm.
        """
        # one row per keyword: distinct package ids and distinct host uuids
        keyword_count = db.query('SELECT KEYWORD,\
                COUNT(DISTINCT IPKEY) AS PACKAGES,\
                COUNT(DISTINCT UUID) AS HOSTS\
                FROM GLOBAL_KEYWORDS NATURAL JOIN KEYWORDS\
                NATURAL JOIN INSTALLED_PACKAGES GROUP BY KEYWORD')
        keyword_data = dict()
        for t in keyword_count:
            keyword_data[t['KEYWORD']] = {'HOSTS':t['HOSTS'], 'PACKAGES':t['PACKAGES']}
        if helpers.is_json_request():
            return helpers.serialize(keyword_data)
        else:
            # generate plot
            x_ticklabels = keyword_data.keys()
            y_values = [ keyword_data[k]['PACKAGES'] for k in x_ticklabels ]
            keyword_plot = helpers.barchart(title = 'Installed packages per keyword',
                    x_label = 'Keyword', y_label = 'Number of Packages',
                    x_ticklabels = x_ticklabels, y_values = y_values)
            return render.keyword(keyword_data, keyword_plot)
| vikraman/gentoostats | server/kwd.py | Python | gpl-3.0 | 1,062 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import os
import sys
import random
import itertools
from gensim.models.phrases import Phrases
# Python 2/3 compat: py3 has no ``unicode`` builtin, alias it to str.
if sys.version_info[0] >= 3:
    unicode = str

module_path = os.path.dirname(__file__)  # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)

WORDS = ['PHRASE%i' % i for i in range(10)]  # selected words for phrases
class TestPhrasesModel(unittest.TestCase):
    """Tests for incremental vocabulary accumulation in gensim's Phrases."""

    @staticmethod
    def get_word():
        """Generate a random word of 1-12 letters from A-P (chr 65-80)."""
        word_len = random.randint(1, 12)
        return ''.join(chr(random.randint(65, 80)) for i in range(word_len))

    @staticmethod
    def get_sentence(size=10000):
        """Generator for random sentences.
        10% probability to return sentence containing only preselected words"""
        for i in range(size):
            if random.random() > 0.9:
                yield [WORDS[random.randint(0, len(WORDS) -1)] for i in range(random.randint(2, 10))] + ["."]
            else:
                yield [TestPhrasesModel.get_word() for i in range(random.randint(2, 10))] + ["."]

    def testUpdate(self):
        """Test adding one token.
        """
        special_token = 'non_present_token'
        phrases = Phrases(TestPhrasesModel.get_sentence(), min_count=1)
        present = special_token in phrases.vocab
        # NOTE(review): if phrases.vocab is a defaultdict, this read inserts
        # the key with count 0 -- confirm it does not skew later checks.
        freq = phrases.vocab[special_token]
        phrases.add_vocab([[special_token]])
        freq_after_change = phrases.vocab[special_token]
        present_after_change = special_token in phrases.vocab
        self.assertEqual(present, False, msg="Non-present token is marked as present.")
        self.assertEqual(present_after_change, True, msg="Present token is marked as non-present.")
        self.assertEqual(freq, 0, msg="Predicted non-zero freq for non-present token.")
        self.assertEqual(freq_after_change, 1, msg="Predicted non 1 freq for token inserted once.")

    def testFreqCount(self):
        """Test adding one token.
        """
        special_token = 'non_present_token'
        phrases = Phrases(None, min_count=1)
        current = iter([])
        # lazily chain the special token in front of 100 random batches,
        # then feed everything to add_vocab in a single pass
        for i in range(100):
            current = itertools.chain(current, iter([[special_token]]), TestPhrasesModel.get_sentence(i))
        phrases.add_vocab(current)
        freq = phrases.vocab[special_token]
        self.assertTrue(freq >= 100)
        current = iter([])
        for i in range(100):
            current = itertools.chain(current, iter([[special_token]]), TestPhrasesModel.get_sentence(i))
        phrases.add_vocab(current)
        freq = phrases.vocab[special_token]
        self.assertTrue(freq >= 200)
#endclass TestPhrasesModel
# Allow running this test module directly, with verbose debug logging.
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| janrygl/gensim | gensim/test/test_count_minimal_sketch_counter.py | Python | gpl-3.0 | 3,092 |
from __future__ import absolute_import
#from urllib.parse import urlparse, urlunparse
from builtins import str
from builtins import range
from django.conf import settings
# Avoid shadowing the login() and logout() views below.
from django.contrib.auth import (
REDIRECT_FIELD_NAME, get_user_model, login as auth_login,
logout as auth_logout, update_session_auth_hash,
)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm,AdminPasswordChangeForm,
)
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponseRedirect, QueryDict
from django.shortcuts import resolve_url
from django.shortcuts import render
from django.template.response import TemplateResponse
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
#from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.utils.translation import gettext_lazy as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
UserModel = get_user_model()
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.template import loader
import random
import string
#import user
from .forms import UserForm
from stack_configs.stack_functions import createInfluxDB
from stack_configs.ldap_functions import addToLDAPGroup,resetLDAPpassword,createLDAPuser
from stack_configs.grafana_functions import GrafanaUser,testObj
import logging
logger = logging.getLogger(__name__)
# Create your views here.
# Create your views here.
#from django.contrib.auth.forms import UserCreationForm
def index(request):
    """Render the public welcome page."""
    context = {
        'content': 'welcome',
        'has_permission': request.user.is_authenticated,
        'is_popup': False,
        'title': 'welcome!',
        'site_title': 'zibawa',
        'site_url': settings.SITE_URL,
    }
    template = loader.get_template('welcome.html')
    return HttpResponse(template.render(context, request))
def create_account(request):
    """Handle the signup form.

    On a valid POST, creates the user in Django's DB, then provisions the
    account across the stack (LDAP user + 'active'/'editor' groups, Grafana,
    InfluxDB). Any provisioning failure redirects to the error page; success
    redirects to /thanks/. GET (or invalid POST) renders the form.
    """
    # NOTE(review): 'template' is loaded but never used -- render() below is
    # given the template name directly.
    template = loader.get_template('admin/base_site.html')
    if request.method == "POST":
        form = UserForm(request.POST)
        if form.is_valid():
            password = form.cleaned_data['password']
            new_user = User.objects.create_user(**form.cleaned_data)
            #new_user.is_staff=True
            #new_user.save()
            # provisioning chain: each step must succeed before the next runs;
            # any failure falls through to the error redirect below
            if (createLDAPuser(new_user,password)):
                if (addToLDAPGroup(new_user.username,'active')):
                    if (addToLDAPGroup(new_user.username,'editor')):
                        # NOTE(review): createAndConfigureGrafana is not among
                        # the imports visible here; presumably defined later in
                        # this module -- confirm.
                        result=createAndConfigureGrafana(new_user,password)
                        if (result.status):
                            if createInfluxDB(new_user): #creates a user database in influx
                                return HttpResponseRedirect('/thanks/')
            return HttpResponseRedirect('/account_create_error/')
    else:
        form = UserForm()
    context = {
        'has_permission':request.user.is_authenticated,
        'is_popup':False,
        'form':form,
        'title':'New User Creation',
        'site_title':'zibawa',
        'site_url':settings.SITE_URL
    }
    return render(request,'form.html',context)
def thanks(request):
    """Render the post-signup confirmation page."""
    ctx = {
        'content': 'Thanks. Please log in to your dashboard',
        'title': 'Your account has been created',
        'is_popup': False,
        'has_permission': request.user.is_authenticated,
        'site_title': 'zibawa',
        'site_url': settings.SITE_URL,
    }
    tpl = loader.get_template('thanks.html')
    return HttpResponse(tpl.render(ctx, request))
def account_create_error(request):
    """Render a generic failure page shown when account provisioning fails."""
    ctx = {
        'content': 'Sorry. Something went wrong during the creation of your account. Please contact your administrator',
        'title': 'Error',
        'is_popup': False,
        'has_permission': request.user.is_authenticated,
        'site_title': 'zibawa',
        'site_url': settings.SITE_URL,
    }
    error_template = loader.get_template('admin/base_site.html')
    return HttpResponse(error_template.render(ctx, request))
def id_generator(size=10, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    return ''.join([random.choice(chars) for _ in range(size)])
# Doesn't need csrf_protect since no-one can guess the URL
@sensitive_post_parameters()
@never_cache
def zibawa_password_reset_confirm(request, uidb64=None, token=None,
        template_name='registration/password_reset_confirm.html',
        token_generator=default_token_generator,
        set_password_form=SetPasswordForm,
        post_reset_redirect=None,
        extra_context=None):
    """Check the hash in a password reset link and present a form for
    entering a new password.

    ZIBAWA NOTE: this view is copied from the Django default view
    (https://github.com/django/django/blob/master/django/contrib/auth/views.py)
    with modifications to also update the password in LDAP and Grafana
    after a successful Django-side reset.  The upstream view raised a
    RemovedInDjango21Warning recommending the class-based
    PasswordResetConfirmView().
    """
    assert uidb64 is not None and token is not None  # checked by URLconf
    if post_reset_redirect is None:
        post_reset_redirect = reverse('password_reset_complete')
    else:
        post_reset_redirect = resolve_url(post_reset_redirect)
    try:
        # urlsafe_base64_decode() decodes to bytestring
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = UserModel._default_manager.get(pk=uid)
    except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
        user = None
    if user is not None and token_generator.check_token(user, token):
        validlink = True
        title = _('Enter new password')
        if request.method == 'POST':
            form = set_password_form(user, request.POST)
            if form.is_valid():
                form.save()
                #ZIBAWA MODIFICATIONS START HERE
                new_password = form.cleaned_data['new_password1']
                # Propagate the new password to LDAP first, then Grafana.
                if(resetLDAPpassword(user.username,new_password)):
                    #change Grafana password
                    # NOTE(review): this uses request.user, but the account
                    # being reset is `user`; on an anonymous reset-link visit
                    # request.user may not be the target account -- confirm.
                    grafana_user=GrafanaUser(request.user.id, request.user.username,new_password,request.user.email)
                    logger.debug('resetting Grafana password for %s',request.user.username)
                    if not (grafana_user.changeGrafanaPassword()):
                        #if fails, currently we log but carry on regardless.
                        logger.warning('couldnt reset Grafana password for %s',request.user.username)
                    return HttpResponseRedirect(post_reset_redirect)
                else:
                    #if result from LDAP is not what we expect, or if no result
                    logger.warning('couldnt reset LDAP password')
                    title = _('Could not reset LDAP password')
                #ZIBAWA MODIFICATIONS END HERE
        else:
            form = set_password_form(user)
    else:
        validlink = False
        form = None
        title = _('Password reset unsuccessful')
    context = {
        'form': form,
        'title': title,
        'validlink': validlink,
        'is_popup':False,
        'has_permission':request.user.is_authenticated,
        'site_title':'zibawa',
        'site_url':settings.SITE_URL
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
@sensitive_post_parameters()
@csrf_protect
@login_required
def zibawa_password_change(request,
        template_name='registration/password_change_form.html',
        post_change_redirect=None,
        password_change_form=SetPasswordForm,
        extra_context=None):
    """Let a logged-in user change their password.

    Copied from the Django default password_change() view (which upstream
    superseded with the class-based PasswordChangeView), modified to also
    propagate the new password to LDAP and Grafana.
    """
    if post_change_redirect is None:
        post_change_redirect = reverse('password_change_done')
    else:
        post_change_redirect = resolve_url(post_change_redirect)
    if request.method == "POST":
        form = password_change_form(user=request.user, data=request.POST)
        if form.is_valid():
            form.save()
            # Updating the password logs out all other sessions for the user
            # except the current one.
            #ZIBAWA MODIFICATIONS START HERE
            new_password = form.cleaned_data['new_password1']
            if(resetLDAPpassword(request.user.username,new_password)):
                logger.debug('reset LDAP password')
                # Keep the current session valid after the password change.
                update_session_auth_hash(request, form.user)
                #change Grafana password
                grafana_user=GrafanaUser(request.user.id, request.user.username,new_password,request.user.email)
                logger.debug('resetting Grafana password for %s',request.user.username)
                if not (grafana_user.changeGrafanaPassword()):
                    #if fails, currently we carry on regardless.
                    logger.warning('couldnt reset Grafana password for %s',request.user.username)
                return HttpResponseRedirect(post_change_redirect)
            #if result from LDAP is not what we expect, or if no result
            else:
                logger.warning('couldnt reset LDAP password')
                context = {
                    'form': form,
                    'title': _('Could not reset LDAP password'),
                    'is_popup':False,
                    'has_permission':request.user.is_authenticated,
                    'site_title':'zibawa',
                    'site_url':settings.SITE_URL
                }
                return TemplateResponse(request, template_name, context)
            #ZIBAWA MODIFICATIONS END HERE
    else:
        form = password_change_form(user=request.user)
    context = {
        'form': form,
        'title': _('Password change'),
        'is_popup':False,
        'has_permission':request.user.is_authenticated,
        'site_title':'zibawa',
        'site_url':settings.SITE_URL
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
def createAndConfigureGrafana(zibawa_user, password):
    """Create and configure a Grafana account for *zibawa_user*.

    Creates the Grafana user (if absent), ensures it belongs to its own
    organisation, fixes permissions and attaches a datasource.

    Returns a ``testObj`` whose ``status`` indicates success and whose
    message is suitable for display to the end user.
    """
    grafana_user = GrafanaUser(zibawa_user.id, zibawa_user.username, password, zibawa_user.email)
    # Default result covers the "account already exists" case.
    result = testObj("GrafanaAccount", True, "Your account already exists on Grafana from a previous installation please contact your administrator")
    if not (grafana_user.exists()):
        # Pessimistic default; upgraded below as each step succeeds.
        # (fixed typo: "adminitrator" -> "administrator")
        result = testObj("GrafanaAccount", False, "We were unable to create your dashboard account on Grafana, please contact your administrator")
        logger.info('trying to create grafana user')
        if grafana_user.create():
            result = testObj("GrafanaAccount", True, "Your account has been created, but not configured")
            logger.info("trying to find non grafana admin org")
            if not (grafana_user.get_orgID()):
                logger.info("no org found for user, adding to own org")
                grafana_user.add_to_own_org()
                # run get_orgID again, to ensure the org was created correctly
                grafana_user.get_orgID()
            logger.info("running fix permissions for Grafana")
            grafana_user.fix_permissions()
            logger.info("running add datasource for Grafana")
            if (grafana_user.add_datasource()):
                result = testObj("GrafanaAccount", True, "Your account has been created and configured")
    return result
# Copyright 2016-2017 Alan F Rubin, Daniel C Esposito
#
# This file is part of Enrich2.
#
# Enrich2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Enrich2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Enrich2. If not, see <http://www.gnu.org/licenses/>.
"""
Enrich2 aligner module
======================
Module for alignment of variants to the wild type sequence.
This module is optional, and using it will dramatically increase runtime when
counting variants. It is only recommended for users who need to count
insertion and deletion variants (i.e. not coding sequences).
"""
from ctypes import c_int
import numpy as np
import logging
from ..base.utils import log_message
_AMBIVERT = False
try:
from ambivert.ambivert import gapped_alignment_to_cigar
from ambivert import align
# Reset the logging handlers after loading ambivert
for handler in logging.getLogger("ambivert").handlers:
handler.close()
logging.getLogger('ambivert').handlers = []
for handler in logging.getLogger().handlers:
handler.close()
logging.getLogger().handlers = []
logging.captureWarnings(False)
_AMBIVERT = True
except ImportError:
pass
__all__ = [
    "Aligner"
]


#: Default similarity matrix used by the aligner.
#: User-defined matrices must have this format: one nested dict per
#: nucleotide symbol giving its score against every other symbol, plus
#: scalar 'gap_open' and 'gap_extend' penalty entries.
_simple_similarity = {
    'A': {'A': 1, 'C': -1, 'G': -1, 'T': -1, 'N': 0, 'X': 0},
    'C': {'A': -1, 'C': 1, 'G': -1, 'T': -1, 'N': 0, 'X': 0},
    'G': {'A': -1, 'C': -1, 'G': 1, 'T': -1, 'N': 0, 'X': 0},
    'T': {'A': -1, 'C': -1, 'G': -1, 'T': 1, 'N': 0, 'X': 0},
    'N': {'A': 0, 'C': 0, 'G': 0, 'T': 0, 'N': 0, 'X': 0},
    'X': {'A': 0, 'C': 0, 'G': 0, 'T': 0, 'N': 0, 'X': 0},
    'gap_open': -1,
    'gap_extend': 0
}
class Aligner(object):
    """
    Class for performing alignment of two DNA sequences.

    This class implements `Needleman-Wunsch <http://en.wikipedia.org/wiki/
    Needleman%E2%80%93Wunsch_algorithm>`_ alignment.

    The :py:class:`~enrich2.sequence.aligner.Aligner` requires a scoring
    matrix when created. The format is a nested dictionary, with special
    ``'gap_open'`` and ``'gap_extend'`` entries for the gap penalties.

    The ``'X'`` nucleotide is a special case for unresolvable mismatches in
    :py:class:`~enrich2.libraries.overlap.OverlapSeqLib` variant data.

    Parameters
    ----------
    similarity : `dict`
        Similarity matrix used by the aligner, must contain a cost mapping
        between each of 'A', 'C', 'G', 'T', 'N', 'X'.
    backend : {'ambivert', 'enrich2'}, default: 'ambivert'
        Select the alignment backend. If backend is 'ambivert' then
        similarity is ignored.

    Attributes
    ----------
    similarity : `dict`
        Similarity matrix used by the aligner.
    matrix : :py:class:`~numpy.ndarray`
        The dynamically computed cost matrix.
    seq1 : `str`
        Reference sequence.
    seq2 : `str`
        The sequence that is to be aligned.
    calls : `int`
        Number of times `align` has been performed.

    Methods
    -------
    align
        Align two sequences using ``Needleman-Wunsch``.
    """

    _MAT = 1  # match
    _INS = 2  # insertion (with respect to wild type)
    _DEL = 3  # deletion (with respect to wild type)
    _END = 4  # end of traceback

    def __init__(self, similarity=_simple_similarity, backend='ambivert'):
        # Validate that the scoring matrix is square: every nucleotide key
        # must map to a dict scoring against exactly the same set of keys.
        similarity_keys = list(similarity.keys())
        if 'gap_open' in similarity_keys:
            similarity_keys.remove('gap_open')
        if 'gap_extend' in similarity_keys:
            similarity_keys.remove('gap_extend')

        for key in similarity_keys:
            keys_map_to_dicts = all(x in similarity[key]
                                    for x in similarity_keys)
            # True when this row has missing or extra entries.
            wrong_row_size = len(similarity[key]) != len(similarity_keys)
            if not keys_map_to_dicts or wrong_row_size:
                raise ValueError("Asymmetrical alignment scoring matrix")

        self.similarity = similarity
        if 'gap_open' not in self.similarity:
            raise ValueError(
                "No gap_open open penalty in alignment scoring matrix.")
        if 'gap_extend' not in self.similarity:
            raise ValueError(
                "No gap_open extend penalty in alignment scoring matrix.")

        self.matrix = None
        self.seq1 = None
        self.seq2 = None
        self.calls = 0

        # TODO: re-enable ambivert backend selection once verified; for now
        # the pure-Python enrich2 implementation is always used, regardless
        # of the *backend* argument.
        self.align = self.align_enrich2
        log_message(
            logging_callback=logging.info,
            msg="Using enrich2 alignment backend.",
            extra={'oname': 'Aligner'}
        )

    def align_ambivert(self, seq1, seq2):
        """
        Aligns the two sequences, *seq1* and *seq2* and returns a list of
        tuples describing the differences between the sequences.

        The tuple format is ``(i, j, type, length)``, where ``i`` and ``j``
        are the positions in *seq1* and *seq2*, respectively, and type is one
        of ``"match"``, ``"mismatch"``, ``"insertion"``, or ``"deletion"``.
        For indels, the ``length`` value is the number of bases inserted or
        deleted with respect to *seq1* starting at ``i``.

        Parameters
        ----------
        seq1 : `str`
            Reference sequence.
        seq2 : `str`
            The sequence that is to be aligned.

        Returns
        -------
        `list`
            list of tuples describing the differences between the sequences.
        """
        if not isinstance(seq1, str):
            raise TypeError("First sequence must be a str type")
        if not isinstance(seq2, str):
            raise TypeError("Second sequence must be a str type")
        if not seq1:
            raise ValueError("First sequence must not be empty.")
        if not seq2:
            raise ValueError("Second sequence must not be empty.")

        # np.int was a deprecated alias of the builtin int and was removed
        # in NumPy 1.24; use the builtin directly.
        self.matrix = np.ndarray(
            shape=(len(seq1) + 1, len(seq2) + 1),
            dtype=np.dtype([('score', int), ('trace', np.byte)])
        )
        seq1 = seq1.upper()
        seq2 = seq2.upper()
        a1, a2, *_ = self.needleman_wunsch(
            seq1, seq2,
            gap_open=self.similarity['gap_open'],
            gap_extend=self.similarity['gap_extend']
        )
        backtrace = cigar_to_backtrace(
            seq1, seq2,
            gapped_alignment_to_cigar(a1, a2)[0]
        )
        return backtrace

    def align_enrich2(self, seq1, seq2):
        """
        Aligns the two sequences, *seq1* and *seq2* and returns a list of
        tuples describing the differences between the sequences.

        The tuple format is ``(i, j, type, length)``, where ``i`` and ``j``
        are the positions in *seq1* and *seq2*, respectively, and type is one
        of ``"match"``, ``"mismatch"``, ``"insertion"``, or ``"deletion"``.
        For indels, the ``length`` value is the number of bases inserted or
        deleted with respect to *seq1* starting at ``i``.

        Parameters
        ----------
        seq1 : `str`
            Reference sequence.
        seq2 : `str`
            The sequence that is to be aligned.

        Returns
        -------
        `list`
            list of tuples describing the differences between the sequences.
        """
        if not isinstance(seq1, str):
            raise TypeError("First sequence must be a str type")
        if not isinstance(seq2, str):
            raise TypeError("Second sequence must be a str type")
        if not seq1:
            raise ValueError("First sequence must not be empty.")
        if not seq2:
            raise ValueError("Second sequence must not be empty.")

        # np.int was removed in NumPy 1.24; use the builtin int.
        self.matrix = np.ndarray(
            shape=(len(seq1) + 1, len(seq2) + 1),
            dtype=np.dtype([('score', int), ('trace', np.byte)])
        )
        seq1 = seq1.upper()
        seq2 = seq2.upper()

        # build matrix of scores/traceback information
        for i in range(len(seq1) + 1):
            self.matrix[i, 0] = (self.similarity['gap_open'] * i, Aligner._DEL)
        for j in range(len(seq2) + 1):
            self.matrix[0, j] = (self.similarity['gap_open'] * j, Aligner._INS)
        for i in range(1, len(seq1) + 1):
            for j in range(1, len(seq2) + 1):
                match = (self.matrix[i - 1, j - 1]['score'] +
                         self.similarity[seq1[i - 1]][seq2[j - 1]],
                         Aligner._MAT)
                delete = (self.matrix[i - 1, j]['score'] +
                          self.similarity['gap_open'], Aligner._DEL)
                insert = (self.matrix[i, j - 1]['score'] +
                          self.similarity['gap_open'], Aligner._INS)
                # Ties are resolved by max()'s left-to-right comparison of
                # the (score, trace) tuples.
                max_score = max(delete, insert, match, key=lambda x: x[0])
                self.matrix[i, j] = max_score
        self.matrix[0, 0] = (0, Aligner._END)

        # calculate alignment from the traceback
        i = len(seq1)
        j = len(seq2)
        traceback = list()
        while i > 0 or j > 0:
            if self.matrix[i, j]['trace'] == Aligner._MAT:
                if seq1[i - 1] == seq2[j - 1]:
                    traceback.append((i - 1, j - 1, "match", None))
                else:
                    traceback.append((i - 1, j - 1, "mismatch", None))
                i -= 1
                j -= 1
            elif self.matrix[i, j]['trace'] == Aligner._INS:
                pos_1 = 0 if (i - 1) < 0 else (i - 1)
                traceback.append((pos_1, j - 1, "insertion", 1))
                j -= 1
            elif self.matrix[i, j]['trace'] == Aligner._DEL:
                pos_2 = 0 if (j - 1) < 0 else (j - 1)
                traceback.append((i - 1, pos_2, "deletion", 1))
                i -= 1
            elif self.matrix[i, j]['trace'] == Aligner._END:
                pass
            else:
                raise RuntimeError("Invalid value in alignment traceback.")
        traceback.reverse()

        # combine runs of single-base indels into one event per run
        indel = None
        traceback_combined = list()
        for t in traceback:
            if t[2] == "insertion" or t[2] == "deletion":
                if indel is not None:
                    if t[2] == indel[2]:
                        indel[3] += t[3]
                    else:
                        raise RuntimeError("Aligner failed to combine indels. "
                                           "Check 'gap_open' penalty.")
                else:
                    indel = list(t)
            else:
                if indel is not None:
                    traceback_combined.append(tuple(indel))
                    indel = None
                traceback_combined.append(t)
        if indel is not None:
            traceback_combined.append(tuple(indel))

        self.calls += 1
        return traceback_combined

    def needleman_wunsch(self, seq1, seq2, gap_open=-1, gap_extend=0):
        """
        Wrapper method for Needleman-Wunsch (global) alignment using
        the plumb.bob C implementation

        Parameters
        ----------
        seq1 : `str`
            an ascii DNA sequence string. This is the query
            sequence and must be all upper case
        seq2 : `str`
            an ascii DNA sequence string. This is the reference
            sequence and may contain lower case soft masking
        gap_open : `int`
            Cost for a gap open.
        gap_extend : `int`
            Cost for a gap extension.

        Returns
        -------
        `tuple`
            A tuple containing aligned seq1, aligned seq2, start position
            in seq1 and start position in seq2
        """
        DNA_MAP = align.align_ctypes.make_map('ACGTNX', 'N', True)
        DNA_SCORE = make_dna_scoring_matrix(self.similarity)
        alignment = align.global_align(
            bytes(seq1, 'ascii'),
            len(seq1),
            bytes(seq2.upper(), 'ascii'),
            len(seq2),
            DNA_MAP[0],
            DNA_MAP[1],
            DNA_SCORE,
            gap_open, gap_extend
        )
        if '-' in seq1 or '-' in seq2:
            raise RuntimeError('Aligning Sequences with gaps is not supported',
                               seq1, seq2)
        # Global alignment always starts at position 0 of both sequences.
        start_seq1 = 0
        start_seq2 = 0
        frag = alignment[0].align_frag
        align_seq1 = ''
        align_seq2 = ''
        while frag:
            frag = frag[0]
            if frag.type == align.MATCH:
                f1 = seq1[frag.sa_start:frag.sa_start + frag.hsp_len]
                f2 = seq2[frag.sb_start:frag.sb_start + frag.hsp_len]
                align_seq1 += f1
                align_seq2 += f2
            elif frag.type == align.A_GAP:
                align_seq1 += '-' * frag.hsp_len
                align_seq2 += seq2[frag.sb_start:frag.sb_start + frag.hsp_len]
            elif frag.type == align.B_GAP:
                align_seq1 += seq1[frag.sa_start:frag.sa_start + frag.hsp_len]
                align_seq2 += '-' * frag.hsp_len
            frag = frag.next
        assert len(align_seq1) == len(align_seq2)
        # Free the C-allocated alignment structure.
        align.alignment_free(alignment)
        return align_seq1, align_seq2, start_seq1, start_seq2

    def smith_waterman(self, seq1, seq2, gap_open=-1, gap_extend=0):
        """
        Wrapper method for Smith-Waterman (local) alignment using
        the plumb.bob C implementation

        Parameters
        ----------
        seq1 : `str`
            an ascii DNA sequence string. This is the query
            sequence and must be all upper case
        seq2 : `str`
            an ascii DNA sequence string. This is the reference
            sequence and may contain lower case soft masking
        gap_open : `int`
            Cost for a gap open.
        gap_extend : `int`
            Cost for a gap extension.

        Returns
        -------
        `tuple`
            A tuple containing aligned seq1, aligned seq2, start position
            in seq1 and start position in seq2
        """
        DNA_MAP = align.align_ctypes.make_map('ACGTNX', 'N', True)
        DNA_SCORE = make_dna_scoring_matrix(self.similarity)
        alignment = align.local_align(
            bytes(seq1, 'ascii'), len(seq1),
            bytes(seq2.upper(), 'ascii'), len(seq2),
            DNA_MAP[0],
            DNA_MAP[1],
            DNA_SCORE,
            gap_open, gap_extend
        )
        if '-' in seq1 or '-' in seq2:
            raise RuntimeError('Aligning Sequences with gaps is not supported',
                               seq1, seq2)
        # Local alignment may begin anywhere; read the start offsets from
        # the first alignment fragment.
        start_seq1 = alignment.contents.align_frag.contents.sa_start
        start_seq2 = alignment.contents.align_frag.contents.sb_start
        frag = alignment[0].align_frag
        align_seq1 = ''
        align_seq2 = ''
        while frag:
            frag = frag[0]
            if frag.type == align.MATCH:
                f1 = seq1[frag.sa_start:frag.sa_start + frag.hsp_len]
                f2 = seq2[frag.sb_start:frag.sb_start + frag.hsp_len]
                align_seq1 += f1
                align_seq2 += f2
            elif frag.type == align.A_GAP:
                align_seq1 += '-' * frag.hsp_len
                align_seq2 += seq2[frag.sb_start:frag.sb_start + frag.hsp_len]
            elif frag.type == align.B_GAP:
                align_seq1 += seq1[frag.sa_start:frag.sa_start + frag.hsp_len]
                align_seq2 += '-' * frag.hsp_len
            frag = frag.next
        assert len(align_seq1) == len(align_seq2)
        align.alignment_free(alignment)
        return align_seq1, align_seq2, start_seq1, start_seq2
def cigar_to_backtrace(seq1, seq2, cigar):
    """
    Converts a cigar sequence into an enrich2 backtrace

    The tuple format is ``(i, j, type, length)``, where ``i`` and ``j``
    are the positions in *seq1* and *seq2*, respectively, and type is one
    of ``"match"``, ``"mismatch"``, ``"insertion"``, or ``"deletion"``.
    For indels, the ``length`` value is the number of bases inserted or
    deleted with respect to *seq1* starting at ``i``.

    Parameters
    ----------
    seq1 : `str`
        The string used during alignment for ``seq1``
    seq2 : `str`
        The string used during alignment for ``seq2``
    cigar : `str`
        The cigar string expecting characters {'M', 'I', 'D'}

    Returns
    -------
    `list`
        List of ``(i, j, type, length)`` tuples as described above.
    """
    import re
    # BUGFIX: the previous implementation sliced the cigar string with
    # cigar[0::2] / cigar[1::2], which only works for single-digit run
    # lengths (e.g. "9M") and crashes on "12M".  Tokenize properly instead:
    # each operation is a run length followed by a single non-digit code.
    ops = re.findall(r'(\d+)(\D)', cigar)
    i = len(seq1)
    j = len(seq2)
    traceback = []
    # Walk the operations from the end of the alignment backwards.
    for num, char in reversed(ops):
        num = int(num)
        if char == 'M':
            for _ in range(num):
                if seq1[i - 1] == seq2[j - 1]:
                    traceback.append((i - 1, j - 1, "match", None))
                else:
                    traceback.append((i - 1, j - 1, "mismatch", None))
                i -= 1
                j -= 1
        elif char == 'I':
            pos_1 = 0 if (i - 1) < 0 else (i - 1)
            traceback.append((pos_1, j - num, "insertion", num))
            j -= num
        elif char == 'D':
            pos_2 = 0 if (j - 1) < 0 else (j - 1)
            traceback.append((i - num, pos_2, "deletion", num))
            i -= num
        else:
            raise RuntimeError("Invalid value in alignment traceback.")
    traceback.reverse()
    return traceback
def make_dna_scoring_matrix(similarity, ordering='ACGTNX'):
    """
    Make a ctypes DNA scoring matrix for alignment.

    Parameters
    ----------
    similarity : `dict`
        Similarity matrix used by the aligner, must contain a cost mapping
        between each of 'A', 'C', 'G', 'T', 'N', 'X'.
    ordering : `str`
        String giving the key order in which the dictionary is traversed
        to build the square similarity matrix.

    Returns
    -------
    A flat ``c_int`` array of length ``len(ordering) ** 2`` holding the
    matrix in row-major order.
    """
    size = len(ordering)
    # Row-major flattening: rows follow *ordering*, as do the columns.
    scores = [similarity[row][col] for row in ordering for col in ordering]
    return (c_int * (size * size))(*scores)
def test(seq1, seq2):
    """Align *seq1*/*seq2* with both backends and print both tracebacks."""
    from enrich2.sequence.aligner import Aligner
    ambivert_aligner = Aligner(backend='ambivert')
    enrich2_aligner = Aligner(backend='enrich2')
    print('Enrich2: {}'.format(enrich2_aligner.align(seq1, seq2)))
    print('AmBiVert: {}'.format(ambivert_aligner.align(seq1, seq2)))
def build_aligners():
    """Return an (ambivert, enrich2) pair of freshly built Aligners."""
    from enrich2.sequence.aligner import Aligner
    return Aligner(backend='ambivert'), Aligner(backend='enrich2')
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.test_runner import make_test_objects
from erpnext.controllers.item_variant import (create_variant, ItemVariantExistsError,
InvalidItemAttributeValueError, get_variant)
from erpnext.stock.doctype.item.item import StockExistsForTemplate
from frappe.model.rename_doc import rename_doc
from erpnext.stock.doctype.stock_entry.stock_entry_utils import make_stock_entry
from erpnext.stock.get_item_details import get_item_details
test_ignore = ["BOM"]
test_dependencies = ["Warehouse"]
def make_item(item_code, properties=None):
    """Return the Item named *item_code*, creating it first if missing.

    *properties* is an optional dict of extra fields applied before insert.
    """
    if frappe.db.exists("Item", item_code):
        return frappe.get_doc("Item", item_code)

    base_fields = {
        "doctype": "Item",
        "item_code": item_code,
        "item_name": item_code,
        "description": item_code,
        "item_group": "Products",
    }
    item = frappe.get_doc(base_fields)
    if properties:
        item.update(properties)

    # Stock items need a default warehouse before they can be inserted.
    if item.is_stock_item and not item.default_warehouse:
        item.default_warehouse = "_Test Warehouse - _TC"

    item.insert()
    return item
class TestItem(unittest.TestCase):
    """Tests for the Item doctype: item-details lookup, variant creation
    (attribute-, numeric- and manufacturer-based), template-to-variant
    field propagation, merging and template/stock constraints."""

    def setUp(self):
        # Clear cached attribute values so each test sees fresh data.
        frappe.flags.attribute_values = None

    def get_item(self, idx):
        """Return the Item for test record *idx*, inserting it on first use."""
        item_code = test_records[idx].get("item_code")
        if not frappe.db.exists("Item", item_code):
            item = frappe.copy_doc(test_records[idx])
            item.insert()
        else:
            item = frappe.get_doc("Item", item_code)
        return item

    def test_get_item_details(self):
        # delete modified item price record and make as per test_records
        frappe.db.sql("""delete from `tabItem Price`""")

        to_check = {
            "item_code": "_Test Item",
            "item_name": "_Test Item",
            "description": "_Test Item 1",
            "warehouse": "_Test Warehouse - _TC",
            "income_account": "Sales - _TC",
            "expense_account": "_Test Account Cost for Goods Sold - _TC",
            "cost_center": "_Test Cost Center 2 - _TC",
            "qty": 1.0,
            "price_list_rate": 100.0,
            "base_price_list_rate": 0.0,
            "discount_percentage": 0.0,
            "rate": 0.0,
            "base_rate": 0.0,
            "amount": 0.0,
            "base_amount": 0.0,
            "batch_no": None,
            "item_tax_rate": '{}',
            "uom": "_Test UOM",
            "conversion_factor": 1.0,
        }

        make_test_objects("Item Price")

        details = get_item_details({
            "item_code": "_Test Item",
            "company": "_Test Company",
            "price_list": "_Test Price List",
            "currency": "_Test Currency",
            "doctype": "Sales Order",
            "conversion_rate": 1,
            "price_list_currency": "_Test Currency",
            "plc_conversion_rate": 1,
            "order_type": "Sales",
            "customer": "_Test Customer",
            "conversion_factor": 1,
            "price_list_uom_dependant": 1,
            "ignore_pricing_rule": 1
        })

        # dict.iteritems() was removed in Python 3; use items() instead.
        for key, value in to_check.items():
            self.assertEqual(value, details.get(key))

    def test_item_attribute_change_after_variant(self):
        frappe.delete_doc_if_exists("Item", "_Test Variant Item-L", force=1)

        variant = create_variant("_Test Variant Item", {"Test Size": "Large"})
        variant.save()

        # Removing all attribute values must be rejected while variants exist.
        attribute = frappe.get_doc('Item Attribute', 'Test Size')
        attribute.item_attribute_values = []

        # reset flags
        frappe.flags.attribute_values = None

        self.assertRaises(InvalidItemAttributeValueError, attribute.save)
        frappe.db.rollback()

    def test_make_item_variant(self):
        frappe.delete_doc_if_exists("Item", "_Test Variant Item-L", force=1)

        variant = create_variant("_Test Variant Item", {"Test Size": "Large"})
        variant.save()

        # doing it again should raise error
        variant = create_variant("_Test Variant Item", {"Test Size": "Large"})
        variant.item_code = "_Test Variant Item-L-duplicate"
        self.assertRaises(ItemVariantExistsError, variant.save)

    def test_copy_fields_from_template_to_variants(self):
        frappe.delete_doc_if_exists("Item", "_Test Variant Item-XL", force=1)

        fields = [{'field_name': 'item_group'}, {'field_name': 'is_stock_item'}]
        allow_fields = [d.get('field_name') for d in fields]
        set_item_variant_settings(fields)

        # Make sure the 'Extra Large' attribute value exists.
        if not frappe.db.get_value('Item Attribute Value',
                {'parent': 'Test Size', 'attribute_value': 'Extra Large'}, 'name'):
            item_attribute = frappe.get_doc('Item Attribute', 'Test Size')
            item_attribute.append('item_attribute_values', {
                'attribute_value': 'Extra Large',
                'abbr': 'XL'
            })
            item_attribute.save()

        variant = create_variant("_Test Variant Item", {"Test Size": "Extra Large"})
        variant.item_code = "_Test Variant Item-XL"
        variant.item_name = "_Test Variant Item-XL"
        variant.save()

        # Saving the template must propagate the whitelisted fields.
        template = frappe.get_doc('Item', '_Test Variant Item')
        template.item_group = "_Test Item Group D"
        template.save()

        variant = frappe.get_doc('Item', '_Test Variant Item-XL')
        for fieldname in allow_fields:
            self.assertEqual(template.get(fieldname), variant.get(fieldname))

        # Restore the template's original item group.
        template = frappe.get_doc('Item', '_Test Variant Item')
        template.item_group = "_Test Item Group Desktops"
        template.save()

    def test_make_item_variant_with_numeric_values(self):
        # cleanup
        for d in frappe.db.get_all('Item', filters={'variant_of':
                '_Test Numeric Template Item'}):
            frappe.delete_doc_if_exists("Item", d.name)
        frappe.delete_doc_if_exists("Item", "_Test Numeric Template Item")
        frappe.delete_doc_if_exists("Item Attribute", "Test Item Length")

        frappe.db.sql('''delete from `tabItem Variant Attribute`
            where attribute="Test Item Length"''')

        frappe.flags.attribute_values = None

        # make item attribute
        frappe.get_doc({
            "doctype": "Item Attribute",
            "attribute_name": "Test Item Length",
            "numeric_values": 1,
            "from_range": 0.0,
            "to_range": 100.0,
            "increment": 0.5
        }).insert()

        # make template item
        make_item("_Test Numeric Template Item", {
            "attributes": [
                {
                    "attribute": "Test Size"
                },
                {
                    "attribute": "Test Item Length",
                    "numeric_values": 1,
                    "from_range": 0.0,
                    "to_range": 100.0,
                    "increment": 0.5
                }
            ],
            "default_warehouse": "_Test Warehouse - _TC",
            "has_variants": 1
        })

        # 1.1 is not a multiple of the 0.5 increment, so saving must fail.
        variant = create_variant("_Test Numeric Template Item",
            {"Test Size": "Large", "Test Item Length": 1.1})
        self.assertEqual(variant.item_code, "_Test Numeric Template Item-L-1.1")
        variant.item_code = "_Test Numeric Variant-L-1.1"
        variant.item_name = "_Test Numeric Variant Large 1.1m"
        self.assertRaises(InvalidItemAttributeValueError, variant.save)

        # 1.5 is on the increment grid, so saving must succeed.
        variant = create_variant("_Test Numeric Template Item",
            {"Test Size": "Large", "Test Item Length": 1.5})
        self.assertEqual(variant.item_code, "_Test Numeric Template Item-L-1.5")
        variant.item_code = "_Test Numeric Variant-L-1.5"
        variant.item_name = "_Test Numeric Variant Large 1.5m"
        variant.save()

    def test_item_merging(self):
        create_item("Test Item for Merging 1")
        create_item("Test Item for Merging 2")

        make_stock_entry(item_code="Test Item for Merging 1", target="_Test Warehouse - _TC",
            qty=1, rate=100)
        make_stock_entry(item_code="Test Item for Merging 2", target="_Test Warehouse 1 - _TC",
            qty=1, rate=100)

        rename_doc("Item", "Test Item for Merging 1", "Test Item for Merging 2", merge=True)

        self.assertFalse(frappe.db.exists("Item", "Test Item for Merging 1"))

        # Both warehouses' bins must survive the merge under the new name.
        self.assertTrue(frappe.db.get_value("Bin",
            {"item_code": "Test Item for Merging 2", "warehouse": "_Test Warehouse - _TC"}))
        self.assertTrue(frappe.db.get_value("Bin",
            {"item_code": "Test Item for Merging 2", "warehouse": "_Test Warehouse 1 - _TC"}))

    def test_item_variant_by_manufacturer(self):
        fields = [{'field_name': 'description'}, {'field_name': 'variant_based_on'}]
        set_item_variant_settings(fields)

        if frappe.db.exists('Item', '_Test Variant Mfg'):
            frappe.delete_doc('Item', '_Test Variant Mfg')
        if frappe.db.exists('Item', '_Test Variant Mfg-1'):
            frappe.delete_doc('Item', '_Test Variant Mfg-1')
        if frappe.db.exists('Manufacturer', 'MSG1'):
            frappe.delete_doc('Manufacturer', 'MSG1')

        template = frappe.get_doc(dict(
            doctype='Item',
            item_code='_Test Variant Mfg',
            has_variant=1,
            item_group='Products',
            variant_based_on='Manufacturer'
        )).insert()

        manufacturer = frappe.get_doc(dict(
            doctype='Manufacturer',
            short_name='MSG1'
        )).insert()

        variant = get_variant(template.name, manufacturer=manufacturer.name)
        self.assertEqual(variant.item_code, '_Test Variant Mfg-1')
        self.assertEqual(variant.description, '_Test Variant Mfg')
        self.assertEqual(variant.manufacturer, 'MSG1')
        variant.insert()

        variant = get_variant(template.name, manufacturer=manufacturer.name,
            manufacturer_part_no='007')
        self.assertEqual(variant.item_code, '_Test Variant Mfg-2')
        self.assertEqual(variant.description, '_Test Variant Mfg')
        self.assertEqual(variant.manufacturer, 'MSG1')
        self.assertEqual(variant.manufacturer_part_no, '007')

    def test_stock_exists_against_template_item(self):
        # An item with stock ledger entries cannot be turned into a template.
        stock_item = frappe.get_all('Stock Ledger Entry', fields=["item_code"], limit=1)
        if stock_item:
            item_code = stock_item[0].item_code

            item_doc = frappe.get_doc('Item', item_code)
            item_doc.has_variants = 1
            self.assertRaises(StockExistsForTemplate, item_doc.save)
def set_item_variant_settings(fields):
    """Replace the 'fields' child table of the Item Variant Settings doc.

    *fields* is a list of dicts such as ``[{'field_name': 'item_group'}]``
    naming the template fields that are copied to variants.
    """
    # NOTE(review): presumably a Single doctype, since get_doc is called
    # without a document name -- verify.
    doc = frappe.get_doc('Item Variant Settings')
    doc.set('fields', fields)
    doc.save()
def make_item_variant():
    """Create the '_Test Variant Item-S' variant if it does not exist yet."""
    if not frappe.db.exists("Item", "_Test Variant Item-S"):
        # NOTE(review): the attributes argument is passed as a JSON string
        # here, while other call sites in this file pass a dict -- confirm
        # create_variant accepts both forms.
        variant = create_variant("_Test Variant Item", """{"Test Size": "Small"}""")
        variant.item_code = "_Test Variant Item-S"
        variant.item_name = "_Test Variant Item-S"
        variant.save()
def get_total_projected_qty(item):
    """Return the total projected qty of *item* across all warehouses.

    Returns 0.0 when the item has no Bin rows.
    """
    total_qty = frappe.db.sql(""" select sum(projected_qty) as projected_qty from tabBin
        where item_code = %(item)s""", {'item': item}, as_dict=1)

    # SUM() over zero rows yields a single row containing NULL, so guard
    # against a None value as well as an empty result set; the previous
    # code could return None instead of the documented 0.0.
    if total_qty and total_qty[0].projected_qty is not None:
        return total_qty[0].projected_qty
    return 0.0
# Fixture records for the Item doctype, loaded from its test records file.
test_records = frappe.get_test_records('Item')
def create_item(item_code, is_stock_item=None):
    """Create a minimal Item named *item_code* if it does not already exist.

    ``is_stock_item`` defaults to 1 (stock item); pass 0 explicitly to
    create a non-stock item.
    """
    if not frappe.db.exists("Item", item_code):
        item = frappe.new_doc("Item")
        item.item_code = item_code
        item.item_name = item_code
        item.description = item_code
        item.item_group = "All Item Groups"
        # BUGFIX: the previous `is_stock_item or 1` coerced an explicit 0
        # back to 1, making it impossible to create a non-stock item.
        item.is_stock_item = 1 if is_stock_item is None else is_stock_item
        item.save()
| tfroehlich82/erpnext | erpnext/stock/doctype/item/test_item.py | Python | gpl-3.0 | 10,134 |